-rw-r--r--  Documentation/arm/cluster-pm-race-avoidance.txt | 498
-rw-r--r--  Documentation/arm/vlocks.txt | 211
-rw-r--r--  Documentation/kernel-parameters.txt | 29
-rw-r--r--  Makefile | 5
-rw-r--r--  arch/arm/Kconfig | 12
-rw-r--r--  arch/arm/Kconfig.debug | 11
-rw-r--r--  arch/arm/boot/compressed/Makefile | 3
-rw-r--r--  arch/arm/boot/compressed/debug.S | 12
-rw-r--r--  arch/arm/boot/compressed/misc.c | 8
-rw-r--r--  arch/arm/common/Makefile | 3
-rw-r--r--  arch/arm/common/mcpm_entry.c | 263
-rw-r--r--  arch/arm/common/mcpm_head.S | 219
-rw-r--r--  arch/arm/common/mcpm_platsmp.c | 92
-rw-r--r--  arch/arm/common/vlock.S | 108
-rw-r--r--  arch/arm/common/vlock.h | 29
-rw-r--r--  arch/arm/include/asm/atomic.h | 24
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 75
-rw-r--r--  arch/arm/include/asm/cp15.h | 16
-rw-r--r--  arch/arm/include/asm/cputype.h | 61
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 8
-rw-r--r--  arch/arm/include/asm/glue-df.h | 20
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h | 2
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 4
-rw-r--r--  arch/arm/include/asm/kvm_asm.h | 2
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h | 107
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 42
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 67
-rw-r--r--  arch/arm/include/asm/kvm_vgic.h | 1
-rw-r--r--  arch/arm/include/asm/mach/pci.h | 11
-rw-r--r--  arch/arm/include/asm/mcpm.h | 209
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 2
-rw-r--r--  arch/arm/include/asm/pgtable.h | 9
-rw-r--r--  arch/arm/include/asm/thread_info.h | 1
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 13
-rw-r--r--  arch/arm/include/debug/uncompress.h | 7
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h | 12
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 12
-rw-r--r--  arch/arm/kernel/bios32.c | 6
-rw-r--r--  arch/arm/kernel/entry-armv.S | 46
-rw-r--r--  arch/arm/kernel/entry-common.S | 8
-rw-r--r--  arch/arm/kernel/entry-header.S | 66
-rw-r--r--  arch/arm/kernel/head-common.S | 9
-rw-r--r--  arch/arm/kernel/head-nommu.S | 8
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm/kernel/perf_event.c | 5
-rw-r--r--  arch/arm/kernel/return_address.c | 5
-rw-r--r--  arch/arm/kernel/sched_clock.c | 4
-rw-r--r--  arch/arm/kernel/setup.c | 7
-rw-r--r--  arch/arm/kernel/smp_scu.c | 2
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 9
-rw-r--r--  arch/arm/kernel/tcm.c | 1
-rw-r--r--  arch/arm/kvm/Makefile | 2
-rw-r--r--  arch/arm/kvm/arm.c | 195
-rw-r--r--  arch/arm/kvm/coproc.c | 32
-rw-r--r--  arch/arm/kvm/coproc.h | 4
-rw-r--r--  arch/arm/kvm/emulate.c | 75
-rw-r--r--  arch/arm/kvm/guest.c | 17
-rw-r--r--  arch/arm/kvm/handle_exit.c | 164
-rw-r--r--  arch/arm/kvm/interrupts.S | 13
-rw-r--r--  arch/arm/kvm/mmio.c | 46
-rw-r--r--  arch/arm/kvm/mmu.c | 184
-rw-r--r--  arch/arm/kvm/vgic.c | 2
-rw-r--r--  arch/arm/mach-omap2/id.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 2
-rw-r--r--  arch/arm/mm/Kconfig | 14
-rw-r--r--  arch/arm/mm/Makefile | 1
-rw-r--r--  arch/arm/mm/alignment.c | 2
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c | 1
-rw-r--r--  arch/arm/mm/cache-v3.S | 137
-rw-r--r--  arch/arm/mm/cache-v4.S | 2
-rw-r--r--  arch/arm/mm/dma-mapping.c | 15
-rw-r--r--  arch/arm/mm/flush.c | 15
-rw-r--r--  arch/arm/mm/mmu.c | 19
-rw-r--r--  arch/arm/mm/proc-arm740.S | 30
-rw-r--r--  arch/arm/mm/proc-arm920.S | 2
-rw-r--r--  arch/arm/mm/proc-arm926.S | 2
-rw-r--r--  arch/arm/mm/proc-mohawk.S | 2
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 2
-rw-r--r--  arch/arm/mm/proc-syms.c | 2
-rw-r--r--  arch/arm/mm/proc-v6.S | 4
-rw-r--r--  arch/arm/mm/proc-v7-2level.S | 3
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 3
-rw-r--r--  arch/arm/mm/proc-v7.S | 7
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 2
-rw-r--r--  arch/arm/mm/proc-xscale.S | 2
-rw-r--r--  arch/arm/mm/tcm.h (renamed from arch/arm/kernel/tcm.h) | 0
-rw-r--r--  arch/arm/tools/mach-types | 991
-rw-r--r--  arch/avr32/include/asm/io.h | 4
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 4
-rw-r--r--  arch/powerpc/kernel/process.c | 2
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 2
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 2
-rw-r--r--  arch/powerpc/kernel/tm.S | 2
-rw-r--r--  arch/powerpc/kvm/e500.h | 24
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 84
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 7
-rw-r--r--  arch/s390/include/asm/io.h | 4
-rw-r--r--  arch/s390/include/asm/pgtable.h | 4
-rw-r--r--  arch/sparc/include/asm/Kbuild | 5
-rw-r--r--  arch/sparc/include/asm/cputime.h | 6
-rw-r--r--  arch/sparc/include/asm/emergency-restart.h | 6
-rw-r--r--  arch/sparc/include/asm/mutex.h | 9
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 1
-rw-r--r--  arch/sparc/include/asm/serial.h | 6
-rw-r--r--  arch/sparc/include/asm/smp_32.h | 5
-rw-r--r--  arch/sparc/include/asm/switch_to_64.h | 3
-rw-r--r--  arch/sparc/include/asm/tlbflush_64.h | 37
-rw-r--r--  arch/sparc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/sparc/include/uapi/asm/types.h | 17
-rw-r--r--  arch/sparc/kernel/smp_64.c | 41
-rw-r--r--  arch/sparc/lib/bitext.c | 6
-rw-r--r--  arch/sparc/mm/iommu.c | 2
-rw-r--r--  arch/sparc/mm/srmmu.c | 4
-rw-r--r--  arch/sparc/mm/tlb.c | 38
-rw-r--r--  arch/sparc/mm/tsb.c | 57
-rw-r--r--  arch/sparc/mm/ultra.S | 119
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 47
-rw-r--r--  arch/x86/include/asm/efi.h | 7
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h | 1
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c | 18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 20
-rw-r--r--  arch/x86/kernel/microcode_core_early.c | 38
-rw-r--r--  arch/x86/kernel/setup.c | 45
-rw-r--r--  arch/x86/platform/efi/efi.c | 168
-rw-r--r--  block/blk-core.c | 1
-rw-r--r--  drivers/block/rbd.c | 3
-rw-r--r--  drivers/char/hpet.c | 14
-rw-r--r--  drivers/dma/at_hdmac.c | 9
-rw-r--r--  drivers/firmware/Kconfig | 1
-rw-r--r--  drivers/firmware/efivars.c | 98
-rw-r--r--  drivers/idle/intel_idle.c | 1
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 8
-rw-r--r--  drivers/irqchip/irq-gic.c | 3
-rw-r--r--  drivers/md/dm.c | 1
-rw-r--r--  drivers/md/raid5.c | 11
-rw-r--r--  drivers/mtd/mtdchar.c | 59
-rw-r--r--  drivers/net/bonding/bond_main.c | 90
-rw-r--r--  drivers/net/can/mcp251x.c | 10
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 31
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 110
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 36
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 104
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/dfs_pri_detector.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 3
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 264
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 4
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 10
-rw-r--r--  drivers/sbus/char/bbc_i2c.c | 4
-rw-r--r--  drivers/ssb/driver_chipcommon_pmu.c | 29
-rw-r--r--  drivers/video/fbmem.c | 39
-rw-r--r--  drivers/video/mmp/core.c | 2
-rw-r--r--  fs/binfmt_elf.c | 1
-rw-r--r--  fs/bio.c | 2
-rw-r--r--  fs/exec.c | 4
-rw-r--r--  fs/hfsplus/extents.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 2
-rw-r--r--  fs/proc/array.c | 1
-rw-r--r--  include/asm-generic/pgtable.h | 10
-rw-r--r--  include/linux/blktrace_api.h | 1
-rw-r--r--  include/linux/efi.h | 9
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/netfilter/ipset/ip_set_ahash.h | 30
-rw-r--r--  include/linux/sched.h | 5
-rw-r--r--  include/linux/ssb/ssb_driver_chipcommon.h | 2
-rw-r--r--  include/linux/swiotlb.h | 1
-rw-r--r--  include/linux/ucs2_string.h | 14
-rw-r--r--  include/net/addrconf.h | 1
-rw-r--r--  include/net/irda/irlmp.h | 3
-rw-r--r--  include/net/scm.h | 4
-rw-r--r--  include/trace/events/block.h | 8
-rw-r--r--  include/trace/events/sched.h | 2
-rw-r--r--  include/uapi/linux/fuse.h | 436
-rw-r--r--  kernel/events/core.c | 2
-rw-r--r--  kernel/hrtimer.c | 3
-rw-r--r--  kernel/kexec.c | 118
-rw-r--r--  kernel/kprobes.c | 19
-rw-r--r--  kernel/kthread.c | 52
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/smpboot.c | 14
-rw-r--r--  kernel/trace/blktrace.c | 26
-rw-r--r--  kernel/user_namespace.c | 22
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/swiotlb.c | 19
-rw-r--r--  lib/ucs2_string.c | 51
-rw-r--r--  mm/hugetlb.c | 12
-rw-r--r--  mm/memory.c | 47
-rw-r--r--  mm/mmap.c | 4
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  net/802/mrp.c | 4
-rw-r--r--  net/batman-adv/main.c | 5
-rw-r--r--  net/batman-adv/main.h | 2
-rw-r--r--  net/batman-adv/routing.c | 38
-rw-r--r--  net/batman-adv/translation-table.c | 2
-rw-r--r--  net/batman-adv/vis.c | 4
-rw-r--r--  net/bridge/br_if.c | 3
-rw-r--r--  net/bridge/br_private.h | 1
-rw-r--r--  net/bridge/br_stp_if.c | 1
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/ipv4/esp4.c | 6
-rw-r--r--  net/ipv4/ip_fragment.c | 14
-rw-r--r--  net/ipv4/netfilter/ipt_rpfilter.c | 8
-rw-r--r--  net/ipv4/syncookies.c | 4
-rw-r--r--  net/ipv4/tcp_input.c | 64
-rw-r--r--  net/ipv4/tcp_output.c | 8
-rw-r--r--  net/ipv6/addrconf.c | 24
-rw-r--r--  net/ipv6/addrconf_core.c | 19
-rw-r--r--  net/ipv6/netfilter/ip6t_rpfilter.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 12
-rw-r--r--  net/irda/iriap.c | 3
-rw-r--r--  net/irda/irlmp.c | 10
-rw-r--r--  net/mac80211/iface.c | 27
-rw-r--r--  net/mac80211/mlme.c | 24
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c | 6
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 18
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 18
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 6
-rw-r--r--  net/netfilter/nf_nat_core.c | 40
-rw-r--r--  net/openvswitch/datapath.c | 30
-rw-r--r--  net/openvswitch/flow.c | 2
-rw-r--r--  net/sched/cls_fw.c | 2
-rwxr-xr-x  scripts/checkpatch.pl | 1
-rw-r--r--  sound/core/pcm_native.c | 12
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 5
251 files changed, 5119 insertions, 2581 deletions
diff --git a/Documentation/arm/cluster-pm-race-avoidance.txt b/Documentation/arm/cluster-pm-race-avoidance.txt
new file mode 100644
index 000000000000..750b6fc24af9
--- /dev/null
+++ b/Documentation/arm/cluster-pm-race-avoidance.txt
@@ -0,0 +1,498 @@
1Cluster-wide Power-up/power-down race avoidance algorithm
2=========================================================
3
4This file documents the algorithm which is used to coordinate CPU and
5cluster setup and teardown operations and to manage hardware coherency
6controls safely.
7
8The section "Rationale" explains what the algorithm is for and why it is
9needed. "Basic model" explains general concepts using a simplified view
10of the system. The other sections explain the actual details of the
11algorithm in use.
12
13
14Rationale
15---------
16
17In a system containing multiple CPUs, it is desirable to have the
18ability to turn off individual CPUs when the system is idle, reducing
19power consumption and thermal dissipation.
20
21In a system containing multiple clusters of CPUs, it is also desirable
22to have the ability to turn off entire clusters.
23
24Turning entire clusters off and on is a risky business, because it
25involves performing potentially destructive operations affecting a group
26of independently running CPUs, while the OS continues to run. This
27means that we need some coordination in order to ensure that critical
28cluster-level operations are only performed when it is truly safe to do
29so.
30
31Simple locking may not be sufficient to solve this problem, because
32mechanisms like Linux spinlocks may rely on coherency mechanisms which
33are not immediately enabled when a cluster powers up. Since enabling or
34disabling those mechanisms may itself be a non-atomic operation (such as
35writing some hardware registers and invalidating large caches), other
36methods of coordination are required in order to guarantee safe
37power-down and power-up at the cluster level.
38
39The mechanism presented in this document describes a coherent memory
40based protocol for performing the needed coordination. It aims to be as
41lightweight as possible, while providing the required safety properties.
42
43
44Basic model
45-----------
46
47Each cluster and CPU is assigned a state, as follows:
48
49 DOWN
50 COMING_UP
51 UP
52 GOING_DOWN
53
54 +---------> UP ----------+
55 | v
56
57 COMING_UP GOING_DOWN
58
59 ^ |
60 +--------- DOWN <--------+
61
62
63DOWN: The CPU or cluster is not coherent, and is either powered off or
64 suspended, or is ready to be powered off or suspended.
65
66COMING_UP: The CPU or cluster has committed to moving to the UP state.
67 It may be part way through the process of initialisation and
68 enabling coherency.
69
70UP: The CPU or cluster is active and coherent at the hardware
71 level. A CPU in this state is not necessarily being used
72 actively by the kernel.
73
74GOING_DOWN: The CPU or cluster has committed to moving to the DOWN
75 state. It may be part way through the process of teardown and
76 coherency exit.
77
78
79Each CPU has one of these states assigned to it at any point in time.
80The CPU states are described in the "CPU state" section, below.
81
82Each cluster is also assigned a state, but it is necessary to split the
83state value into two parts (the "cluster" state and "inbound" state) and
84to introduce additional states in order to avoid races between different
85CPUs in the cluster simultaneously modifying the state. The cluster-
86level states are described in the "Cluster state" section.
87
88To help distinguish the CPU states from cluster states in this
89discussion, the state names are given a CPU_ prefix for the CPU states,
90and a CLUSTER_ or INBOUND_ prefix for the cluster states.
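
For illustration only, these states can be pictured as a handful of
small per-CPU and per-cluster variables that both sides can observe.
The following C-like sketch is hypothetical and is not the kernel's
actual layout (the real one is struct mcpm_sync_struct in
arch/arm/include/asm/mcpm.h):

	/* Sketch only -- not the kernel's actual data structures. */
	enum cpu_state { CPU_DOWN, CPU_COMING_UP, CPU_UP, CPU_GOING_DOWN };
	enum cluster_state { CLUSTER_DOWN, CLUSTER_UP, CLUSTER_GOING_DOWN };
	enum inbound_state { INBOUND_NOT_COMING_UP, INBOUND_COMING_UP };

	struct cluster_sync {
		enum cpu_state cpu[MAX_CPUS_PER_CLUSTER];
		enum cluster_state cluster;	/* outbound-side state */
		enum inbound_state inbound;	/* inbound-side state */
	};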
91
92
93CPU state
94---------
95
96In this algorithm, each individual core in a multi-core processor is
97referred to as a "CPU". CPUs are assumed to be single-threaded:
98therefore, a CPU can only be doing one thing at a single point in time.
99
100This means that CPUs fit the basic model closely.
101
102The algorithm defines the following states for each CPU in the system:
103
104 CPU_DOWN
105 CPU_COMING_UP
106 CPU_UP
107 CPU_GOING_DOWN
108
109 cluster setup and
110 CPU setup complete policy decision
111 +-----------> CPU_UP ------------+
112 | v
113
114 CPU_COMING_UP CPU_GOING_DOWN
115
116 ^ |
117 +----------- CPU_DOWN <----------+
118 policy decision CPU teardown complete
119 or hardware event
120
121
122The definitions of the four states correspond closely to the states of
123the basic model.
124
125Transitions between states occur as follows.
126
127A trigger event (spontaneous) means that the CPU can transition to the
128next state as a result of making local progress only, with no
129requirement for any external event to happen.
130
131
132CPU_DOWN:
133
134 A CPU reaches the CPU_DOWN state when it is ready for
135 power-down. On reaching this state, the CPU will typically
136 power itself down or suspend itself, via a WFI instruction or a
137 firmware call.
138
139 Next state: CPU_COMING_UP
140 Conditions: none
141
142 Trigger events:
143
144 a) an explicit hardware power-up operation, resulting
145 from a policy decision on another CPU;
146
147 b) a hardware event, such as an interrupt.
148
149
150CPU_COMING_UP:
151
152 A CPU cannot start participating in hardware coherency until the
153 cluster is set up and coherent. If the cluster is not ready,
154 then the CPU will wait in the CPU_COMING_UP state until the
155 cluster has been set up.
156
157 Next state: CPU_UP
158 Conditions: The CPU's parent cluster must be in CLUSTER_UP.
159 Trigger events: Transition of the parent cluster to CLUSTER_UP.
160
161 Refer to the "Cluster state" section for a description of the
162 CLUSTER_UP state.
163
164
165CPU_UP:
166 When a CPU reaches the CPU_UP state, it is safe for the CPU to
167 start participating in local coherency.
168
169 This is done by jumping to the kernel's CPU resume code.
170
171 Note that the definition of this state is slightly different
172 from the basic model definition: CPU_UP does not mean that the
173 CPU is coherent yet, but it does mean that it is safe to resume
174 the kernel. The kernel handles the rest of the resume
175 procedure, so the remaining steps are not visible as part of the
176 race avoidance algorithm.
177
178 The CPU remains in this state until an explicit policy decision
179 is made to shut down or suspend the CPU.
180
181 Next state: CPU_GOING_DOWN
182 Conditions: none
183 Trigger events: explicit policy decision
184
185
186CPU_GOING_DOWN:
187
188 While in this state, the CPU exits coherency, including any
189 operations required to achieve this (such as cleaning data
190 caches).
191
192 Next state: CPU_DOWN
193 Conditions: local CPU teardown complete
194 Trigger events: (spontaneous)
195
196
197Cluster state
198-------------
199
200A cluster is a group of connected CPUs with some common resources.
201Because a cluster contains multiple CPUs, it can be doing multiple
202things at the same time. This has some implications. In particular, a
203CPU can start up while another CPU is tearing the cluster down.
204
205In this discussion, the "outbound side" is the view of the cluster state
206as seen by a CPU tearing the cluster down. The "inbound side" is the
207view of the cluster state as seen by a CPU setting the cluster up.
208
209In order to enable safe coordination in such situations, it is important
210that a CPU which is setting up the cluster can advertise its state
211independently of the CPU which is tearing down the cluster. For this
212reason, the cluster state is split into two parts:
213
214 "cluster" state: The global state of the cluster; or the state
215 on the outbound side:
216
217 CLUSTER_DOWN
218 CLUSTER_UP
219 CLUSTER_GOING_DOWN
220
221 "inbound" state: The state of the cluster on the inbound side.
222
223 INBOUND_NOT_COMING_UP
224 INBOUND_COMING_UP
225
226
227	The different pairings of these states result in six possible
228 states for the cluster as a whole:
229
230 CLUSTER_UP
231 +==========> INBOUND_NOT_COMING_UP -------------+
232 # |
233 |
234 CLUSTER_UP <----+ |
235 INBOUND_COMING_UP | v
236
237 ^ CLUSTER_GOING_DOWN CLUSTER_GOING_DOWN
238 # INBOUND_COMING_UP <=== INBOUND_NOT_COMING_UP
239
240 CLUSTER_DOWN | |
241 INBOUND_COMING_UP <----+ |
242 |
243 ^ |
244 +=========== CLUSTER_DOWN <------------+
245 INBOUND_NOT_COMING_UP
246
247 Transitions -----> can only be made by the outbound CPU, and
248 only involve changes to the "cluster" state.
249
250 Transitions ===##> can only be made by the inbound CPU, and only
251 involve changes to the "inbound" state, except where there is no
252 further transition possible on the outbound side (i.e., the
253 outbound CPU has put the cluster into the CLUSTER_DOWN state).
254
255 The race avoidance algorithm does not provide a way to determine
256 which exact CPUs within the cluster play these roles. This must
257 be decided in advance by some other means. Refer to the section
258 "Last man and first man selection" for more explanation.
259
260
261 CLUSTER_DOWN/INBOUND_NOT_COMING_UP is the only state where the
262 cluster can actually be powered down.
263
264 The parallelism of the inbound and outbound CPUs is observed by
265 the existence of two different paths from CLUSTER_GOING_DOWN/
266 INBOUND_NOT_COMING_UP (corresponding to GOING_DOWN in the basic
267 model) to CLUSTER_DOWN/INBOUND_COMING_UP (corresponding to
268 COMING_UP in the basic model). The second path avoids cluster
269 teardown completely.
270
271 CLUSTER_UP/INBOUND_COMING_UP is equivalent to UP in the basic
272 model. The final transition to CLUSTER_UP/INBOUND_NOT_COMING_UP
273 is trivial and merely resets the state machine ready for the
274 next cycle.
275
276 Details of the allowable transitions follow.
277
278 The next state in each case is notated
279
280 <cluster state>/<inbound state> (<transitioner>)
281
282 where the <transitioner> is the side on which the transition
283 can occur; either the inbound or the outbound side.
284
285
286CLUSTER_DOWN/INBOUND_NOT_COMING_UP:
287
288 Next state: CLUSTER_DOWN/INBOUND_COMING_UP (inbound)
289 Conditions: none
290 Trigger events:
291
292 a) an explicit hardware power-up operation, resulting
293 from a policy decision on another CPU;
294
295 b) a hardware event, such as an interrupt.
296
297
298CLUSTER_DOWN/INBOUND_COMING_UP:
299
300 In this state, an inbound CPU sets up the cluster, including
301 enabling of hardware coherency at the cluster level and any
302 other operations (such as cache invalidation) which are required
303 in order to achieve this.
304
305 The purpose of this state is to do sufficient cluster-level
306 setup to enable other CPUs in the cluster to enter coherency
307 safely.
308
309 Next state: CLUSTER_UP/INBOUND_COMING_UP (inbound)
310 Conditions: cluster-level setup and hardware coherency complete
311 Trigger events: (spontaneous)
312
313
314CLUSTER_UP/INBOUND_COMING_UP:
315
316 Cluster-level setup is complete and hardware coherency is
317 enabled for the cluster. Other CPUs in the cluster can safely
318 enter coherency.
319
320 This is a transient state, leading immediately to
321 CLUSTER_UP/INBOUND_NOT_COMING_UP. All other CPUs on the cluster
322	should treat these two states as equivalent.
323
324 Next state: CLUSTER_UP/INBOUND_NOT_COMING_UP (inbound)
325 Conditions: none
326 Trigger events: (spontaneous)
327
328
329CLUSTER_UP/INBOUND_NOT_COMING_UP:
330
331 Cluster-level setup is complete and hardware coherency is
332 enabled for the cluster. Other CPUs in the cluster can safely
333 enter coherency.
334
335 The cluster will remain in this state until a policy decision is
336 made to power the cluster down.
337
338 Next state: CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP (outbound)
339 Conditions: none
340 Trigger events: policy decision to power down the cluster
341
342
343CLUSTER_GOING_DOWN/INBOUND_NOT_COMING_UP:
344
345 An outbound CPU is tearing the cluster down. The selected CPU
346 must wait in this state until all CPUs in the cluster are in the
347 CPU_DOWN state.
348
349 When all CPUs are in the CPU_DOWN state, the cluster can be torn
350 down, for example by cleaning data caches and exiting
351 cluster-level coherency.
352
353	To avoid unnecessary teardown operations, the outbound CPU
354	should check the inbound cluster state for asynchronous
355 transitions to INBOUND_COMING_UP. Alternatively, individual
356 CPUs can be checked for entry into CPU_COMING_UP or CPU_UP.
357
358
359 Next states:
360
361 CLUSTER_DOWN/INBOUND_NOT_COMING_UP (outbound)
362 Conditions: cluster torn down and ready to power off
363 Trigger events: (spontaneous)
364
365 CLUSTER_GOING_DOWN/INBOUND_COMING_UP (inbound)
366 Conditions: none
367 Trigger events:
368
369 a) an explicit hardware power-up operation,
370 resulting from a policy decision on another
371 CPU;
372
373 b) a hardware event, such as an interrupt.
374
375
376CLUSTER_GOING_DOWN/INBOUND_COMING_UP:
377
378 The cluster is (or was) being torn down, but another CPU has
379 come online in the meantime and is trying to set up the cluster
380 again.
381
382 If the outbound CPU observes this state, it has two choices:
383
384 a) back out of teardown, restoring the cluster to the
385 CLUSTER_UP state;
386
387 b) finish tearing the cluster down and put the cluster
388 in the CLUSTER_DOWN state; the inbound CPU will
389 set up the cluster again from there.
390
391	Choice (a) reduces latency by avoiding unnecessary teardown and
392	setup operations in situations where the cluster is not really
393	going to be powered down.
394
395
396 Next states:
397
398 CLUSTER_UP/INBOUND_COMING_UP (outbound)
399 Conditions: cluster-level setup and hardware
400 coherency complete
401 Trigger events: (spontaneous)
402
403 CLUSTER_DOWN/INBOUND_COMING_UP (outbound)
404 Conditions: cluster torn down and ready to power off
405 Trigger events: (spontaneous)
406
407
408Last man and First man selection
409--------------------------------
410
411The CPU which performs cluster tear-down operations on the outbound side
412is commonly referred to as the "last man".
413
414The CPU which performs cluster setup on the inbound side is commonly
415referred to as the "first man".
416
417The race avoidance algorithm documented above does not provide a
418mechanism to choose which CPUs should play these roles.
419
420
421Last man:
422
423When shutting down the cluster, all the CPUs involved are initially
424executing Linux and hence coherent. Therefore, ordinary spinlocks can
425be used to select a last man safely, before the CPUs become
426non-coherent.
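
For example, a platform might keep a per-cluster usage count
protected by an ordinary spinlock, with the CPU that drops the count
to zero becoming the last man. This is a hypothetical sketch, not
the kernel's actual code:

	/* Hypothetical sketch: last-man selection while still coherent. */
	static DEFINE_SPINLOCK(cluster_lock);
	static int cluster_use_count;	/* number of CPUs still up */

	static bool become_last_man(void)
	{
		bool last;

		spin_lock(&cluster_lock);
		last = (--cluster_use_count == 0);
		spin_unlock(&cluster_lock);
		return last;	/* true: this CPU tears the cluster down */
	}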
427
428
429First man:
430
431Because CPUs may power up asynchronously in response to external wake-up
432events, a dynamic mechanism is needed to make sure that only one CPU
433attempts to play the first man role and do the cluster-level
434initialisation: any other CPUs must wait for this to complete before
435proceeding.
436
437Cluster-level initialisation may involve actions such as configuring
438coherency controls in the bus fabric.
439
440The current implementation in mcpm_head.S uses a separate mutual exclusion
441mechanism to do this arbitration. This mechanism is documented in
442detail in vlocks.txt.
443
444
445Features and Limitations
446------------------------
447
448Implementation:
449
450 The current ARM-based implementation is split between
451 arch/arm/common/mcpm_head.S (low-level inbound CPU operations) and
452 arch/arm/common/mcpm_entry.c (everything else):
453
454 __mcpm_cpu_going_down() signals the transition of a CPU to the
455 CPU_GOING_DOWN state.
456
457 __mcpm_cpu_down() signals the transition of a CPU to the CPU_DOWN
458 state.
459
460 A CPU transitions to CPU_COMING_UP and then to CPU_UP via the
461 low-level power-up code in mcpm_head.S. This could
462 involve CPU-specific setup code, but in the current
463 implementation it does not.
464
465 __mcpm_outbound_enter_critical() and __mcpm_outbound_leave_critical()
466 handle transitions from CLUSTER_UP to CLUSTER_GOING_DOWN
467 and from there to CLUSTER_DOWN or back to CLUSTER_UP (in
468 the case of an aborted cluster power-down).
469
470 These functions are more complex than the __mcpm_cpu_*()
471 functions due to the extra inter-CPU coordination which
472 is needed for safe transitions at the cluster level.
473
474 A cluster transitions from CLUSTER_DOWN back to CLUSTER_UP via
475 the low-level power-up code in mcpm_head.S. This
476 typically involves platform-specific setup code,
477 provided by the platform-specific power_up_setup
478 function registered via mcpm_sync_init.
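
	For orientation, a platform's CPU power-down path might glue
	these helpers together roughly as follows. This is an
	illustrative sketch only: it assumes last-man selection has
	already been performed by other means (see "Last man and First
	man selection"), and the two flush helpers are hypothetical
	stand-ins for platform-specific teardown code.

	/* Illustrative sketch only; not a copy of any real backend. */
	static void example_power_down(unsigned int cpu, unsigned int cluster,
				       bool last_man)
	{
		__mcpm_cpu_going_down(cpu, cluster);

		if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
			flush_all_and_exit_cluster_coherency(); /* hypothetical */
			__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
		} else {
			flush_own_and_exit_cpu_coherency();	/* hypothetical */
		}

		__mcpm_cpu_down(cpu, cluster);
		/* Now ready to power off or suspend, e.g. via WFI. */
	}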
479
480Deep topologies:
481
482 As currently described and implemented, the algorithm does not
483 support CPU topologies involving more than two levels (i.e.,
484 clusters of clusters are not supported). The algorithm could be
485 extended by replicating the cluster-level states for the
486 additional topological levels, and modifying the transition
487 rules for the intermediate (non-outermost) cluster levels.
488
489
490Colophon
491--------
492
493Originally created and documented by Dave Martin for Linaro Limited, in
494collaboration with Nicolas Pitre and Achin Gupta.
495
496Copyright (C) 2012-2013 Linaro Limited
497Distributed under the terms of Version 2 of the GNU General Public
498License, as defined in linux/COPYING.
diff --git a/Documentation/arm/vlocks.txt b/Documentation/arm/vlocks.txt
new file mode 100644
index 000000000000..415960a9bab0
--- /dev/null
+++ b/Documentation/arm/vlocks.txt
@@ -0,0 +1,211 @@
1vlocks for Bare-Metal Mutual Exclusion
2======================================
3
4Voting Locks, or "vlocks", provide a simple low-level mutual exclusion
5mechanism, with reasonable but minimal requirements on the memory
6system.
7
8These are intended to be used to coordinate critical activity among CPUs
9which are otherwise non-coherent, in situations where the hardware
10provides no other mechanism to support this and ordinary spinlocks
11cannot be used.
12
13
14vlocks make use of the atomicity provided by the memory system for
15writes to a single memory location. To arbitrate, every CPU "votes for
16itself", by storing a unique number to a common memory location. The
17final value seen in that memory location when all the votes have been
18cast identifies the winner.
19
20In order to make sure that the election produces an unambiguous result
21in finite time, a CPU will only enter the election in the first place if
22no winner has been chosen and the election does not appear to have
23started yet.
24
25
26Algorithm
27---------
28
29The easiest way to explain the vlocks algorithm is with some pseudo-code:
30
31
32 int currently_voting[NR_CPUS] = { 0, };
33 int last_vote = -1; /* no votes yet */
34
35 bool vlock_trylock(int this_cpu)
36 {
37 /* signal our desire to vote */
38 currently_voting[this_cpu] = 1;
39 if (last_vote != -1) {
40 /* someone already volunteered himself */
41 currently_voting[this_cpu] = 0;
42 return false; /* not ourself */
43 }
44
45 /* let's suggest ourself */
46 last_vote = this_cpu;
47 currently_voting[this_cpu] = 0;
48
49 /* then wait until everyone else is done voting */
50 for_each_cpu(i) {
51 while (currently_voting[i] != 0)
52 /* wait */;
53 }
54
55 /* result */
56 if (last_vote == this_cpu)
57 return true; /* we won */
58 return false;
59 }
60
61	void vlock_unlock(void)
62 {
63 last_vote = -1;
64 }
65
66
67The currently_voting[] array provides a way for the CPUs to determine
68whether an election is in progress, and plays a role analogous to the
69"entering" array in Lamport's bakery algorithm [1].
70
71However, once the election has started, the underlying memory system
72atomicity is used to pick the winner. This avoids the need for a static
73priority rule to act as a tie-breaker, or any counters which could
74overflow.
75
76As long as the last_vote variable is globally visible to all CPUs, it
77will contain only one value that won't change once every CPU has cleared
78its currently_voting flag.
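
For instance, a group of such CPUs could use a vlock to elect one CPU
to perform some one-time setup. This is a hypothetical usage of the
pseudo-code above (ignoring the cache-maintenance details discussed
later); do_one_time_setup() and setup_done are illustrative names:

	/* Hypothetical usage: elect one CPU for one-time setup. */
	if (vlock_trylock(this_cpu)) {
		do_one_time_setup();	/* winner-only work */
		setup_done = 1;		/* observable by the losers */
		vlock_unlock();
	} else {
		while (!setup_done)
			; /* losers wait for the winner to finish */
	}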
79
80
81Features and limitations
82------------------------
83
84 * vlocks are not intended to be fair. In the contended case, the
85   _last_ CPU which attempts to get the lock is the most likely
86   to win.
87
88 vlocks are therefore best suited to situations where it is necessary
89 to pick a unique winner, but it does not matter which CPU actually
90 wins.
91
92 * Like other similar mechanisms, vlocks will not scale well to a large
93 number of CPUs.
94
95 vlocks can be cascaded in a voting hierarchy to permit better scaling
96 if necessary, as in the following hypothetical example for 4096 CPUs:
97
98 /* first level: local election */
99		my_town = towns[(this_cpu >> 4) & 0xff];
100		I_won = vlock_trylock(my_town, this_cpu & 0xf);
101		if (I_won) {
102			/* we won the town election, let's go for the state */
103			my_state = states[(this_cpu >> 8) & 0xf];
104			I_won = vlock_trylock(my_state, (this_cpu >> 4) & 0xf);
105			if (I_won) {
106				/* and so on */
107				I_won = vlock_trylock(the_whole_country, (this_cpu >> 8) & 0xf);
108 if (I_won) {
109 /* ... */
110 }
111 vlock_unlock(the_whole_country);
112 }
113 vlock_unlock(my_state);
114 }
115 vlock_unlock(my_town);
116
117
118ARM implementation
119------------------
120
121The current ARM implementation [2] contains some optimisations beyond
122the basic algorithm:
123
124 * By packing the members of the currently_voting array close together,
125 we can read the whole array in one transaction (providing the number
126 of CPUs potentially contending the lock is small enough). This
127 reduces the number of round-trips required to external memory.
128
129 In the ARM implementation, this means that we can use a single load
130 and comparison:
131
132 LDR Rt, [Rn]
133 CMP Rt, #0
134
135 ...in place of code equivalent to:
136
137 LDRB Rt, [Rn]
138 CMP Rt, #0
139 LDRBEQ Rt, [Rn, #1]
140 CMPEQ Rt, #0
141 LDRBEQ Rt, [Rn, #2]
142 CMPEQ Rt, #0
143 LDRBEQ Rt, [Rn, #3]
144 CMPEQ Rt, #0
145
146 This cuts down on the fast-path latency, as well as potentially
147 reducing bus contention in contended cases.
148
149 The optimisation relies on the fact that the ARM memory system
150 guarantees coherency between overlapping memory accesses of
151 different sizes, similarly to many other architectures. Note that
152 we do not care which element of currently_voting appears in which
153 bits of Rt, so there is no need to worry about endianness in this
154 optimisation.
155
156 If there are too many CPUs to read the currently_voting array in
157   one transaction, then multiple transactions are still required. The
158 implementation uses a simple loop of word-sized loads for this
159 case. The number of transactions is still fewer than would be
160 required if bytes were loaded individually.
161
162
163 In principle, we could aggregate further by using LDRD or LDM, but
164 to keep the code simple this was not attempted in the initial
165 implementation.
166
167
168 * vlocks are currently only used to coordinate between CPUs which are
169 unable to enable their caches yet. This means that the
170 implementation removes many of the barriers which would be required
171 when executing the algorithm in cached memory.
172
173   The packing of the currently_voting array does not work with cached
174   memory unless all CPUs contending the lock are cache-coherent, due
175   to cache writebacks from one CPU clobbering values written by other
176   CPUs. (Though if all the CPUs are cache-coherent, you should
177   probably be using proper spinlocks instead anyway).
178
179
180 * The "no votes yet" value used for the last_vote variable is 0 (not
181 -1 as in the pseudocode). This allows statically-allocated vlocks
182 to be implicitly initialised to an unlocked state simply by putting
183 them in .bss.
184
185 An offset is added to each CPU's ID for the purpose of setting this
186 variable, so that no CPU uses the value 0 for its ID.
187
188
189Colophon
190--------
191
192Originally created and documented by Dave Martin for Linaro Limited, for
193use in ARM-based big.LITTLE platforms, with review and input gratefully
194received from Nicolas Pitre and Achin Gupta. Thanks to Nicolas for
195grabbing most of this text out of the relevant mail thread and writing
196up the pseudocode.
197
198Copyright (C) 2012-2013 Linaro Limited
199Distributed under the terms of Version 2 of the GNU General Public
200License, as defined in linux/COPYING.
201
202
203References
204----------
205
206[1] Lamport, L. "A New Solution of Dijkstra's Concurrent Programming
207 Problem", Communications of the ACM 17, 8 (August 1974), 453-455.
208
209 http://en.wikipedia.org/wiki/Lamport%27s_bakery_algorithm
210
211[2] linux/arch/arm/common/vlock.S, www.kernel.org.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4609e81dbc37..8ccbf27aead4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
596 is selected automatically. Check
597 Documentation/kdump/kdump.txt for further details.
598
599 crashkernel_low=size[KMG]
600 [KNL, x86] parts under 4G.
601
602 crashkernel=range1:size1[,range2:size2,...][@offset]
603 [KNL] Same as above, but depends on the memory
604 in the running system. The syntax of range is
@@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
606 a memory unit (amount[KMG]). See also
607 Documentation/kdump/kdump.txt for an example.
608
606 crashkernel=size[KMG],high
607 [KNL, x86_64] range could be above 4G. Allows the
608 kernel to allocate the physical memory region from the
609 top of memory, so it may end up above 4G if the system
610 has more than 4G of RAM installed; otherwise the
611 region will be allocated below 4G, if available.
612 It will be ignored if crashkernel=X is specified.
613 crashkernel=size[KMG],low
614 [KNL, x86_64] range under 4G. When crashkernel=X,high
615 is passed, the kernel may allocate the physical memory
616 region above 4G, which can cause the second kernel to
617 crash on systems that require some amount of low
618 memory, e.g. swiotlb requires at least 64M+32K of low
619 memory; in that case the kernel tries to allocate 72M
620 below 4G automatically.
621 This option lets the user specify their own low range
622 under 4G for the second kernel instead.
623 0: to disable low allocation.
624 It is ignored when crashkernel=X,high is not used or the reserved memory is below 4G.
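For example, a command line combining the two options might look
like the following (the sizes are illustrative only):

	crashkernel=512M,high crashkernel=128M,low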
625
609 cs89x0_dma= [HW,NET]
610 Format: <dma>
611
@@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
788 edd= [EDD]
789 Format: {"off" | "on" | "skip[mbr]"}
790
808 efi_no_storage_paranoia [EFI; X86]
809 Using this parameter you can use more than 50% of
810 your EFI variable storage. Use it only if you are
811 really sure that your UEFI does sane garbage collection
812 and fulfills the spec; otherwise your board may brick.
813
791 eisa_irq_edge= [PARISC,HW]
792 See header of drivers/parisc/eisa.c.
793
diff --git a/Makefile b/Makefile
index 9cf6783c2ec3..46263d808876 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3
2PATCHLEVEL = 9
3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION = -rc8
5NAME = Unicycling Gorilla
6
7# *DOCUMENTATION*
@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
513# Carefully list dependencies so we do not try to build scripts twice
514# in parallel
515PHONY += scripts
516scripts: scripts_basic include/config/auto.conf include/config/tristate.conf 516scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
517 asm-generic
517 $(Q)$(MAKE) $(build)=$(@)
518
519# Objects we will link into vmlinux / subdirs we need to visit
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1cacda426a0e..00bdfdbdd4a8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -58,6 +58,7 @@ config ARM
58 select CLONE_BACKWARDS
59 select OLD_SIGSUSPEND3
60 select OLD_SIGACTION
61 select HAVE_CONTEXT_TRACKING
61 help
62 The ARM series is a line of low-power-consumption RISC chip designs
63 licensed by ARM Ltd and targeted at embedded applications and
@@ -1606,6 +1607,14 @@ config HAVE_ARM_TWD
1606 help
1607 This option enables support for the ARM timer and watchdog unit
1608
1610config MCPM
1611 bool "Multi-Cluster Power Management"
1612 depends on CPU_V7 && SMP
1613 help
1614 This option provides the common power management infrastructure
1615 for (multi-)cluster based systems, such as big.LITTLE based
1616 systems.
1617
1609choice
1610 prompt "Memory split"
1611 default VMSPLIT_3G
@@ -1693,8 +1702,9 @@ config SCHED_HRTICK
1693 def_bool HIGH_RES_TIMERS
1694
1695config THUMB2_KERNEL
1696 bool "Compile the kernel in Thumb-2 mode" 1705 bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
1697 depends on CPU_V7 && !CPU_V6 && !CPU_V6K
1707 default y if CPU_THUMBONLY
1698 select AEABI
1699 select ARM_ASM_UNIFIED
1700 select ARM_UNWIND
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 9b31f4311ea2..791fbeba40c6 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -602,6 +602,17 @@ config DEBUG_LL_INCLUDE
602 default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
603 default "mach/debug-macro.S"
604
605config DEBUG_UNCOMPRESS
606 bool
607 default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
608 !DEBUG_OMAP2PLUS_UART && \
609 !DEBUG_TEGRA_UART
610
611config UNCOMPRESS_INCLUDE
612 string
613 default "debug/uncompress.h" if ARCH_MULTIPLATFORM
614 default "mach/uncompress.h"
615
605config EARLY_PRINTK
606 bool "Early printk"
607 depends on DEBUG_LL
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index afed28e37ea5..3580d57ea218 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -24,6 +24,9 @@ endif
24AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
25HEAD = head.o
26OBJS += misc.o decompress.o
27ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
28OBJS += debug.o
29endif
27FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
28
29# string library code (-Os is enforced to keep it much smaller)
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
new file mode 100644
index 000000000000..6e8382d5b7a4
--- /dev/null
+++ b/arch/arm/boot/compressed/debug.S
@@ -0,0 +1,12 @@
1#include <linux/linkage.h>
2#include <asm/assembler.h>
3
4#include CONFIG_DEBUG_LL_INCLUDE
5
6ENTRY(putc)
7 addruart r1, r2, r3
8 waituart r3, r1
9 senduart r0, r1
10 busyuart r3, r1
11 mov pc, lr
12ENDPROC(putc)
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index df899834d84e..31bd43b82095 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -25,13 +25,7 @@ unsigned int __machine_arch_type;
25static void putstr(const char *ptr); 25static void putstr(const char *ptr);
26extern void error(char *x); 26extern void error(char *x);
27 27
28#ifdef CONFIG_ARCH_MULTIPLATFORM 28#include CONFIG_UNCOMPRESS_INCLUDE
29static inline void putc(int c) {}
30static inline void flush(void) {}
31static inline void arch_decomp_setup(void) {}
32#else
33#include <mach/uncompress.h>
34#endif
35
36#ifdef CONFIG_DEBUG_ICEDCC
37
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index dc8dd0de5c0f..53e68b163196 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -11,3 +11,6 @@ obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
11obj-$(CONFIG_SHARP_SCOOP) += scoop.o
12obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
13obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
14obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
15AFLAGS_mcpm_head.o := -march=armv7-a
16AFLAGS_vlock.o := -march=armv7-a
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
new file mode 100644
index 000000000000..370236dd1a03
--- /dev/null
+++ b/arch/arm/common/mcpm_entry.c
@@ -0,0 +1,263 @@
1/*
2 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
3 *
4 * Created by: Nicolas Pitre, March 2012
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/irqflags.h>
15
16#include <asm/mcpm.h>
17#include <asm/cacheflush.h>
18#include <asm/idmap.h>
19#include <asm/cputype.h>
20
21extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
22
23void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
24{
25 unsigned long val = ptr ? virt_to_phys(ptr) : 0;
26 mcpm_entry_vectors[cluster][cpu] = val;
27 sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
28}
29
30static const struct mcpm_platform_ops *platform_ops;
31
32int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
33{
34 if (platform_ops)
35 return -EBUSY;
36 platform_ops = ops;
37 return 0;
38}
39
40int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
41{
42 if (!platform_ops)
43 return -EUNATCH; /* try not to shadow power_up errors */
44 might_sleep();
45 return platform_ops->power_up(cpu, cluster);
46}
47
48typedef void (*phys_reset_t)(unsigned long);
49
50void mcpm_cpu_power_down(void)
51{
52 phys_reset_t phys_reset;
53
54 BUG_ON(!platform_ops);
55 BUG_ON(!irqs_disabled());
56
57 /*
58 * Do this before calling into the power_down method,
59 * as it might not always be safe to do afterwards.
60 */
61 setup_mm_for_reboot();
62
63 platform_ops->power_down();
64
65 /*
66 * It is possible for a power_up request to happen concurrently
67 * with a power_down request for the same CPU. In this case the
68 * power_down method might not be able to actually enter a
69 * powered down state with the WFI instruction if the power_up
70 * method has removed the required reset condition. The
71 * power_down method is then allowed to return. We must perform
72 * a re-entry in the kernel as if the power_up method just had
73 * deasserted reset on the CPU.
74 *
75 * To simplify race issues, the platform specific implementation
76 * must accommodate for the possibility of unordered calls to
77 * power_down and power_up with a usage count. Therefore, if a
78 * call to power_up is issued for a CPU that is not down, then
79 * the next call to power_down must not attempt a full shutdown
80 * but only do the minimum (normally disabling L1 cache and CPU
81 * coherency) and return just as if a concurrent power_up request
82 * had happened as described above.
83 */
84
85 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
86 phys_reset(virt_to_phys(mcpm_entry_point));
87
88 /* should never get here */
89 BUG();
90}
91
92void mcpm_cpu_suspend(u64 expected_residency)
93{
94 phys_reset_t phys_reset;
95
96 BUG_ON(!platform_ops);
97 BUG_ON(!irqs_disabled());
98
99 /* Very similar to mcpm_cpu_power_down() */
100 setup_mm_for_reboot();
101 platform_ops->suspend(expected_residency);
102 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
103 phys_reset(virt_to_phys(mcpm_entry_point));
104 BUG();
105}
106
107int mcpm_cpu_powered_up(void)
108{
109 if (!platform_ops)
110 return -EUNATCH;
111 if (platform_ops->powered_up)
112 platform_ops->powered_up();
113 return 0;
114}
115
116struct sync_struct mcpm_sync;
117
118/*
119 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
120 * This must be called at the point of committing to teardown of a CPU.
121 * The CPU cache (SCTRL.C bit) is expected to still be active.
122 */
123void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
124{
125 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
126 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
127}
128
129/*
130 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
131 * cluster can be torn down without disrupting this CPU.
132 * To avoid deadlocks, this must be called before a CPU is powered down.
133 * The CPU cache (SCTRL.C bit) is expected to be off.
134 * However L2 cache might or might not be active.
135 */
136void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
137{
138 dmb();
139 mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
140 sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
141 dsb_sev();
142}
143
144/*
145 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
146 * @state: the final state of the cluster:
147 * CLUSTER_UP: no destructive teardown was done and the cluster has been
148 * restored to the previous state (CPU cache still active); or
149 * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
150 * (CPU cache disabled, L2 cache either enabled or disabled).
151 */
152void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
153{
154 dmb();
155 mcpm_sync.clusters[cluster].cluster = state;
156 sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
157 dsb_sev();
158}
159
160/*
161 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
162 * This function should be called by the last man, after local CPU teardown
163 * is complete. CPU cache expected to be active.
164 *
165 * Returns:
166 * false: the critical section was not entered because an inbound CPU was
167 * observed, or the cluster is already being set up;
168 * true: the critical section was entered: it is now safe to tear down the
169 * cluster.
170 */
171bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
172{
173 unsigned int i;
174 struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
175
176 /* Warn inbound CPUs that the cluster is being torn down: */
177 c->cluster = CLUSTER_GOING_DOWN;
178 sync_cache_w(&c->cluster);
179
180 /* Back out if the inbound cluster is already in the critical region: */
181 sync_cache_r(&c->inbound);
182 if (c->inbound == INBOUND_COMING_UP)
183 goto abort;
184
185 /*
186 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
187 * teardown is complete on each CPU before tearing down the cluster.
188 *
189 * If any CPU has been woken up again from the DOWN state, then we
190 * shouldn't be taking the cluster down at all: abort in that case.
191 */
192 sync_cache_r(&c->cpus);
193 for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
194 int cpustate;
195
196 if (i == cpu)
197 continue;
198
199 while (1) {
200 cpustate = c->cpus[i].cpu;
201 if (cpustate != CPU_GOING_DOWN)
202 break;
203
204 wfe();
205 sync_cache_r(&c->cpus[i].cpu);
206 }
207
208 switch (cpustate) {
209 case CPU_DOWN:
210 continue;
211
212 default:
213 goto abort;
214 }
215 }
216
217 return true;
218
219abort:
220 __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
221 return false;
222}
223
224int __mcpm_cluster_state(unsigned int cluster)
225{
226 sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
227 return mcpm_sync.clusters[cluster].cluster;
228}
229
230extern unsigned long mcpm_power_up_setup_phys;
231
232int __init mcpm_sync_init(
233 void (*power_up_setup)(unsigned int affinity_level))
234{
235 unsigned int i, j, mpidr, this_cluster;
236
237 BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
238 BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
239
240 /*
241 * Set initial CPU and cluster states.
242 * Only one cluster is assumed to be active at this point.
243 */
244 for (i = 0; i < MAX_NR_CLUSTERS; i++) {
245 mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
246 mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
247 for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
248 mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
249 }
250 mpidr = read_cpuid_mpidr();
251 this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
252 for_each_online_cpu(i)
253 mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
254 mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
255 sync_cache_w(&mcpm_sync);
256
257 if (power_up_setup) {
258 mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
259 sync_cache_w(&mcpm_power_up_setup_phys);
260 }
261
262 return 0;
263}
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
new file mode 100644
index 000000000000..8178705c4b24
--- /dev/null
+++ b/arch/arm/common/mcpm_head.S
@@ -0,0 +1,219 @@
1/*
2 * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
3 *
4 * Created by: Nicolas Pitre, March 2012
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 *
12 * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
13 * for details of the synchronisation algorithms used here.
14 */
15
16#include <linux/linkage.h>
17#include <asm/mcpm.h>
18
19#include "vlock.h"
20
21.if MCPM_SYNC_CLUSTER_CPUS
22.error "cpus must be the first member of struct mcpm_sync_struct"
23.endif
24
25 .macro pr_dbg string
26#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
27 b 1901f
281902: .asciz "CPU"
291903: .asciz " cluster"
301904: .asciz ": \string"
31 .align
321901: adr r0, 1902b
33 bl printascii
34 mov r0, r9
35 bl printhex8
36 adr r0, 1903b
37 bl printascii
38 mov r0, r10
39 bl printhex8
40 adr r0, 1904b
41 bl printascii
42#endif
43 .endm
44
45 .arm
46 .align
47
48ENTRY(mcpm_entry_point)
49
50 THUMB( adr r12, BSYM(1f) )
51 THUMB( bx r12 )
52 THUMB( .thumb )
531:
54 mrc p15, 0, r0, c0, c0, 5 @ MPIDR
55 ubfx r9, r0, #0, #8 @ r9 = cpu
56 ubfx r10, r0, #8, #8 @ r10 = cluster
57 mov r3, #MAX_CPUS_PER_CLUSTER
58 mla r4, r3, r10, r9 @ r4 = canonical CPU index
59 cmp r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
60 blo 2f
61
62 /* We didn't expect this CPU. Try to cheaply make it quiet. */
631: wfi
64 wfe
65 b 1b
66
672: pr_dbg "kernel mcpm_entry_point\n"
68
69 /*
70 * MMU is off so we need to get to various variables in a
71 * position independent way.
72 */
73 adr r5, 3f
74 ldmia r5, {r6, r7, r8, r11}
75 add r6, r5, r6 @ r6 = mcpm_entry_vectors
76 ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
77 add r8, r5, r8 @ r8 = mcpm_sync
78 add r11, r5, r11 @ r11 = first_man_locks
79
80 mov r0, #MCPM_SYNC_CLUSTER_SIZE
81 mla r8, r0, r10, r8 @ r8 = sync cluster base
82
83 @ Signal that this CPU is coming UP:
84 mov r0, #CPU_COMING_UP
85 mov r5, #MCPM_SYNC_CPU_SIZE
86 mla r5, r9, r5, r8 @ r5 = sync cpu address
87 strb r0, [r5]
88
89 @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
90 @ state, because there is at least one active CPU (this CPU).
91
92 mov r0, #VLOCK_SIZE
93 mla r11, r0, r10, r11 @ r11 = cluster first man lock
94 mov r0, r11
95 mov r1, r9 @ cpu
96 bl vlock_trylock @ implies DMB
97
98 cmp r0, #0 @ failed to get the lock?
99 bne mcpm_setup_wait @ wait for cluster setup if so
100
101 ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
102 cmp r0, #CLUSTER_UP @ cluster already up?
103 bne mcpm_setup @ if not, set up the cluster
104
105 @ Otherwise, release the first man lock and skip setup:
106 mov r0, r11
107 bl vlock_unlock
108 b mcpm_setup_complete
109
110mcpm_setup:
111 @ Control dependency implies strb not observable before previous ldrb.
112
113 @ Signal that the cluster is being brought up:
114 mov r0, #INBOUND_COMING_UP
115 strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
116 dmb
117
118 @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
119 @ point onwards will observe INBOUND_COMING_UP and abort.
120
121 @ Wait for any previously-pending cluster teardown operations to abort
122 @ or complete:
123mcpm_teardown_wait:
124 ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
125 cmp r0, #CLUSTER_GOING_DOWN
126 bne first_man_setup
127 wfe
128 b mcpm_teardown_wait
129
130first_man_setup:
131 dmb
132
133 @ If the outbound gave up before teardown started, skip cluster setup:
134
135 cmp r0, #CLUSTER_UP
136 beq mcpm_setup_leave
137
138 @ power_up_setup is now responsible for setting up the cluster:
139
140 cmp r7, #0
141 mov r0, #1 @ second (cluster) affinity level
142 blxne r7 @ Call power_up_setup if defined
143 dmb
144
145 mov r0, #CLUSTER_UP
146 strb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
147 dmb
148
149mcpm_setup_leave:
150 @ Leave the cluster setup critical section:
151
152 mov r0, #INBOUND_NOT_COMING_UP
153 strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
154 dsb
155 sev
156
157 mov r0, r11
158 bl vlock_unlock @ implies DMB
159 b mcpm_setup_complete
160
161 @ In the contended case, non-first men wait here for cluster setup
162 @ to complete:
163mcpm_setup_wait:
164 ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
165 cmp r0, #CLUSTER_UP
166 wfene
167 bne mcpm_setup_wait
168 dmb
169
170mcpm_setup_complete:
171 @ If a platform-specific CPU setup hook is needed, it is
172 @ called from here.
173
174 cmp r7, #0
175 mov r0, #0 @ first (CPU) affinity level
176 blxne r7 @ Call power_up_setup if defined
177 dmb
178
179 @ Mark the CPU as up:
180
181 mov r0, #CPU_UP
182 strb r0, [r5]
183
184 @ Observability order of CPU_UP and opening of the gate does not matter.
185
186mcpm_entry_gated:
187 ldr r5, [r6, r4, lsl #2] @ r5 = CPU entry vector
188 cmp r5, #0
189 wfeeq
190 beq mcpm_entry_gated
191 dmb
192
193 pr_dbg "released\n"
194 bx r5
195
196 .align 2
197
1983: .word mcpm_entry_vectors - .
199 .word mcpm_power_up_setup_phys - 3b
200 .word mcpm_sync - 3b
201 .word first_man_locks - 3b
202
203ENDPROC(mcpm_entry_point)
204
205 .bss
206
207 .align CACHE_WRITEBACK_ORDER
208 .type first_man_locks, #object
209first_man_locks:
210 .space VLOCK_SIZE * MAX_NR_CLUSTERS
211 .align CACHE_WRITEBACK_ORDER
212
213 .type mcpm_entry_vectors, #object
214ENTRY(mcpm_entry_vectors)
215 .space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
216
217 .type mcpm_power_up_setup_phys, #object
218ENTRY(mcpm_power_up_setup_phys)
219 .space 4 @ set by mcpm_sync_init()
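
The entry code above derives a canonical CPU index from the MPIDR (Aff0 = CPU within cluster, Aff1 = cluster) and parks any CPU whose index falls outside the configured limits. A minimal user-space sketch of the same computation, assuming the MAX_* values from asm/mcpm.h later in this series:

#include <stdio.h>

#define MAX_CPUS_PER_CLUSTER	4
#define MAX_NR_CLUSTERS		2

static unsigned canonical_index(unsigned mpidr)
{
	unsigned cpu     = mpidr & 0xff;		/* ubfx r9, r0, #0, #8  */
	unsigned cluster = (mpidr >> 8) & 0xff;		/* ubfx r10, r0, #8, #8 */
	return cluster * MAX_CPUS_PER_CLUSTER + cpu;	/* mla r4, r3, r10, r9  */
}

int main(void)
{
	unsigned idx = canonical_index(0x0102);		/* cluster 1, CPU 2 */

	if (idx >= MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
		printf("unexpected CPU: would park in the wfi/wfe loop\n");
	else
		printf("canonical index = %u\n", idx);	/* prints 6 */
	return 0;
}
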
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
new file mode 100644
index 000000000000..52b88d81b7bb
--- /dev/null
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -0,0 +1,92 @@
1/*
 2 * linux/arch/arm/common/mcpm_platsmp.c
3 *
4 * Created by: Nicolas Pitre, November 2012
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Code to handle secondary CPU bringup and hotplug for the cluster power API.
12 */
13
14#include <linux/init.h>
15#include <linux/smp.h>
16#include <linux/spinlock.h>
17
18#include <linux/irqchip/arm-gic.h>
19
20#include <asm/mcpm.h>
21#include <asm/smp.h>
22#include <asm/smp_plat.h>
23
24static void __init simple_smp_init_cpus(void)
25{
26}
27
28static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
29{
30 unsigned int mpidr, pcpu, pcluster, ret;
31 extern void secondary_startup(void);
32
33 mpidr = cpu_logical_map(cpu);
34 pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
35 pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
36 pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
37 __func__, cpu, pcpu, pcluster);
38
39 mcpm_set_entry_vector(pcpu, pcluster, NULL);
40 ret = mcpm_cpu_power_up(pcpu, pcluster);
41 if (ret)
42 return ret;
43 mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
44 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
45 dsb_sev();
46 return 0;
47}
48
49static void __cpuinit mcpm_secondary_init(unsigned int cpu)
50{
51 mcpm_cpu_powered_up();
52 gic_secondary_init(0);
53}
54
55#ifdef CONFIG_HOTPLUG_CPU
56
57static int mcpm_cpu_disable(unsigned int cpu)
58{
59 /*
60 * We assume all CPUs may be shut down.
61 * This would be the hook to use for eventual Secure
62 * OS migration requests as described in the PSCI spec.
63 */
64 return 0;
65}
66
67static void mcpm_cpu_die(unsigned int cpu)
68{
69 unsigned int mpidr, pcpu, pcluster;
70 mpidr = read_cpuid_mpidr();
71 pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
72 pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
73 mcpm_set_entry_vector(pcpu, pcluster, NULL);
74 mcpm_cpu_power_down();
75}
76
77#endif
78
79static struct smp_operations __initdata mcpm_smp_ops = {
80 .smp_init_cpus = simple_smp_init_cpus,
81 .smp_boot_secondary = mcpm_boot_secondary,
82 .smp_secondary_init = mcpm_secondary_init,
83#ifdef CONFIG_HOTPLUG_CPU
84 .cpu_disable = mcpm_cpu_disable,
85 .cpu_die = mcpm_cpu_die,
86#endif
87};
88
89void __init mcpm_smp_set_ops(void)
90{
91 smp_set_ops(&mcpm_smp_ops);
92}
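
For context, a sketch of how a platform backend might tie into this file: register its low-level power methods, prime the synchronisation state machine, and route smp_operations through MCPM. All my_* names are hypothetical placeholders for platform code, not part of this patch:

#include <linux/init.h>
#include <asm/mcpm.h>

/* Hypothetical platform-specific implementations, defined elsewhere: */
extern int my_power_up(unsigned int cpu, unsigned int cluster);
extern void my_power_down(void);
extern void my_suspend(u64 expected_residency);
extern void my_powered_up(void);
extern void my_cluster_setup(unsigned int affinity_level);

static const struct mcpm_platform_ops my_power_ops = {
	.power_up	= my_power_up,
	.power_down	= my_power_down,
	.suspend	= my_suspend,
	.powered_up	= my_powered_up,
};

static int __init my_mcpm_init(void)
{
	int ret;

	ret = mcpm_platform_register(&my_power_ops);
	if (ret)
		return ret;
	ret = mcpm_sync_init(my_cluster_setup);	/* primes mcpm_sync state */
	if (ret)
		return ret;
	mcpm_smp_set_ops();	/* route smp_operations through MCPM */
	return 0;
}
early_initcall(my_mcpm_init);
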
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644
index 000000000000..ff198583f683
--- /dev/null
+++ b/arch/arm/common/vlock.S
@@ -0,0 +1,108 @@
1/*
2 * vlock.S - simple voting lock implementation for ARM
3 *
4 * Created by: Dave Martin, 2012-08-16
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 *
17 * This algorithm is described in more detail in
18 * Documentation/arm/vlocks.txt.
19 */
20
21#include <linux/linkage.h>
22#include "vlock.h"
23
24/* Select different code if voting flags can fit in a single word. */
25#if VLOCK_VOTING_SIZE > 4
26#define FEW(x...)
27#define MANY(x...) x
28#else
29#define FEW(x...) x
30#define MANY(x...)
31#endif
32
33@ voting lock for first-man coordination
34
35.macro voting_begin rbase:req, rcpu:req, rscratch:req
36 mov \rscratch, #1
37 strb \rscratch, [\rbase, \rcpu]
38 dmb
39.endm
40
41.macro voting_end rbase:req, rcpu:req, rscratch:req
42 dmb
43 mov \rscratch, #0
44 strb \rscratch, [\rbase, \rcpu]
45 dsb
46 sev
47.endm
48
49/*
50 * The vlock structure must reside in Strongly-Ordered or Device memory.
51 * This implementation deliberately eliminates most of the barriers which
52 * would be required for other memory types, and assumes that independent
53 * writes to neighbouring locations within a cacheline do not interfere
54 * with one another.
55 */
56
57@ r0: lock structure base
58@ r1: CPU ID (0-based index within cluster)
59ENTRY(vlock_trylock)
60 add r1, r1, #VLOCK_VOTING_OFFSET
61
62 voting_begin r0, r1, r2
63
64 ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
65 cmp r2, #VLOCK_OWNER_NONE
66 bne trylock_fail @ fail if so
67
68 @ Control dependency implies strb not observable before previous ldrb.
69
70 strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
71
72 voting_end r0, r1, r2 @ implies DMB
73
74 @ Wait for the current round of voting to finish:
75
76 MANY( mov r3, #VLOCK_VOTING_OFFSET )
770:
78 MANY( ldr r2, [r0, r3] )
79 FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
80 cmp r2, #0
81 wfene
82 bne 0b
83 MANY( add r3, r3, #4 )
84 MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
85 MANY( bne 0b )
86
87 @ Check who won:
88
89 dmb
90 ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
91 eor r0, r1, r2 @ zero if I won, else nonzero
92 bx lr
93
94trylock_fail:
95 voting_end r0, r1, r2
96 mov r0, #1 @ nonzero indicates that I lost
97 bx lr
98ENDPROC(vlock_trylock)
99
100@ r0: lock structure base
101ENTRY(vlock_unlock)
102 dmb
103 mov r1, #VLOCK_OWNER_NONE
104 strb r1, [r0, #VLOCK_OWNER_OFFSET]
105 dsb
106 sev
107 bx lr
108ENDPROC(vlock_unlock)
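
As a reading aid, here is an illustrative C model of the voting algorithm implemented above. It only shows the algorithm's shape; the real correctness argument depends on the Strongly-Ordered memory and barriers in vlock.S, and on the owner value (cpu + VLOCK_VOTING_OFFSET) never colliding with VLOCK_OWNER_NONE:

#include <stdbool.h>

#define NCPUS		4	/* stands in for MAX_CPUS_PER_CLUSTER */
#define OWNER_NONE	0	/* VLOCK_OWNER_NONE */

struct vlock_model {
	unsigned char owner;		/* byte at VLOCK_OWNER_OFFSET   */
	unsigned char voting[NCPUS];	/* bytes at VLOCK_VOTING_OFFSET */
};

/* Returns true if this CPU won; mirrors vlock_trylock above. */
static bool trylock_model(struct vlock_model *v, int cpu)
{
	unsigned char me = cpu + 1;	/* asm votes with cpu + VLOCK_VOTING_OFFSET,
					   guaranteeing me != OWNER_NONE */

	v->voting[cpu] = 1;		/* voting_begin */
	if (v->owner != OWNER_NONE) {	/* lock already held? */
		v->voting[cpu] = 0;	/* voting_end */
		return false;
	}
	v->owner = me;			/* submit my vote */
	v->voting[cpu] = 0;		/* voting_end */

	/* Wait for the current round of voting to finish: */
	for (int i = 0; i < NCPUS; i++)
		while (v->voting[i])
			;		/* the asm waits with wfe */

	return v->owner == me;		/* check who won */
}

static void unlock_model(struct vlock_model *v)
{
	v->owner = OWNER_NONE;
}
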
diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h
new file mode 100644
index 000000000000..3b441475a59b
--- /dev/null
+++ b/arch/arm/common/vlock.h
@@ -0,0 +1,29 @@
1/*
2 * vlock.h - simple voting lock implementation
3 *
4 * Created by: Dave Martin, 2012-08-16
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __VLOCK_H
18#define __VLOCK_H
19
20#include <asm/mcpm.h>
21
22/* Offsets and sizes are rounded to a word (4 bytes) */
23#define VLOCK_OWNER_OFFSET 0
24#define VLOCK_VOTING_OFFSET 4
25#define VLOCK_VOTING_SIZE ((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
26#define VLOCK_SIZE (VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
27#define VLOCK_OWNER_NONE 0
28
29#endif /* ! __VLOCK_H */
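
Worked example: with MAX_CPUS_PER_CLUSTER == 4 (from asm/mcpm.h above), the voting byte array rounds up to exactly one word, so vlock.S takes its FEW path and each lock occupies 8 bytes. A standalone C11 compile-time check:

#include <assert.h>

#define MAX_CPUS_PER_CLUSTER	4
#define VLOCK_OWNER_OFFSET	0
#define VLOCK_VOTING_OFFSET	4
#define VLOCK_VOTING_SIZE	((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
#define VLOCK_SIZE		(VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)

static_assert(VLOCK_VOTING_SIZE == 4, "votes fit one word: vlock.S FEW path");
static_assert(VLOCK_SIZE == 8, "two words per cluster lock");
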
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index c79f61faa3a5..da1c77d39327 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,6 +243,29 @@ typedef struct {
243 243
244#define ATOMIC64_INIT(i) { (i) } 244#define ATOMIC64_INIT(i) { (i) }
245 245
246#ifdef CONFIG_ARM_LPAE
247static inline u64 atomic64_read(const atomic64_t *v)
248{
249 u64 result;
250
251 __asm__ __volatile__("@ atomic64_read\n"
252" ldrd %0, %H0, [%1]"
253 : "=&r" (result)
254 : "r" (&v->counter), "Qo" (v->counter)
255 );
256
257 return result;
258}
259
260static inline void atomic64_set(atomic64_t *v, u64 i)
261{
262 __asm__ __volatile__("@ atomic64_set\n"
263" strd %2, %H2, [%1]"
264 : "=Qo" (v->counter)
265 : "r" (&v->counter), "r" (i)
266 );
267}
268#else
246static inline u64 atomic64_read(const atomic64_t *v) 269static inline u64 atomic64_read(const atomic64_t *v)
247{ 270{
248 u64 result; 271 u64 result;
@@ -269,6 +292,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
269 : "r" (&v->counter), "r" (i) 292 : "r" (&v->counter), "r" (i)
270 : "cc"); 293 : "cc");
271} 294}
295#endif
272 296
273static inline void atomic64_add(u64 i, atomic64_t *v) 297static inline void atomic64_add(u64 i, atomic64_t *v)
274{ 298{
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e1489c54cd12..bff71388e72a 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -363,4 +363,79 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
363 flush_cache_all(); 363 flush_cache_all();
364} 364}
365 365
366/*
367 * Memory synchronization helpers for mixed cached vs non cached accesses.
368 *
369 * Some synchronization algorithms have to set states in memory with the
370 * cache enabled or disabled depending on the code path. It is crucial
371 * to always ensure proper cache maintenance to update main memory right
372 * away in that case.
373 *
374 * Any cached write must be followed by a cache clean operation.
375 * Any cached read must be preceded by a cache invalidate operation.
376 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
377 * operation is needed to avoid discarding possible concurrent writes to the
378 * accessed memory.
379 *
380 * Also, in order to prevent a cached writer from interfering with an
381 * adjacent non-cached writer, each state variable must be located to
382 * a separate cache line.
383 */
384
385/*
386 * This needs to be >= the max cache writeback size of all
387 * supported platforms included in the current kernel configuration.
388 * This is used to align state variables to their own cache lines.
389 */
390#define __CACHE_WRITEBACK_ORDER 6 /* guessed from existing platforms */
391#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
392
393/*
394 * There is no __cpuc_clean_dcache_area but we use it anyway for
395 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
396 */
397#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
398
399/*
400 * Ensure preceding writes to *p by this CPU are visible to
401 * subsequent reads by other CPUs:
402 */
403static inline void __sync_cache_range_w(volatile void *p, size_t size)
404{
405 char *_p = (char *)p;
406
407 __cpuc_clean_dcache_area(_p, size);
408 outer_clean_range(__pa(_p), __pa(_p + size));
409}
410
411/*
412 * Ensure preceding writes to *p by other CPUs are visible to
413 * subsequent reads by this CPU. We must be careful not to
414 * discard data simultaneously written by another CPU, hence the
415 * usage of flush rather than invalidate operations.
416 */
417static inline void __sync_cache_range_r(volatile void *p, size_t size)
418{
419 char *_p = (char *)p;
420
421#ifdef CONFIG_OUTER_CACHE
422 if (outer_cache.flush_range) {
423 /*
424 * Ensure dirty data migrated from other CPUs into our cache
425 * are cleaned out safely before the outer cache is cleaned:
426 */
427 __cpuc_clean_dcache_area(_p, size);
428
429 /* Clean and invalidate stale data for *p from outer ... */
430 outer_flush_range(__pa(_p), __pa(_p + size));
431 }
432#endif
433
434 /* ... and inner cache: */
435 __cpuc_flush_dcache_area(_p, size);
436}
437
438#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
439#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
440
366#endif 441#endif
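
A sketch of the intended usage pattern for these helpers: a state variable shared between a cache-enabled writer and a CPU whose cache is off, as in the MCPM early entry code. The names are illustrative only:

#include <asm/cacheflush.h>

static int cpu_state __aligned(__CACHE_WRITEBACK_GRANULE);

/* Cached side publishes a value for an uncached reader: */
static void publish_state(int new_state)
{
	cpu_state = new_state;
	sync_cache_w(&cpu_state);	/* clean: push the write to RAM */
}

/* Cached side reads back a value last written by an uncached peer: */
static int read_peer_state(void)
{
	sync_cache_r(&cpu_state);	/* flush: drop any stale cached copy */
	return cpu_state;
}
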
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 5ef4d8015a60..1f3262e99d81 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
42#define vectors_high() (0) 42#define vectors_high() (0)
43#endif 43#endif
44 44
45#ifdef CONFIG_CPU_CP15
46
45extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ 47extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
46extern unsigned long cr_alignment; /* defined in entry-armv.S */ 48extern unsigned long cr_alignment; /* defined in entry-armv.S */
47 49
@@ -82,6 +84,18 @@ static inline void set_copro_access(unsigned int val)
82 isb(); 84 isb();
83} 85}
84 86
85#endif 87#else /* ifdef CONFIG_CPU_CP15 */
88
89/*
90 * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
91 * minds of the developers). Yielding 0 for machines without a cp15 (and making
92 * it read-only) is fine for most cases and saves quite some #ifdeffery.
93 */
94#define cr_no_alignment UL(0)
95#define cr_alignment UL(0)
96
97#endif /* ifdef CONFIG_CPU_CP15 / else */
98
99#endif /* ifndef __ASSEMBLY__ */
86 100
87#endif 101#endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e8..7652712d1d14 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
38#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ 38#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
39 ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) 39 ((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
40 40
41#define ARM_CPU_IMP_ARM 0x41
42#define ARM_CPU_IMP_INTEL 0x69
43
44#define ARM_CPU_PART_ARM1136 0xB360
45#define ARM_CPU_PART_ARM1156 0xB560
46#define ARM_CPU_PART_ARM1176 0xB760
47#define ARM_CPU_PART_ARM11MPCORE 0xB020
48#define ARM_CPU_PART_CORTEX_A8 0xC080
49#define ARM_CPU_PART_CORTEX_A9 0xC090
50#define ARM_CPU_PART_CORTEX_A5 0xC050
51#define ARM_CPU_PART_CORTEX_A15 0xC0F0
52#define ARM_CPU_PART_CORTEX_A7 0xC070
53
54#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
55#define ARM_CPU_XSCALE_ARCH_V1 0x2000
56#define ARM_CPU_XSCALE_ARCH_V2 0x4000
57#define ARM_CPU_XSCALE_ARCH_V3 0x6000
58
41extern unsigned int processor_id; 59extern unsigned int processor_id;
42 60
43#ifdef CONFIG_CPU_CP15 61#ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
50 : "cc"); \ 68 : "cc"); \
51 __val; \ 69 __val; \
52 }) 70 })
71
53#define read_cpuid_ext(ext_reg) \ 72#define read_cpuid_ext(ext_reg) \
54 ({ \ 73 ({ \
55 unsigned int __val; \ 74 unsigned int __val; \
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
59 : "cc"); \ 78 : "cc"); \
60 __val; \ 79 __val; \
61 }) 80 })
62#else
63#define read_cpuid(reg) (processor_id)
64#define read_cpuid_ext(reg) 0
65#endif
66 81
67#define ARM_CPU_IMP_ARM 0x41 82#else /* ifdef CONFIG_CPU_CP15 */
68#define ARM_CPU_IMP_INTEL 0x69
69 83
70#define ARM_CPU_PART_ARM1136 0xB360 84/*
71#define ARM_CPU_PART_ARM1156 0xB560 85 * read_cpuid and read_cpuid_ext should only ever be called on machines that
72#define ARM_CPU_PART_ARM1176 0xB760 86 * have cp15 so warn on other usages.
73#define ARM_CPU_PART_ARM11MPCORE 0xB020 87 */
74#define ARM_CPU_PART_CORTEX_A8 0xC080 88#define read_cpuid(reg) \
75#define ARM_CPU_PART_CORTEX_A9 0xC090 89 ({ \
76#define ARM_CPU_PART_CORTEX_A5 0xC050 90 WARN_ON_ONCE(1); \
77#define ARM_CPU_PART_CORTEX_A15 0xC0F0 91 0; \
78#define ARM_CPU_PART_CORTEX_A7 0xC070 92 })
79 93
80#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 94#define read_cpuid_ext(reg) read_cpuid(reg)
81#define ARM_CPU_XSCALE_ARCH_V1 0x2000 95
82#define ARM_CPU_XSCALE_ARCH_V2 0x4000 96#endif /* ifdef CONFIG_CPU_CP15 / else */
83#define ARM_CPU_XSCALE_ARCH_V3 0x6000
84 97
98#ifdef CONFIG_CPU_CP15
85/* 99/*
86 * The CPU ID never changes at run time, so we might as well tell the 100 * The CPU ID never changes at run time, so we might as well tell the
87 * compiler that it's constant. Use this function to read the CPU ID 101 * compiler that it's constant. Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
92 return read_cpuid(CPUID_ID); 106 return read_cpuid(CPUID_ID);
93} 107}
94 108
109#else /* ifdef CONFIG_CPU_CP15 */
110
111static inline unsigned int __attribute_const__ read_cpuid_id(void)
112{
113 return processor_id;
114}
115
116#endif /* ifdef CONFIG_CPU_CP15 / else */
117
95static inline unsigned int __attribute_const__ read_cpuid_implementor(void) 118static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
96{ 119{
97 return (read_cpuid_id() & 0xFF000000) >> 24; 120 return (read_cpuid_id() & 0xFF000000) >> 24;
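
With the implementor/part constants now defined unconditionally, CPU identification reads naturally. A small sketch, assuming a read_cpuid_part_number() helper that masks MIDR bits [15:4] the same way read_cpuid_implementor() above masks bits [31:24]:

#include <linux/types.h>
#include <asm/cputype.h>

static bool is_cortex_a15(void)
{
	return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
	       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
}
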
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cca9f15704ed..ea289e1435e7 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
19#undef _CACHE 19#undef _CACHE
20#undef MULTI_CACHE 20#undef MULTI_CACHE
21 21
22#if defined(CONFIG_CPU_CACHE_V3)
23# ifdef _CACHE
24# define MULTI_CACHE 1
25# else
26# define _CACHE v3
27# endif
28#endif
29
30#if defined(CONFIG_CPU_CACHE_V4) 22#if defined(CONFIG_CPU_CACHE_V4)
31# ifdef _CACHE 23# ifdef _CACHE
32# define MULTI_CACHE 1 24# define MULTI_CACHE 1
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76da..b6e9f2c108b5 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
18 * ================ 18 * ================
19 * 19 *
20 * We have the following to choose from: 20 * We have the following to choose from:
21 * arm6 - ARM6 style
22 * arm7 - ARM7 style 21 * arm7 - ARM7 style
23 * v4_early - ARMv4 without Thumb early abort handler 22 * v4_early - ARMv4 without Thumb early abort handler
24 * v4t_late - ARMv4 with Thumb late abort handler 23 * v4t_late - ARMv4 with Thumb late abort handler
25 * v4t_early - ARMv4 with Thumb early abort handler 24 * v4t_early - ARMv4 with Thumb early abort handler
26 * v5tej_early - ARMv5 with Thumb and Java early abort handler 25 * v5t_early - ARMv5 with Thumb early abort handler
26 * v5tj_early - ARMv5 with Thumb and Java early abort handler
27 * xscale - ARMv5 with Thumb with Xscale extensions 27 * xscale - ARMv5 with Thumb with Xscale extensions
28 * v6_early - ARMv6 generic early abort handler 28 * v6_early - ARMv6 generic early abort handler
29 * v7_early - ARMv7 generic early abort handler 29 * v7_early - ARMv7 generic early abort handler
@@ -39,19 +39,19 @@
39# endif 39# endif
40#endif 40#endif
41 41
42#ifdef CONFIG_CPU_ABRT_LV4T 42#ifdef CONFIG_CPU_ABRT_EV4
43# ifdef CPU_DABORT_HANDLER 43# ifdef CPU_DABORT_HANDLER
44# define MULTI_DABORT 1 44# define MULTI_DABORT 1
45# else 45# else
46# define CPU_DABORT_HANDLER v4t_late_abort 46# define CPU_DABORT_HANDLER v4_early_abort
47# endif 47# endif
48#endif 48#endif
49 49
50#ifdef CONFIG_CPU_ABRT_EV4 50#ifdef CONFIG_CPU_ABRT_LV4T
51# ifdef CPU_DABORT_HANDLER 51# ifdef CPU_DABORT_HANDLER
52# define MULTI_DABORT 1 52# define MULTI_DABORT 1
53# else 53# else
54# define CPU_DABORT_HANDLER v4_early_abort 54# define CPU_DABORT_HANDLER v4t_late_abort
55# endif 55# endif
56#endif 56#endif
57 57
@@ -63,19 +63,19 @@
63# endif 63# endif
64#endif 64#endif
65 65
66#ifdef CONFIG_CPU_ABRT_EV5TJ 66#ifdef CONFIG_CPU_ABRT_EV5T
67# ifdef CPU_DABORT_HANDLER 67# ifdef CPU_DABORT_HANDLER
68# define MULTI_DABORT 1 68# define MULTI_DABORT 1
69# else 69# else
70# define CPU_DABORT_HANDLER v5tj_early_abort 70# define CPU_DABORT_HANDLER v5t_early_abort
71# endif 71# endif
72#endif 72#endif
73 73
74#ifdef CONFIG_CPU_ABRT_EV5T 74#ifdef CONFIG_CPU_ABRT_EV5TJ
75# ifdef CPU_DABORT_HANDLER 75# ifdef CPU_DABORT_HANDLER
76# define MULTI_DABORT 1 76# define MULTI_DABORT 1
77# else 77# else
78# define CPU_DABORT_HANDLER v5t_early_abort 78# define CPU_DABORT_HANDLER v5tj_early_abort
79# endif 79# endif
80#endif 80#endif
81 81
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe2477..ed94b1a366ae 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
37 * IOP3XX processor registers 37 * IOP3XX processor registers
38 */ 38 */
39#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000 39#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
40#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000 40#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
41#define IOP3XX_PERIPHERAL_SIZE 0x00002000 41#define IOP3XX_PERIPHERAL_SIZE 0x00002000
42#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\ 42#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
43 IOP3XX_PERIPHERAL_SIZE - 1) 43 IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15df..124623e5ef14 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
211 211
212#define HSR_HVC_IMM_MASK ((1UL << 16) - 1) 212#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
213 213
214#define HSR_DABT_S1PTW (1U << 7)
215#define HSR_DABT_CM (1U << 8)
216#define HSR_DABT_EA (1U << 9)
217
214#endif /* __ARM_KVM_ARM_H__ */ 218#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e1..18d50322a9e2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
75extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 75extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
76 76
77extern void __kvm_flush_vm_context(void); 77extern void __kvm_flush_vm_context(void);
78extern void __kvm_tlb_flush_vmid(struct kvm *kvm); 78extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
79 79
80extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 80extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
81#endif 81#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb5..82b4babead2c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
22#include <linux/kvm_host.h> 22#include <linux/kvm_host.h>
23#include <asm/kvm_asm.h> 23#include <asm/kvm_asm.h>
24#include <asm/kvm_mmio.h> 24#include <asm/kvm_mmio.h>
25#include <asm/kvm_arm.h>
25 26
26u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); 27unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
27u32 *vcpu_spsr(struct kvm_vcpu *vcpu); 28unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
28 29
29int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); 30bool kvm_condition_valid(struct kvm_vcpu *vcpu);
30void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); 31void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
31void kvm_inject_undefined(struct kvm_vcpu *vcpu); 32void kvm_inject_undefined(struct kvm_vcpu *vcpu);
32void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); 33void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
37 return 1; 38 return 1;
38} 39}
39 40
40static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) 41static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
41{ 42{
42 return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; 43 return &vcpu->arch.regs.usr_regs.ARM_pc;
43} 44}
44 45
45static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) 46static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
46{ 47{
47 return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; 48 return &vcpu->arch.regs.usr_regs.ARM_cpsr;
48} 49}
49 50
50static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) 51static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
69 return reg == 15; 70 return reg == 15;
70} 71}
71 72
73static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
74{
75 return vcpu->arch.fault.hsr;
76}
77
78static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
79{
80 return vcpu->arch.fault.hxfar;
81}
82
83static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
84{
85 return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
86}
87
88static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
89{
90 return vcpu->arch.fault.hyp_pc;
91}
92
93static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
94{
95 return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
96}
97
98static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
99{
100 return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
101}
102
103static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
104{
105 return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
106}
107
108static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
109{
110 return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
111}
112
113static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
114{
115 return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
116}
117
118static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
119{
120 return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
121}
122
123/* Get Access Size from a data abort */
124static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
125{
126 switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
127 case 0:
128 return 1;
129 case 1:
130 return 2;
131 case 2:
132 return 4;
133 default:
134 kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
135 return -EFAULT;
136 }
137}
138
139/* This one is not specific to Data Abort */
140static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
141{
142 return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
143}
144
145static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
146{
147 return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
148}
149
150static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
151{
152 return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
153}
154
155static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
156{
157 return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
158}
159
160static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
161{
162 return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
163}
164
72#endif /* __ARM_KVM_EMULATE_H__ */ 165#endif /* __ARM_KVM_EMULATE_H__ */
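
A sketch of how an MMIO emulation path might combine the new accessors to decode a stage-2 data abort, modelled on the mmio handling elsewhere in this series; the function itself is illustrative:

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

static int decode_mmio_abort(struct kvm_vcpu *vcpu, phys_addr_t *fault_ipa,
			     bool *is_write, int *len, int *rd)
{
	if (!kvm_vcpu_dabt_isvalid(vcpu))	/* no valid ISS: cannot emulate */
		return -EINVAL;
	if (kvm_vcpu_dabt_isextabt(vcpu) || kvm_vcpu_dabt_iss1tw(vcpu))
		return -EFAULT;			/* not an emulatable access */

	/* HPFAR gives IPA[39:12]; the page offset comes from HFAR: */
	*fault_ipa = kvm_vcpu_get_fault_ipa(vcpu) |
		     (kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK);
	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	*len = kvm_vcpu_dabt_get_as(vcpu);	/* 1, 2 or 4 bytes */
	if (*len < 0)
		return *len;
	*rd = kvm_vcpu_dabt_get_rd(vcpu);	/* register to load/store */
	return 0;
}
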
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12d..0c4e643d939e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
80 void *objects[KVM_NR_MEM_OBJS]; 80 void *objects[KVM_NR_MEM_OBJS];
81}; 81};
82 82
83struct kvm_vcpu_fault_info {
84 u32 hsr; /* Hyp Syndrome Register */
85 u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
86 u32 hpfar; /* Hyp IPA Fault Address Register */
87 u32 hyp_pc; /* PC when exception was taken from Hyp mode */
88};
89
90typedef struct vfp_hard_struct kvm_kernel_vfp_t;
91
83struct kvm_vcpu_arch { 92struct kvm_vcpu_arch {
84 struct kvm_regs regs; 93 struct kvm_regs regs;
85 94
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
93 u32 midr; 102 u32 midr;
94 103
95 /* Exception Information */ 104 /* Exception Information */
96 u32 hsr; /* Hyp Syndrome Register */ 105 struct kvm_vcpu_fault_info fault;
97 u32 hxfar; /* Hyp Data/Inst Fault Address Register */
98 u32 hpfar; /* Hyp IPA Fault Address Register */
99 106
100 /* Floating point registers (VFP and Advanced SIMD/NEON) */ 107 /* Floating point registers (VFP and Advanced SIMD/NEON) */
101 struct vfp_hard_struct vfp_guest; 108 kvm_kernel_vfp_t vfp_guest;
102 struct vfp_hard_struct *vfp_host; 109 kvm_kernel_vfp_t *vfp_host;
103 110
104 /* VGIC state */ 111 /* VGIC state */
105 struct vgic_cpu vgic_cpu; 112 struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
122 /* Interrupt related fields */ 129 /* Interrupt related fields */
123 u32 irq_lines; /* IRQ and FIQ levels */ 130 u32 irq_lines; /* IRQ and FIQ levels */
124 131
125 /* Hyp exception information */
126 u32 hyp_pc; /* PC when exception was taken from Hyp mode */
127
128 /* Cache some mmu pages needed inside spinlock regions */ 132 /* Cache some mmu pages needed inside spinlock regions */
129 struct kvm_mmu_memory_cache mmu_page_cache; 133 struct kvm_mmu_memory_cache mmu_page_cache;
130 134
@@ -181,4 +185,26 @@ struct kvm_one_reg;
181int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); 185int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
182int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); 186int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
183 187
188int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
189 int exception_index);
190
191static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
192 unsigned long hyp_stack_ptr,
193 unsigned long vector_ptr)
194{
195 unsigned long pgd_low, pgd_high;
196
197 pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
198 pgd_high = (pgd_ptr >> 32ULL);
199
200 /*
201 * Call initialization code, and switch to the full blown
202 * HYP code. The init code doesn't need to preserve these registers as
203 * r1-r3 and r12 are already callee save according to the AAPCS.
 204 * Note that we slightly misuse the prototype by casting the pgd_low to
205 * a void *.
206 */
207 kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
208}
209
184#endif /* __ARM_KVM_HOST_H__ */ 210#endif /* __ARM_KVM_HOST_H__ */
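
A tiny standalone illustration of the pgd split performed above; the HYP init code receives the 64-bit value as two 32-bit register arguments:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pgd_ptr  = 0x123456000ULL;	/* an LPAE physical address */
	uint32_t pgd_low  = pgd_ptr & ((1ULL << 32) - 1);
	uint32_t pgd_high = pgd_ptr >> 32;

	/* prints low=0x23456000 high=0x1 */
	printf("low=%#x high=%#x\n", (unsigned)pgd_low, (unsigned)pgd_high);
	return 0;
}
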
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b34874..970f3b5fa109 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
19#ifndef __ARM_KVM_MMU_H__ 19#ifndef __ARM_KVM_MMU_H__
20#define __ARM_KVM_MMU_H__ 20#define __ARM_KVM_MMU_H__
21 21
22#include <asm/cacheflush.h>
23#include <asm/pgalloc.h>
24#include <asm/idmap.h>
25
26/*
27 * We directly use the kernel VA for the HYP, as we can directly share
28 * the mapping (HTTBR "covers" TTBR1).
29 */
30#define HYP_PAGE_OFFSET_MASK (~0UL)
31#define HYP_PAGE_OFFSET PAGE_OFFSET
32#define KERN_TO_HYP(kva) (kva)
33
22int create_hyp_mappings(void *from, void *to); 34int create_hyp_mappings(void *from, void *to);
23int create_hyp_io_mappings(void *from, void *to, phys_addr_t); 35int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
24void free_hyp_pmds(void); 36void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
36int kvm_mmu_init(void); 48int kvm_mmu_init(void);
37void kvm_clear_hyp_idmap(void); 49void kvm_clear_hyp_idmap(void);
38 50
51static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
52{
53 pte_val(*pte) = new_pte;
54 /*
55 * flush_pmd_entry just takes a void pointer and cleans the necessary
56 * cache entries, so we can reuse the function for ptes.
57 */
58 flush_pmd_entry(pte);
59}
60
39static inline bool kvm_is_write_fault(unsigned long hsr) 61static inline bool kvm_is_write_fault(unsigned long hsr)
40{ 62{
41 unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; 63 unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
47 return true; 69 return true;
48} 70}
49 71
72static inline void kvm_clean_pgd(pgd_t *pgd)
73{
74 clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
75}
76
77static inline void kvm_clean_pmd_entry(pmd_t *pmd)
78{
79 clean_pmd_entry(pmd);
80}
81
82static inline void kvm_clean_pte(pte_t *pte)
83{
84 clean_pte_table(pte);
85}
86
87static inline void kvm_set_s2pte_writable(pte_t *pte)
88{
89 pte_val(*pte) |= L_PTE_S2_RDWR;
90}
91
92struct kvm;
93
94static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
95{
96 /*
97 * If we are going to insert an instruction page and the icache is
98 * either VIPT or PIPT, there is a potential problem where the host
99 * (or another VM) may have used the same page as this guest, and we
100 * read incorrect data from the icache. If we're using a PIPT cache,
101 * we can invalidate just that page, but if we are using a VIPT cache
102 * we need to invalidate the entire icache - damn shame - as written
103 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
104 *
 105 * VIVT caches are tagged using both the ASID and the VMID and don't
106 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
107 */
108 if (icache_is_pipt()) {
109 unsigned long hva = gfn_to_hva(kvm, gfn);
110 __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
111 } else if (!icache_is_vivt_asid_tagged()) {
112 /* any kind of VIPT cache */
113 __flush_icache_all();
114 }
115}
116
50#endif /* __ARM_KVM_MMU_H__ */ 117#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd3..343744e4809c 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/kvm.h> 23#include <linux/kvm.h>
24#include <linux/kvm_host.h>
25#include <linux/irqreturn.h> 24#include <linux/irqreturn.h>
26#include <linux/spinlock.h> 25#include <linux/spinlock.h>
27#include <linux/types.h> 26#include <linux/types.h>
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 5cf2e979b4be..7d2c3c843801 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -30,6 +30,11 @@ struct hw_pci {
30 void (*postinit)(void); 30 void (*postinit)(void);
31 u8 (*swizzle)(struct pci_dev *dev, u8 *pin); 31 u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
32 int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); 32 int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
33 resource_size_t (*align_resource)(struct pci_dev *dev,
34 const struct resource *res,
35 resource_size_t start,
36 resource_size_t size,
37 resource_size_t align);
33}; 38};
34 39
35/* 40/*
@@ -51,6 +56,12 @@ struct pci_sys_data {
51 u8 (*swizzle)(struct pci_dev *, u8 *); 56 u8 (*swizzle)(struct pci_dev *, u8 *);
52 /* IRQ mapping */ 57 /* IRQ mapping */
53 int (*map_irq)(const struct pci_dev *, u8, u8); 58 int (*map_irq)(const struct pci_dev *, u8, u8);
 59 /* Resource alignment requirements */
60 resource_size_t (*align_resource)(struct pci_dev *dev,
61 const struct resource *res,
62 resource_size_t start,
63 resource_size_t size,
64 resource_size_t align);
54 void *private_data; /* platform controller private data */ 65 void *private_data; /* platform controller private data */
55}; 66};
56 67
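
A sketch of a host controller supplying the new hook; the 1 MiB constraint and the my_* names are invented for illustration:

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/mach/pci.h>

extern int my_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); /* hypothetical */

static resource_size_t my_align_resource(struct pci_dev *dev,
					 const struct resource *res,
					 resource_size_t start,
					 resource_size_t size,
					 resource_size_t align)
{
	/* e.g. a bridge that can only remap memory BARs in 1 MiB steps: */
	if (res->flags & IORESOURCE_MEM)
		return ALIGN(start, SZ_1M);
	return start;
}

static struct hw_pci my_pci __initdata = {
	.nr_controllers	= 1,
	.map_irq	= my_map_irq,
	.align_resource	= my_align_resource,
};
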
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
new file mode 100644
index 000000000000..0f7b7620e9a5
--- /dev/null
+++ b/arch/arm/include/asm/mcpm.h
@@ -0,0 +1,209 @@
1/*
2 * arch/arm/include/asm/mcpm.h
3 *
4 * Created by: Nicolas Pitre, April 2012
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef MCPM_H
13#define MCPM_H
14
15/*
16 * Maximum number of possible clusters / CPUs per cluster.
17 *
18 * This should be sufficient for quite a while, while keeping the
19 * (assembly) code simpler. When this starts to grow then we'll have
20 * to consider dynamic allocation.
21 */
22#define MAX_CPUS_PER_CLUSTER 4
23#define MAX_NR_CLUSTERS 2
24
25#ifndef __ASSEMBLY__
26
27#include <linux/types.h>
28#include <asm/cacheflush.h>
29
30/*
31 * Platform specific code should use this symbol to set up secondary
32 * entry location for processors to use when released from reset.
33 */
34extern void mcpm_entry_point(void);
35
36/*
37 * This is used to indicate where the given CPU from given cluster should
38 * branch once it is ready to re-enter the kernel using ptr, or NULL if it
39 * should be gated. A gated CPU is held in a WFE loop until its vector
40 * becomes non NULL.
41 */
42void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
43
44/*
45 * CPU/cluster power operations API for higher subsystems to use.
46 */
47
48/**
 49 * mcpm_cpu_power_up - make the given CPU in the given cluster runnable
50 *
51 * @cpu: CPU number within given cluster
52 * @cluster: cluster number for the CPU
53 *
54 * The identified CPU is brought out of reset. If the cluster was powered
55 * down then it is brought up as well, taking care not to let the other CPUs
56 * in the cluster run, and ensuring appropriate cluster setup.
57 *
58 * Caller must ensure the appropriate entry vector is initialized with
59 * mcpm_set_entry_vector() prior to calling this.
60 *
61 * This must be called in a sleepable context. However, the implementation
62 * is strongly encouraged to return early and let the operation happen
63 * asynchronously, especially when significant delays are expected.
64 *
65 * If the operation cannot be performed then an error code is returned.
66 */
67int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
68
69/**
70 * mcpm_cpu_power_down - power the calling CPU down
71 *
72 * The calling CPU is powered down.
73 *
74 * If this CPU is found to be the "last man standing" in the cluster
75 * then the cluster is prepared for power-down too.
76 *
77 * This must be called with interrupts disabled.
78 *
79 * This does not return. Re-entry in the kernel is expected via
80 * mcpm_entry_point.
81 */
82void mcpm_cpu_power_down(void);
83
84/**
85 * mcpm_cpu_suspend - bring the calling CPU in a suspended state
86 *
87 * @expected_residency: duration in microseconds the CPU is expected
88 * to remain suspended, or 0 if unknown/infinity.
89 *
90 * The calling CPU is suspended. The expected residency argument is used
91 * as a hint by the platform specific backend to implement the appropriate
92 * sleep state level according to the knowledge it has on wake-up latency
93 * for the given hardware.
94 *
95 * If this CPU is found to be the "last man standing" in the cluster
96 * then the cluster may be prepared for power-down too, if the expected
97 * residency makes it worthwhile.
98 *
99 * This must be called with interrupts disabled.
100 *
101 * This does not return. Re-entry in the kernel is expected via
102 * mcpm_entry_point.
103 */
104void mcpm_cpu_suspend(u64 expected_residency);
105
106/**
 107 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
108 *
109 * This lets the platform specific backend code perform needed housekeeping
110 * work. This must be called by the newly activated CPU as soon as it is
111 * fully operational in kernel space, before it enables interrupts.
112 *
113 * If the operation cannot be performed then an error code is returned.
114 */
115int mcpm_cpu_powered_up(void);
116
117/*
118 * Platform specific methods used in the implementation of the above API.
119 */
120struct mcpm_platform_ops {
121 int (*power_up)(unsigned int cpu, unsigned int cluster);
122 void (*power_down)(void);
123 void (*suspend)(u64);
124 void (*powered_up)(void);
125};
126
127/**
128 * mcpm_platform_register - register platform specific power methods
129 *
130 * @ops: mcpm_platform_ops structure to register
131 *
132 * An error is returned if the registration has been done previously.
133 */
134int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
135
136/* Synchronisation structures for coordinating safe cluster setup/teardown: */
137
138/*
139 * When modifying this structure, make sure you update the MCPM_SYNC_ defines
140 * to match.
141 */
142struct mcpm_sync_struct {
143 /* individual CPU states */
144 struct {
145 s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
146 } cpus[MAX_CPUS_PER_CLUSTER];
147
148 /* cluster state */
149 s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
150
151 /* inbound-side state */
152 s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
153};
154
155struct sync_struct {
156 struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
157};
158
159extern unsigned long sync_phys; /* physical address of *mcpm_sync */
160
161void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
162void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
163void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
164bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
165int __mcpm_cluster_state(unsigned int cluster);
166
167int __init mcpm_sync_init(
168 void (*power_up_setup)(unsigned int affinity_level));
169
170void __init mcpm_smp_set_ops(void);
171
172#else
173
174/*
175 * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
176 * cannot be included in asm files. Let's work around the conflict like this.
177 */
178#include <asm/asm-offsets.h>
179#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
180
181#endif /* ! __ASSEMBLY__ */
182
183/* Definitions for mcpm_sync_struct */
184#define CPU_DOWN 0x11
185#define CPU_COMING_UP 0x12
186#define CPU_UP 0x13
187#define CPU_GOING_DOWN 0x14
188
189#define CLUSTER_DOWN 0x21
190#define CLUSTER_UP 0x22
191#define CLUSTER_GOING_DOWN 0x23
192
193#define INBOUND_NOT_COMING_UP 0x31
194#define INBOUND_COMING_UP 0x32
195
196/*
197 * Offsets for the mcpm_sync_struct members, for use in asm.
198 * We don't want to make them global to the kernel via asm-offsets.c.
199 */
200#define MCPM_SYNC_CLUSTER_CPUS 0
201#define MCPM_SYNC_CPU_SIZE __CACHE_WRITEBACK_GRANULE
202#define MCPM_SYNC_CLUSTER_CLUSTER \
203 (MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
204#define MCPM_SYNC_CLUSTER_INBOUND \
205 (MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
206#define MCPM_SYNC_CLUSTER_SIZE \
207 (MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)
208
209#endif
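
Worked example of the asm-side offsets above, assuming the guessed __CACHE_WRITEBACK_ORDER of 6 (64-byte granule) and 4 CPUs per cluster; a standalone C11 compile-time check:

#include <assert.h>

#define GRANULE			64	/* __CACHE_WRITEBACK_GRANULE */
#define MAX_CPUS_PER_CLUSTER	4

#define SYNC_CLUSTER_CPUS	0
#define SYNC_CPU_SIZE		GRANULE
#define SYNC_CLUSTER_CLUSTER \
	(SYNC_CLUSTER_CPUS + SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
#define SYNC_CLUSTER_INBOUND	(SYNC_CLUSTER_CLUSTER + GRANULE)
#define SYNC_CLUSTER_SIZE	(SYNC_CLUSTER_INBOUND + GRANULE)

static_assert(SYNC_CLUSTER_CLUSTER == 256, "cluster state at byte 256");
static_assert(SYNC_CLUSTER_INBOUND == 320, "inbound state at byte 320");
static_assert(SYNC_CLUSTER_SIZE == 384, "384 bytes per cluster");
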
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64c..86b8fe398b95 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
111#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ 111#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
112#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ 112#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
113#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ 113#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
114#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ 114#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
115 115
116/* 116/*
117 * Hyp-mode PL2 PTE definitions for LPAE. 117 * Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 80d6fc4dbe4a..9bcd262a9008 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -61,6 +61,15 @@ extern void __pgd_error(const char *file, int line, pgd_t);
61#define FIRST_USER_ADDRESS PAGE_SIZE 61#define FIRST_USER_ADDRESS PAGE_SIZE
62 62
63/* 63/*
64 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
65 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
66 * page shared between user and kernel).
67 */
68#ifdef CONFIG_ARM_LPAE
69#define USER_PGTABLES_CEILING TASK_SIZE
70#endif
71
72/*
64 * The pgprot_* and protection_map entries will be fixed up in runtime 73 * The pgprot_* and protection_map entries will be fixed up in runtime
65 * to include the cachable and bufferable bits based on memory policy, 74 * to include the cachable and bufferable bits based on memory policy,
66 * as well as any architecture dependent bits like global/ASID and SMP 75 * as well as any architecture dependent bits like global/ASID and SMP
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index cddda1f41f0f..1995d1a84060 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -152,6 +152,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
152#define TIF_SYSCALL_AUDIT 9 152#define TIF_SYSCALL_AUDIT 9
153#define TIF_SYSCALL_TRACEPOINT 10 153#define TIF_SYSCALL_TRACEPOINT 10
154#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ 154#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
155#define TIF_NOHZ 12 /* in adaptive nohz mode */
155#define TIF_USING_IWMMXT 17 156#define TIF_USING_IWMMXT 17
156#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 157#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
157#define TIF_RESTORE_SIGMASK 20 158#define TIF_RESTORE_SIGMASK 20
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 9e9c041358ca..a3625d141c1d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
14 14
15#include <asm/glue.h> 15#include <asm/glue.h>
16 16
17#define TLB_V3_PAGE (1 << 0)
18#define TLB_V4_U_PAGE (1 << 1) 17#define TLB_V4_U_PAGE (1 << 1)
19#define TLB_V4_D_PAGE (1 << 2) 18#define TLB_V4_D_PAGE (1 << 2)
20#define TLB_V4_I_PAGE (1 << 3) 19#define TLB_V4_I_PAGE (1 << 3)
@@ -22,7 +21,6 @@
22#define TLB_V6_D_PAGE (1 << 5) 21#define TLB_V6_D_PAGE (1 << 5)
23#define TLB_V6_I_PAGE (1 << 6) 22#define TLB_V6_I_PAGE (1 << 6)
24 23
25#define TLB_V3_FULL (1 << 8)
26#define TLB_V4_U_FULL (1 << 9) 24#define TLB_V4_U_FULL (1 << 9)
27#define TLB_V4_D_FULL (1 << 10) 25#define TLB_V4_D_FULL (1 << 10)
28#define TLB_V4_I_FULL (1 << 11) 26#define TLB_V4_I_FULL (1 << 11)
@@ -52,7 +50,6 @@
52 * ============= 50 * =============
53 * 51 *
54 * We have the following to choose from: 52 * We have the following to choose from:
55 * v3 - ARMv3
56 * v4 - ARMv4 without write buffer 53 * v4 - ARMv4 without write buffer
57 * v4wb - ARMv4 with write buffer without I TLB flush entry instruction 54 * v4wb - ARMv4 with write buffer without I TLB flush entry instruction
58 * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction 55 * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -169,7 +166,7 @@
169# define v6wbi_always_flags (-1UL) 166# define v6wbi_always_flags (-1UL)
170#endif 167#endif
171 168
172#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 169#define v7wbi_tlb_flags_smp (TLB_WB | TLB_BARRIER | \
173 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ 170 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
174 TLB_V7_UIS_ASID | TLB_V7_UIS_BP) 171 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
175#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ 172#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
330 if (tlb_flag(TLB_WB)) 327 if (tlb_flag(TLB_WB))
331 dsb(); 328 dsb();
332 329
333 tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
334 tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); 330 tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
335 tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); 331 tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
336 tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); 332 tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
351 if (tlb_flag(TLB_WB)) 347 if (tlb_flag(TLB_WB))
352 dsb(); 348 dsb();
353 349
354 if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { 350 if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
355 if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { 351 if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
356 tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
357 tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); 352 tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
358 tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); 353 tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
359 tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); 354 tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
385 if (tlb_flag(TLB_WB)) 380 if (tlb_flag(TLB_WB))
386 dsb(); 381 dsb();
387 382
388 if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && 383 if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
389 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 384 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
390 tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
391 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); 385 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
392 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); 386 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
393 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); 387 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
418 if (tlb_flag(TLB_WB)) 412 if (tlb_flag(TLB_WB))
419 dsb(); 413 dsb();
420 414
421 tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
422 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); 415 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
423 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); 416 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
424 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); 417 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
diff --git a/arch/arm/include/debug/uncompress.h b/arch/arm/include/debug/uncompress.h
new file mode 100644
index 000000000000..0e2949b0fae9
--- /dev/null
+++ b/arch/arm/include/debug/uncompress.h
@@ -0,0 +1,7 @@
1#ifdef CONFIG_DEBUG_UNCOMPRESS
2extern void putc(int c);
3#else
4static inline void putc(int c) {}
5#endif
6static inline void flush(void) {}
7static inline void arch_decomp_setup(void) {}
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367bf..c1ee007523d7 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
53#define KVM_ARM_FIQ_spsr fiq_regs[7] 53#define KVM_ARM_FIQ_spsr fiq_regs[7]
54 54
55struct kvm_regs { 55struct kvm_regs {
56 struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ 56 struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
57 __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ 57 unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
58 __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ 58 unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
59 __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */ 59 unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
60 __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ 60 unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
61 __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ 61 unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
62}; 62};
63 63
64/* Supported Processor Types */ 64/* Supported Processor Types */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 923eec7105cf..a53efa993690 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -149,6 +149,10 @@ int main(void)
149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); 149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); 150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
151 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); 151 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
152 BLANK();
153 DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
154 DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
155 BLANK();
152#ifdef CONFIG_KVM_ARM_HOST 156#ifdef CONFIG_KVM_ARM_HOST
153 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 157 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
154 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); 158 DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
@@ -165,10 +169,10 @@ int main(void)
165 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); 169 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
166 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); 170 DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
167 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); 171 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
168 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); 172 DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
169 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); 173 DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
170 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); 174 DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
171 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); 175 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
172#ifdef CONFIG_KVM_ARM_VGIC 176#ifdef CONFIG_KVM_ARM_VGIC
173 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 177 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
174 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); 178 DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index a1f73b502ef0..b2ed73c45489 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -462,6 +462,7 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
462 sys->busnr = busnr; 462 sys->busnr = busnr;
463 sys->swizzle = hw->swizzle; 463 sys->swizzle = hw->swizzle;
464 sys->map_irq = hw->map_irq; 464 sys->map_irq = hw->map_irq;
465 sys->align_resource = hw->align_resource;
465 INIT_LIST_HEAD(&sys->resources); 466 INIT_LIST_HEAD(&sys->resources);
466 467
467 if (hw->private_data) 468 if (hw->private_data)
@@ -574,6 +575,8 @@ char * __init pcibios_setup(char *str)
574resource_size_t pcibios_align_resource(void *data, const struct resource *res, 575resource_size_t pcibios_align_resource(void *data, const struct resource *res,
575 resource_size_t size, resource_size_t align) 576 resource_size_t size, resource_size_t align)
576{ 577{
578 struct pci_dev *dev = data;
579 struct pci_sys_data *sys = dev->sysdata;
577 resource_size_t start = res->start; 580 resource_size_t start = res->start;
578 581
579 if (res->flags & IORESOURCE_IO && start & 0x300) 582 if (res->flags & IORESOURCE_IO && start & 0x300)
@@ -581,6 +584,9 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
581 584
582 start = (start + align - 1) & ~(align - 1); 585 start = (start + align - 1) & ~(align - 1);
583 586
587 if (sys->align_resource)
588 return sys->align_resource(dev, res, start, size, align);
589
584 return start; 590 return start;
585} 591}
586 592
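
The generic fallback above rounds start up with (start + align - 1) & ~(align - 1), which assumes align is a power of two; the new hook then gets the last word. A trivial standalone demonstration of the rounding:

#include <stdio.h>

static unsigned long align_up(unsigned long start, unsigned long align)
{
	return (start + align - 1) & ~(align - 1);	/* align: power of two */
}

int main(void)
{
	/* 0x3004 rounded up to a 4 KiB boundary: */
	printf("%#lx\n", align_up(0x3004, 0x1000));	/* prints 0x4000 */
	return 0;
}
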
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0f82098c9bfe..c66ca7e4ee91 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,18 +192,6 @@ __dabt_svc:
192 svc_entry 192 svc_entry
193 mov r2, sp 193 mov r2, sp
194 dabt_helper 194 dabt_helper
195
196 @
197 @ IRQs off again before pulling preserved data off the stack
198 @
199 disable_irq_notrace
200
201#ifdef CONFIG_TRACE_IRQFLAGS
202 tst r5, #PSR_I_BIT
203 bleq trace_hardirqs_on
204 tst r5, #PSR_I_BIT
205 blne trace_hardirqs_off
206#endif
207 svc_exit r5 @ return from exception 195 svc_exit r5 @ return from exception
208 UNWIND(.fnend ) 196 UNWIND(.fnend )
209ENDPROC(__dabt_svc) 197ENDPROC(__dabt_svc)
@@ -223,12 +211,7 @@ __irq_svc:
223 blne svc_preempt 211 blne svc_preempt
224#endif 212#endif
225 213
226#ifdef CONFIG_TRACE_IRQFLAGS 214 svc_exit r5, irq = 1 @ return from exception
227 @ The parent context IRQs must have been enabled to get here in
228 @ the first place, so there's no point checking the PSR I bit.
229 bl trace_hardirqs_on
230#endif
231 svc_exit r5 @ return from exception
232 UNWIND(.fnend ) 215 UNWIND(.fnend )
233ENDPROC(__irq_svc) 216ENDPROC(__irq_svc)
234 217
@@ -295,22 +278,8 @@ __und_svc_fault:
295 mov r0, sp @ struct pt_regs *regs 278 mov r0, sp @ struct pt_regs *regs
296 bl __und_fault 279 bl __und_fault
297 280
298 @
299 @ IRQs off again before pulling preserved data off the stack
300 @
301__und_svc_finish: 281__und_svc_finish:
302 disable_irq_notrace
303
304 @
305 @ restore SPSR and restart the instruction
306 @
307 ldr r5, [sp, #S_PSR] @ Get SVC cpsr 282 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
308#ifdef CONFIG_TRACE_IRQFLAGS
309 tst r5, #PSR_I_BIT
310 bleq trace_hardirqs_on
311 tst r5, #PSR_I_BIT
312 blne trace_hardirqs_off
313#endif
314 svc_exit r5 @ return from exception 283 svc_exit r5 @ return from exception
315 UNWIND(.fnend ) 284 UNWIND(.fnend )
316ENDPROC(__und_svc) 285ENDPROC(__und_svc)
@@ -320,18 +289,6 @@ __pabt_svc:
 	svc_entry
 	mov	r2, sp				@ regs
 	pabt_helper
-
-	@
-	@ IRQs off again before pulling preserved data off the stack
-	@
-	disable_irq_notrace
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r5, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	r5, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
@@ -396,6 +353,7 @@ ENDPROC(__pabt_svc)
 #ifdef CONFIG_IRQSOFF_TRACER
 	bl	trace_hardirqs_off
 #endif
+	ct_user_exit save = 0
 	.endm

 	.macro	kuser_cmpxchg_check
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index fefd7f971437..bc5bc0a97131 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -35,12 +35,11 @@ ret_fast_syscall:
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	fast_work_pending
-#if defined(CONFIG_IRQSOFF_TRACER)
 	asm_trace_hardirqs_on
-#endif

 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
+	ct_user_enter

 	restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend		)
@@ -71,11 +70,11 @@ ENTRY(ret_to_user_from_irq)
 	tst	r1, #_TIF_WORK_MASK
 	bne	work_pending
 no_work_pending:
-#if defined(CONFIG_IRQSOFF_TRACER)
 	asm_trace_hardirqs_on
-#endif
+
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
+	ct_user_enter save = 0

 	restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user_from_irq)
@@ -406,6 +405,7 @@ ENTRY(vector_swi)
 	mcr	p15, 0, ip, c1, c0		@ update control register
 #endif
 	enable_irq
+	ct_user_exit

 	get_thread_info tsk
 	adr	tbl, sys_call_table		@ load syscall table pointer
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a8531eadd3d..160f3376ba6d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -74,7 +74,24 @@
 	.endm

 #ifndef CONFIG_THUMB2_KERNEL
-	.macro	svc_exit, rpsr
+	.macro	svc_exit, rpsr, irq = 0
+	.if	\irq != 0
+	@ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
+#endif
+	.else
+	@ IRQs off again before pulling preserved data off the stack
+	disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	\rpsr, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	\rpsr, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	.endif
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
@@ -120,7 +137,24 @@
 	mov	pc, \reg
 	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr
+	.macro	svc_exit, rpsr, irq = 0
+	.if	\irq != 0
+	@ IRQs already off
+#ifdef CONFIG_TRACE_IRQFLAGS
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
+#endif
+	.else
+	@ IRQs off again before pulling preserved data off the stack
+	disable_irq_notrace
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	\rpsr, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	\rpsr, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	.endif
 	ldr	lr, [sp, #S_SP]			@ top of the stack
 	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
 	clrex					@ clear the exclusive monitor
@@ -164,6 +198,34 @@
 #endif	/* !CONFIG_THUMB2_KERNEL */

 /*
+ * Context tracking subsystem. Used to instrument transitions
+ * between user and kernel mode.
+ */
+	.macro ct_user_exit, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+	.if	\save
+	stmdb	sp!, {r0-r3, ip, lr}
+	bl	user_exit
+	ldmia	sp!, {r0-r3, ip, lr}
+	.else
+	bl	user_exit
+	.endif
+#endif
+	.endm
+
+	.macro ct_user_enter, save = 1
+#ifdef CONFIG_CONTEXT_TRACKING
+	.if	\save
+	stmdb	sp!, {r0-r3, ip, lr}
+	bl	user_enter
+	ldmia	sp!, {r0-r3, ip, lr}
+	.else
+	bl	user_enter
+	.endif
+#endif
+	.endm
+
+/*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
  *
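Note: the new ct_user_exit/ct_user_enter macros above are the asm side of CONFIG_CONTEXT_TRACKING; they tell RCU and the full-dynticks code whether a CPU is executing user or kernel code, and the save argument only decides whether the AAPCS caller-clobbered registers (r0-r3, ip, lr) must be preserved around the C call. A runnable userspace model of the bracketing, with user_exit()/user_enter() as stand-ins for the real kernel hooks:

    #include <stdio.h>

    static void user_exit(void)  { puts("ctx: user -> kernel"); }
    static void user_enter(void) { puts("ctx: kernel -> user"); }

    /* Every exception path gets the same shape the macros enforce. */
    static void syscall_path(void)
    {
            user_exit();    /* ct_user_exit at the top of vector_swi */
            /* ... dispatch through sys_call_table ... */
            user_enter();   /* ct_user_enter just before restore_user_regs */
    }

    int main(void) { syscall_path(); return 0; }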
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 854bd22380d3..5b391a689b47 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -98,8 +98,9 @@ __mmap_switched:
 	str	r9, [r4]			@ Save processor ID
 	str	r1, [r5]			@ Save machine type
 	str	r2, [r6]			@ Save atags pointer
-	bic	r4, r0, #CR_A			@ Clear 'A' bit
-	stmia	r7, {r0, r4}			@ Save control register values
+	cmp	r7, #0
+	bicne	r4, r0, #CR_A			@ Clear 'A' bit
+	stmneia	r7, {r0, r4}			@ Save control register values
 	b	start_kernel
 ENDPROC(__mmap_switched)

@@ -113,7 +114,11 @@ __mmap_switched_data:
 	.long	processor_id			@ r4
 	.long	__machine_arch_type		@ r5
 	.long	__atags_pointer			@ r6
+#ifdef CONFIG_CPU_CP15
 	.long	cr_alignment			@ r7
+#else
+	.long	0				@ r7
+#endif
 	.long	init_thread_union + THREAD_START_SP @ sp
 	.size	__mmap_switched_data, . - __mmap_switched_data

diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2c228a07e58c..6a2e09c952c7 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,15 +32,21 @@
  * numbers for r1.
  *
  */
-	.arm

 	__HEAD
+
+#ifdef CONFIG_CPU_THUMBONLY
+	.thumb
+ENTRY(stext)
+#else
+	.arm
 ENTRY(stext)

  THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
  THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
  THUMB(	.thumb			)	@ switch to Thumb now.
  THUMB(1:			)
+#endif

 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5dc1aa6f0f7d..1fd749ee4a1b 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }

-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
 	.notifier_call = dbg_cpu_pm_notify,
 };

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 146157dfe27c..8c3094d0f7b7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;

-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;

 	return armpmu->get_event_idx(hw_events, event) >= 0;
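Note: the hunk above splits one <= comparison in two so that an OFF event is only charged a counter when enable_on_exec will later activate it. A standalone restatement of the predicate — the PERF_EVENT_STATE_* ordering (ERROR below OFF) matches perf's real constants, but the function name is ours:

    #include <stdbool.h>
    #include <stdio.h>

    #define STATE_ERROR (-2)
    #define STATE_OFF   (-1)

    /* true => the event can never run here, so no counter is needed */
    static bool needs_no_counter(int state, bool enable_on_exec)
    {
            if (state < STATE_OFF)
                    return true;
            return state == STATE_OFF && !enable_on_exec;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   needs_no_counter(STATE_ERROR, false),  /* 1 */
                   needs_no_counter(STATE_OFF, false),    /* 1 */
                   needs_no_counter(STATE_OFF, true));    /* 0 */
            return 0;
    }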
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 8085417555dd..fafedd86885d 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -26,7 +26,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
 	struct return_address_data *data = d;

 	if (!data->level) {
-		data->addr = (void *)frame->lr;
+		data->addr = (void *)frame->pc;

 		return 1;
 	} else {
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
 	struct stackframe frame;
 	register unsigned long current_sp asm ("sp");

-	data.level = level + 1;
+	data.level = level + 2;
+	data.addr = NULL;

 	frame.fp = (unsigned long)__builtin_frame_address(0);
 	frame.sp = current_sp;
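Note: the level + 2 above keeps the caller-facing contract 0-based while skipping the frames the walk itself introduces (return_address() and the unwinder callback), and data.addr = NULL avoids returning stack garbage when the walk terminates early. A runnable userspace analogue of the contract, assuming GCC builtins:

    #include <stdio.h>

    /* Level 0 means "my caller", just like return_address(0). */
    __attribute__((noinline)) static void *who_called_me(void)
    {
            return __builtin_return_address(0);
    }

    int main(void)
    {
            printf("called from %p\n", who_called_me());
            return 0;
    }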
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index bd6f56b9ec21..59d2adb764a9 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)

 static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }

-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 {
 	u64 epoch_ns;
 	u32 epoch_cyc;
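Note: sched_clock() is called from the tracer itself, so every helper on its path must be notrace or ftrace would recurse — that is all the hunk above changes. The fixed-point conversion it annotates is worth a worked example; mult/shift are chosen so that mult / 2^shift approximates NSEC_PER_SEC / rate. The 24 MHz rate below is illustrative, not from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Same formula as the kernel's cyc_to_ns(): ns = (cyc * mult) >> shift */
    static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
    {
            return (cyc * mult) >> shift;
    }

    int main(void)
    {
            uint32_t shift = 24;
            /* (10^9 << 24) / 24e6 ~= 699050666, i.e. ~41.67 ns/cycle */
            uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / 24000000);

            printf("24000000 cycles = %llu ns (expect ~1e9)\n",
                   (unsigned long long)cyc_to_ns(24000000, mult, shift));
            return 0;
    }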
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d343a6c3a6d1..728007c4a2b7 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -56,7 +56,6 @@
 #include <asm/virt.h>

 #include "atags.h"
-#include "tcm.h"


 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -291,10 +290,10 @@ static int cpu_has_aliasing_icache(unsigned int arch)

 static void __init cacheid_init(void)
 {
-	unsigned int cachetype = read_cpuid_cachetype();
 	unsigned int arch = cpu_architecture();

 	if (arch >= CPU_ARCH_ARMv6) {
+		unsigned int cachetype = read_cpuid_cachetype();
 		if ((cachetype & (7 << 29)) == 4 << 29) {
 			/* ARMv7 register format */
 			arch = CPU_ARCH_ARMv7;
@@ -390,7 +389,7 @@ static void __init feat_v6_fixup(void)
  *
  * cpu_init sets up the per-CPU stacks.
  */
-void cpu_init(void)
+void notrace cpu_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	struct stack *stk = &stacks[cpu];
@@ -798,8 +797,6 @@ void __init setup_arch(char **cmdline_p)

 	reserve_crashkernel();

-	tcm_init();
-
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif
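Note: the cacheid_init() hunk above moves the Cache Type Register read inside the v6+ check, since reading CTR via CP15 is only meaningful once the architecture is known to provide it. The format probe it guards is a two-line check on CTR[31:29]:

    #include <stdint.h>
    #include <stdio.h>

    /* CTR[31:29] == 0b100 means the ARMv7 register layout. */
    static int ctr_is_v7_format(uint32_t ctr)
    {
            return (ctr & (7u << 29)) == (4u << 29);
    }

    int main(void)
    {
            uint32_t ctr = 0x84448003;      /* sample Cortex-A-style CTR value */

            printf("v7 format: %s\n", ctr_is_v7_format(ctr) ? "yes" : "no");
            return 0;
    }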
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 45eac87ed66a..5bc1a63284e3 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -41,7 +41,7 @@ void scu_enable(void __iomem *scu_base)

 #ifdef CONFIG_ARM_ERRATA_764369
 	/* Cortex-A9 only */
-	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
+	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
 		scu_ctrl = __raw_readl(scu_base + 0x30);
 		if (!(scu_ctrl & 1))
 			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index e82e1d248772..9a52a07aa40e 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -98,21 +98,21 @@ static void broadcast_tlb_a15_erratum(void)
 		return;

 	dummy_flush_tlb_a15_erratum();
-	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
-			       NULL, 1);
+	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }

 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu;
+	int cpu, this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };

 	if (!erratum_a15_798181())
 		return;

 	dummy_flush_tlb_a15_erratum();
+	this_cpu = get_cpu();
 	for_each_online_cpu(cpu) {
-		if (cpu == smp_processor_id())
+		if (cpu == this_cpu)
 			continue;
 		/*
 		 * We only need to send an IPI if the other CPUs are running
@@ -127,6 +127,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 			cpumask_set_cpu(cpu, &mask);
 	}
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+	put_cpu();
 }

 void flush_tlb_all(void)
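Note: the change above is a preemption-safety fix. With CONFIG_PREEMPT, a task can migrate between two calls to smp_processor_id(), so the old loop could compare different "current" CPUs on different iterations (and warn in debug builds). get_cpu() disables preemption, pinning one stable answer until put_cpu(). The shape of the pattern, as a kernel-style sketch (not runnable standalone; consider_ipi() is a hypothetical helper):

    int cpu, this_cpu = get_cpu();  /* disables preemption */

    for_each_online_cpu(cpu)
            if (cpu != this_cpu)    /* stable for the whole walk */
                    consider_ipi(cpu);
    put_cpu();                      /* re-enables preemption */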
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30ae6bb4a310..f50f19e5c138 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -17,7 +17,6 @@
 #include <asm/mach/map.h>
 #include <asm/memory.h>
 #include <asm/system_info.h>
-#include "tcm.h"

 static struct gen_pool *tcm_pool;
 static bool dtcm_present;
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index fc96ce6f2357..8dc5e76cb789 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

 obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o mmio.o psci.o
 obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
 obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5a936988eb24..a0dfc2a53f91 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -30,11 +30,9 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"

-#include <asm/unified.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/mman.h>
-#include <asm/cputype.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/virt.h>
@@ -44,14 +42,13 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
-#include <asm/opcodes.h>

 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension	virt");
 #endif

 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
 static unsigned long hyp_default_vectors;

 /* Per-CPU variable containing the currently running vcpu. */
@@ -201,6 +198,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
 		r = 1;
+		break;
 	case KVM_CAP_NR_VCPUS:
 		r = num_online_cpus();
 		break;
@@ -303,22 +301,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return 0;
 }

-int __attribute_const__ kvm_target_cpu(void)
-{
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
-
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
-
-	switch (part_number) {
-	case ARM_CPU_PART_CORTEX_A15:
-		return KVM_ARM_TARGET_CORTEX_A15;
-	default:
-		return -EINVAL;
-	}
-}
-
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	int ret;
@@ -481,163 +463,6 @@ static void update_vttbr(struct kvm *kvm)
 	spin_unlock(&kvm_vmid_lock);
 }

-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* SVC called from Hyp mode should never get here */
-	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-	BUG();
-	return -EINVAL; /* Squash warning */
-}
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
-		      vcpu->arch.hsr & HSR_HVC_IMM_MASK);
-
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static exit_handle_fn arm_exit_handlers[] = {
-	[HSR_EC_WFI]		= kvm_handle_wfi,
-	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
-	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
-	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
-	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
-	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
-	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
-	[HSR_EC_HVC]		= handle_hvc,
-	[HSR_EC_SMC]		= handle_smc,
-	[HSR_EC_IABT]		= kvm_handle_guest_abort,
-	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
-	[HSR_EC_DABT]		= kvm_handle_guest_abort,
-	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
-};
-
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed. So let's re-implement the hardware, in
- * software!
- */
-static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
-
-	/* Top two bits non-zero? Unconditional. */
-	if (vcpu->arch.hsr >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to QEMU.
- */
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index)
-{
-	unsigned long hsr_ec;
-
-	switch (exception_index) {
-	case ARM_EXCEPTION_IRQ:
-		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
-			vcpu->arch.hyp_pc);
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
-	case ARM_EXCEPTION_HVC:
-		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
-
-		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
-		    || !arm_exit_handlers[hsr_ec]) {
-			kvm_err("Unkown exception class: %#08lx, "
-				"hsr: %#08x\n", hsr_ec,
-				(unsigned int)vcpu->arch.hsr);
-			BUG();
-		}
-
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			bool is_wide = vcpu->arch.hsr & HSR_IL;
-			kvm_skip_instr(vcpu, is_wide);
-			return 1;
-		}
-
-		return arm_exit_handlers[hsr_ec](vcpu, run);
-	default:
-		kvm_pr_unimpl("Unsupported exception type: %d",
-			      exception_index);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		return 0;
-	}
-}
-
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	if (likely(vcpu->arch.has_run_once))
@@ -972,7 +797,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static void cpu_init_hyp_mode(void *vector)
 {
 	unsigned long long pgd_ptr;
-	unsigned long pgd_low, pgd_high;
 	unsigned long hyp_stack_ptr;
 	unsigned long stack_page;
 	unsigned long vector_ptr;
@@ -981,20 +805,11 @@ static void cpu_init_hyp_mode(void *vector)
 	__hyp_set_vectors((unsigned long)vector);

 	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
-	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
-	pgd_high = (pgd_ptr >> 32ULL);
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;

-	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code. The init code doesn't need to preserve these registers as
-	 * r1-r3 and r12 are already callee save according to the AAPCS.
-	 * Note that we slightly misuse the prototype by casing the pgd_low to
-	 * a void *.
-	 */
-	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 }

 /**
@@ -1077,7 +892,7 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the host VFP structures
 	 */
-	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
 	if (!kvm_host_vfp_state) {
 		err = -ENOMEM;
 		kvm_err("Cannot allocate host VFP state\n");
@@ -1085,7 +900,7 @@ static int init_hyp_mode(void)
 	}

 	for_each_possible_cpu(cpu) {
-		struct vfp_hard_struct *vfp;
+		kvm_kernel_vfp_t *vfp;

 		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
 		err = create_hyp_mappings(vfp, vfp + 1);
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4ea9a982269c..8eea97be1ed5 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -76,14 +76,14 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	u32 val;
+	unsigned long val;
 	int cpu;

-	cpu = get_cpu();
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);

+	cpu = get_cpu();
+
 	cpumask_setall(&vcpu->arch.require_dcache_flush);
 	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,

 	if (likely(r->access(vcpu, params, r))) {
 		/* Skip instruction, since it was emulated */
-		kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 		return 1;
 	}
 	/* If access function fails, it should complain. */
 } else {
-	kvm_err("Unsupported guest CP15 access at: %08x\n",
+	kvm_err("Unsupported guest CP15 access at: %08lx\n",
 		*vcpu_pc(vcpu));
 	print_cp_instr(params);
 }
@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = true;

-	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
 	params.Op2 = 0;
-	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRn = 0;

 	return emulate_cp15(vcpu, &params);
@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = false;

-	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
-	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
-	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
+	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;

 	return emulate_cp15(vcpu, &params);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 992adfafa2ff..b7301d3e4799 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 				      const struct coproc_params *params)
 {
-	kvm_debug("CP15 write to read-only register at: %08x\n",
+	kvm_debug("CP15 write to read-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;
@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
 				      const struct coproc_params *params)
 {
-	kvm_debug("CP15 read to write-only register at: %08x\n",
+	kvm_debug("CP15 read to write-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d61450ac6665..bdede9e7da51 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -20,6 +20,7 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/opcodes.h>
 #include <trace/events/kvm.h>

 #include "trace.h"
@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
  * Return a pointer to the register number valid in the current mode of
  * the virtual CPU.
  */
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	u32 *reg_array = (u32 *)&vcpu->arch.regs;
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

 	switch (mode) {
 	case USR_MODE...SVC_MODE:
@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 /*
  * Return the SPSR for the current mode of the virtual CPU.
  */
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 	switch (mode) {
 	case SVC_MODE:
 		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
 	}
 }

-/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
- * @vcpu:	the vcpu pointer
- * @run:	the kvm_run structure pointer
- *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed. So let's re-implement the hardware, in
+ * software!
  */
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+bool kvm_condition_valid(struct kvm_vcpu *vcpu)
 {
-	trace_kvm_wfi(*vcpu_pc(vcpu));
-	kvm_vcpu_block(vcpu);
-	return 1;
+	unsigned long cpsr, cond, insn;
+
+	/*
+	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+	 * catch undefined instructions, and then we won't get past
+	 * the arm_exit_handlers test anyway.
+	 */
+	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
+
+	/* Top two bits non-zero? Unconditional. */
+	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+		return true;
+
+	cpsr = *vcpu_cpsr(vcpu);
+
+	/* Is condition field valid? */
+	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
+		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
+	else {
+		/* This can happen in Thumb mode: examine IT state. */
+		unsigned long it;
+
+		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+		/* it == 0 => unconditional. */
+		if (it == 0)
+			return true;
+
+		/* The cond for this insn works out as the top 4 bits. */
+		cond = (it >> 4);
+	}
+
+	/* Shift makes it look like an ARM-mode instruction */
+	insn = cond << 28;
+	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
 }

 /**
@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset = 4;
@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
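Note: kvm_condition_valid(), now exported from emulate.c above, has to reconstruct the base condition of a trapped Thumb instruction from ITSTATE when the HSR condition field is invalid. The bit shuffle is easy to get wrong, so here is a runnable check of the same extraction (the example CPSR value is made up):

    #include <stdio.h>

    /* CPSR bits [15:10] and [26:25] hold ITSTATE; its top four bits are
     * the base condition for instructions inside the IT block.
     */
    static unsigned long it_state(unsigned long cpsr)
    {
            return ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
    }

    int main(void)
    {
            unsigned long it = 0xA8;        /* made-up ITSTATE */
            unsigned long cpsr = ((it & 0xFC) << 8) | ((it & 0x3) << 25);

            printf("ITSTATE=%#lx cond=%#lx (0 => unconditional)\n",
                   it_state(cpsr), it_state(cpsr) >> 4);
            return 0;
    }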
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 2339d9609d36..152d03612181 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }

+int __attribute_const__ kvm_target_cpu(void)
+{
+	unsigned long implementor = read_cpuid_implementor();
+	unsigned long part_number = read_cpuid_part_number();
+
+	if (implementor != ARM_CPU_IMP_ARM)
+		return -EINVAL;
+
+	switch (part_number) {
+	case ARM_CPU_PART_CORTEX_A15:
+		return KVM_ARM_TARGET_CORTEX_A15;
+	default:
+		return -EINVAL;
+	}
+}
+
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init)
 {
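Note: kvm_target_cpu(), moved into guest.c above, is just MIDR decoding: implementer 0x41 ('A' for ARM) plus the Cortex-A15 part number are the only combination this port accepts. A runnable illustration of the architectural field layout (the sample MIDR value is an example, and the kernel's own read_cpuid_* helpers keep the fields masked rather than shifted):

    #include <stdint.h>
    #include <stdio.h>

    /* Per the ARM ARM: MIDR[31:24] = implementer, MIDR[15:4] = part number. */
    static unsigned int midr_implementor(uint32_t midr) { return midr >> 24; }
    static unsigned int midr_part(uint32_t midr) { return (midr >> 4) & 0xfff; }

    int main(void)
    {
            uint32_t midr = 0x412FC0F1;     /* example Cortex-A15 r2p1 MIDR */

            printf("implementer=%#x part=%#x\n",
                   midr_implementor(midr), midr_part(midr));
            return 0;
    }

The same masking idea appears in the smp_scu.c hunk earlier, where (read_cpuid_id() & 0xff0ffff0) == 0x410fc090 matches any Cortex-A9 revision.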
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
new file mode 100644
index 000000000000..26ad17310a1e
--- /dev/null
+++ b/arch/arm/kvm/handle_exit.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_psci.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+#include "trace.h"
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+
+static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* SVC called from Hyp mode should never get here */
+	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
+	BUG();
+	return -EINVAL; /* Squash warning */
+}
+
+static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+		      kvm_vcpu_hvc_get_imm(vcpu));
+
+	if (kvm_psci_call(vcpu))
+		return 1;
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	if (kvm_psci_call(vcpu))
+		return 1;
+
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* The hypervisor should never cause aborts */
+	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+	return -EFAULT;
+}
+
+static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/* This is either an error in the ws. code or an external abort */
+	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
+		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
+	return -EFAULT;
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu:	the vcpu pointer
+ * @run:	the kvm_run structure pointer
+ *
+ * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
+ * halt execution of world-switches and schedule other host processes until
+ * there is an incoming IRQ or FIQ to the VM.
+ */
+static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_wfi(*vcpu_pc(vcpu));
+	kvm_vcpu_block(vcpu);
+	return 1;
+}
+
+static exit_handle_fn arm_exit_handlers[] = {
+	[HSR_EC_WFI]		= kvm_handle_wfi,
+	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
+	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
+	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
+	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
+	[HSR_EC_HVC]		= handle_hvc,
+	[HSR_EC_SMC]		= handle_smc,
+	[HSR_EC_IABT]		= kvm_handle_guest_abort,
+	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
+	[HSR_EC_DABT]		= kvm_handle_guest_abort,
+	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
+};
+
+static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+{
+	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+
+	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+	    !arm_exit_handlers[hsr_ec]) {
+		kvm_err("Unkown exception class: hsr: %#08x\n",
+			(unsigned int)kvm_vcpu_get_hsr(vcpu));
+		BUG();
+	}
+
+	return arm_exit_handlers[hsr_ec];
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index)
+{
+	exit_handle_fn exit_handler;
+
+	switch (exception_index) {
+	case ARM_EXCEPTION_IRQ:
+		return 1;
+	case ARM_EXCEPTION_UNDEFINED:
+		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
+			kvm_vcpu_get_hyp_pc(vcpu));
+		BUG();
+		panic("KVM: Hypervisor undefined exception!\n");
+	case ARM_EXCEPTION_DATA_ABORT:
+	case ARM_EXCEPTION_PREF_ABORT:
+	case ARM_EXCEPTION_HVC:
+		/*
+		 * See ARM ARM B1.14.1: "Hyp traps on instructions
+		 * that fail their condition code check"
+		 */
+		if (!kvm_condition_valid(vcpu)) {
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			return 1;
+		}
+
+		exit_handler = kvm_get_exit_handler(vcpu);
+
+		return exit_handler(vcpu, run);
+	default:
+		kvm_pr_unimpl("Unsupported exception type: %d",
+			      exception_index);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		return 0;
+	}
+}
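Note: the new file keeps the table-driven dispatch the old arm.c code used: index a sparse array of function pointers by the HSR exception class, and treat a hole in the table as a fatal bug. A minimal userspace model of the pattern — the EC values mirror HSR_EC_WFI (0x01) and HSR_EC_HVC (0x12), everything else is invented:

    #include <stdio.h>

    typedef int (*exit_handle_fn)(int data);

    static int handle_wfi(int data) { printf("wfi(%d)\n", data); return 1; }
    static int handle_hvc(int data) { printf("hvc(%d)\n", data); return 1; }

    enum { EC_WFI = 0x01, EC_HVC = 0x12, EC_MAX = 0x40 };

    static exit_handle_fn handlers[EC_MAX] = {
            [EC_WFI] = handle_wfi,
            [EC_HVC] = handle_hvc,
    };

    static exit_handle_fn get_handler(unsigned int ec)
    {
            if (ec >= EC_MAX || !handlers[ec]) {
                    fprintf(stderr, "unknown exception class %#x\n", ec);
                    return NULL;    /* the kernel BUG()s here instead */
            }
            return handlers[ec];
    }

    int main(void)
    {
            exit_handle_fn fn = get_handler(EC_HVC);

            return fn ? !fn(42) : 1;
    }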
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 8ca87ab0919d..f7793df62f58 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
 /********************************************************************
  * Flush per-VMID TLBs
  *
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
  *
  * We rely on the hardware to broadcast the TLB invalidation to all CPUs
  * inside the inner-shareable domain (which is the case for all v7
  * implementations). If we come across a non-IS SMP implementation, we'll
  * have to use an IPI based mechanism. Until then, we stick to the simple
  * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
  */
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
 	push	{r2, r3}

 	add	r0, r0, #KVM_VTTBR
@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)

 	pop	{r2, r3}
 	bx	lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)

 /********************************************************************
  * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)
  * instruction is issued since all traps are disabled when running the host
  * kernel as per the Hyp-mode initialization at boot time.
  *
- * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
  * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
  * instructions are called from within Hyp-mode.
  *
  * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 98a870ff1a5c..72a12f2171b2 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -33,16 +33,16 @@
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	__u32 *dest;
+	unsigned long *dest;
 	unsigned int len;
 	int mask;

 	if (!run->mmio.is_write) {
 		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-		memset(dest, 0, sizeof(int));
+		*dest = 0;

 		len = run->mmio.len;
-		if (len > 4)
+		if (len > sizeof(unsigned long))
 			return -EINVAL;

 		memcpy(dest, run->mmio.data, len);
@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       *((u64 *)run->mmio.data));

-		if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+		if (vcpu->arch.mmio_decode.sign_extend &&
+		    len < sizeof(unsigned long)) {
 			mask = 1U << ((len * 8) - 1);
 			*dest = (*dest ^ mask) - mask;
 		}
@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long rt, len;
 	bool is_write, sign_extend;

-	if ((vcpu->arch.hsr >> 8) & 1) {
+	if (kvm_vcpu_dabt_isextabt(vcpu)) {
 		/* cache operation on I/O addr, tell guest unsupported */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

-	if ((vcpu->arch.hsr >> 7) & 1) {
+	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
 		/* page table accesses IO mem: tell guest to fix its TTBR */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

-	switch ((vcpu->arch.hsr >> 22) & 0x3) {
-	case 0:
-		len = 1;
-		break;
-	case 1:
-		len = 2;
-		break;
-	case 2:
-		len = 4;
-		break;
-	default:
-		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-		return -EFAULT;
-	}
+	len = kvm_vcpu_dabt_get_as(vcpu);
+	if (unlikely(len < 0))
+		return len;

-	is_write = vcpu->arch.hsr & HSR_WNR;
-	sign_extend = vcpu->arch.hsr & HSR_SSE;
-	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+	is_write = kvm_vcpu_dabt_iswrite(vcpu);
+	sign_extend = kvm_vcpu_dabt_issext(vcpu);
+	rt = kvm_vcpu_dabt_get_rd(vcpu);

 	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
 		/* IO memory trying to read/write pc */
-		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		return 1;
 	}

@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * The MMIO instruction is emulated and should not be re-executed
 	 * in the guest.
 	 */
-	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 	return 0;
 }

@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	 * space do its magic.
 	 */

-	if (vcpu->arch.hsr & HSR_ISV) {
+	if (kvm_vcpu_dabt_isvalid(vcpu)) {
 		ret = decode_hsr(vcpu, fault_ipa, &mmio);
 		if (ret)
 			return ret;
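Note: the widened MMIO hunks above keep the classic branch-free sign extension: for an N-byte value already zero-extended into a long, (v ^ mask) - mask sign-extends it, where mask has only the narrow type's sign bit set. A runnable check:

    #include <stdio.h>

    static long sign_extend(unsigned long v, unsigned int len)
    {
            unsigned long mask = 1UL << (len * 8 - 1);

            return (long)((v ^ mask) - mask);
    }

    int main(void)
    {
            printf("%ld\n", sign_extend(0xff, 1));      /* -1     */
            printf("%ld\n", sign_extend(0x7f, 1));      /* 127    */
            printf("%ld\n", sign_extend(0x8000, 2));    /* -32768 */
            return 0;
    }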
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 99e07c7dd745..2f12e4056408 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -20,7 +20,6 @@
 #include <linux/kvm_host.h>
 #include <linux/io.h>
 #include <trace/events/kvm.h>
-#include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/kvm_arm.h>
@@ -28,8 +27,6 @@
 #include <asm/kvm_mmio.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
-#include <asm/mach/map.h>
-#include <trace/events/kvm.h>

 #include "trace.h"

@@ -37,19 +34,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

-static void kvm_tlb_flush_vmid(struct kvm *kvm)
+static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
-static void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
-	pte_val(*pte) = new_pte;
-	/*
-	 * flush_pmd_entry just takes a void pointer and cleans the necessary
-	 * cache entries, so we can reuse the function for ptes.
-	 */
-	flush_pmd_entry(pte);
+	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }

 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -98,33 +85,42 @@ static void free_ptes(pmd_t *pmd, unsigned long addr)
 	}
 }

+static void free_hyp_pgd_entry(unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+	pgd = hyp_pgd + pgd_index(hyp_addr);
+	pud = pud_offset(pgd, hyp_addr);
+
+	if (pud_none(*pud))
+		return;
+	BUG_ON(pud_bad(*pud));
+
+	pmd = pmd_offset(pud, hyp_addr);
+	free_ptes(pmd, addr);
+	pmd_free(NULL, pmd);
+	pud_clear(pud);
+}
+
 /**
  * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables
  *
  * Assumes this is a page table used strictly in Hyp-mode and therefore contains
- * only mappings in the kernel memory area, which is above PAGE_OFFSET.
+ * either mappings in the kernel memory area (above PAGE_OFFSET), or
+ * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
  */
 void free_hyp_pmds(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
 	unsigned long addr;

 	mutex_lock(&kvm_hyp_pgd_mutex);
-	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
-		pgd = hyp_pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
-
-		if (pud_none(*pud))
-			continue;
-		BUG_ON(pud_bad(*pud));
-
-		pmd = pmd_offset(pud, addr);
-		free_ptes(pmd, addr);
-		pmd_free(NULL, pmd);
-		pud_clear(pud);
-	}
+	for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
+		free_hyp_pgd_entry(addr);
+	for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
+		free_hyp_pgd_entry(addr);
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }

@@ -136,7 +132,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
 	struct page *page;

 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		pte = pte_offset_kernel(pmd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+		pte = pte_offset_kernel(pmd, hyp_addr);
 		BUG_ON(!virt_addr_valid(addr));
 		page = virt_to_page(addr);
 		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
@@ -151,7 +149,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
 	unsigned long addr;

 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		pte = pte_offset_kernel(pmd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+		pte = pte_offset_kernel(pmd, hyp_addr);
 		BUG_ON(pfn_valid(*pfn_base));
 		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
 		(*pfn_base)++;
@@ -166,12 +166,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 	unsigned long addr, next;

 	for (addr = start; addr < end; addr = next) {
-		pmd = pmd_offset(pud, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+		pmd = pmd_offset(pud, hyp_addr);

 		BUG_ON(pmd_sect(*pmd));

 		if (pmd_none(*pmd)) {
-			pte = pte_alloc_one_kernel(NULL, addr);
+			pte = pte_alloc_one_kernel(NULL, hyp_addr);
 			if (!pte) {
 				kvm_err("Cannot allocate Hyp pte\n");
 				return -ENOMEM;
@@ -206,17 +207,23 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 	unsigned long addr, next;
 	int err = 0;

-	BUG_ON(start > end);
-	if (start < PAGE_OFFSET)
+	if (start >= end)
+		return -EINVAL;
+	/* Check for a valid kernel memory mapping */
+	if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
+		return -EINVAL;
+	/* Check for a valid kernel IO mapping */
+	if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
 		return -EINVAL;

 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = start; addr < end; addr = next) {
-		pgd = hyp_pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+		pgd = hyp_pgd + pgd_index(hyp_addr);
+		pud = pud_offset(pgd, hyp_addr);

 		if (pud_none_or_clear_bad(pud)) {
-			pmd = pmd_alloc_one(NULL, addr);
+			pmd = pmd_alloc_one(NULL, hyp_addr);
 			if (!pmd) {
 				kvm_err("Cannot allocate Hyp pmd\n");
 				err = -ENOMEM;
@@ -236,12 +243,13 @@ out:
236} 243}
237 244
238/** 245/**
239 * create_hyp_mappings - map a kernel virtual address range in Hyp mode 246 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
240 * @from: The virtual kernel start address of the range 247 * @from: The virtual kernel start address of the range
241 * @to: The virtual kernel end address of the range (exclusive) 248 * @to: The virtual kernel end address of the range (exclusive)
242 * 249 *
243 * The same virtual address as the kernel virtual address is also used in 250 * The same virtual address as the kernel virtual address is also used
244 * Hyp-mode mapping to the same underlying physical pages. 251 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
252 * physical pages.
245 * 253 *
246 * Note: Wrapping around zero in the "to" address is not supported. 254 * Note: Wrapping around zero in the "to" address is not supported.
247 */ 255 */
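
The "modulo HYP_PAGE_OFFSET" wording above matches the KERN_TO_HYP() translation used throughout these hunks. A small model of the arithmetic, assuming KERN_TO_HYP() simply rebases a lowmem kernel VA from PAGE_OFFSET onto HYP_PAGE_OFFSET; the constants below are illustrative, not the kernel's:

#include <stdio.h>

#define PAGE_OFFSET     0xc0000000UL  /* typical 3G/1G split */
#define HYP_PAGE_OFFSET 0x40000000UL  /* hypothetical HYP base */

#define KERN_TO_HYP(kva) \
    ((unsigned long)(kva) - PAGE_OFFSET + HYP_PAGE_OFFSET)

int main(void)
{
    unsigned long kva = 0xc0123456UL;
    printf("kernel VA %#lx -> HYP VA %#lx\n", kva, KERN_TO_HYP(kva));
    return 0;
}
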
@@ -251,10 +259,13 @@ int create_hyp_mappings(void *from, void *to)
251} 259}
252 260
253/** 261/**
254 * create_hyp_io_mappings - map a physical IO range in Hyp mode 262 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
255 * @from: The virtual HYP start address of the range 263 * @from: The kernel start VA of the range
256 * @to: The virtual HYP end address of the range (exclusive) 264 * @to: The kernel end VA of the range (exclusive)
257 * @addr: The physical start address which gets mapped 265 * @addr: The physical start address which gets mapped
266 *
267 * The resulting HYP VA is the same as the kernel VA, modulo
268 * HYP_PAGE_OFFSET.
258 */ 269 */
259int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) 270int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
260{ 271{
@@ -290,7 +301,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
290 VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); 301 VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));
291 302
292 memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); 303 memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
293 clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); 304 kvm_clean_pgd(pgd);
294 kvm->arch.pgd = pgd; 305 kvm->arch.pgd = pgd;
295 306
296 return 0; 307 return 0;
@@ -422,22 +433,22 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
422 return 0; /* ignore calls from kvm_set_spte_hva */ 433 return 0; /* ignore calls from kvm_set_spte_hva */
423 pmd = mmu_memory_cache_alloc(cache); 434 pmd = mmu_memory_cache_alloc(cache);
424 pud_populate(NULL, pud, pmd); 435 pud_populate(NULL, pud, pmd);
425 pmd += pmd_index(addr);
426 get_page(virt_to_page(pud)); 436 get_page(virt_to_page(pud));
427 } else 437 }
428 pmd = pmd_offset(pud, addr); 438
439 pmd = pmd_offset(pud, addr);
429 440
430 /* Create 2nd stage page table mapping - Level 2 */ 441 /* Create 2nd stage page table mapping - Level 2 */
431 if (pmd_none(*pmd)) { 442 if (pmd_none(*pmd)) {
432 if (!cache) 443 if (!cache)
433 return 0; /* ignore calls from kvm_set_spte_hva */ 444 return 0; /* ignore calls from kvm_set_spte_hva */
434 pte = mmu_memory_cache_alloc(cache); 445 pte = mmu_memory_cache_alloc(cache);
435 clean_pte_table(pte); 446 kvm_clean_pte(pte);
436 pmd_populate_kernel(NULL, pmd, pte); 447 pmd_populate_kernel(NULL, pmd, pte);
437 pte += pte_index(addr);
438 get_page(virt_to_page(pmd)); 448 get_page(virt_to_page(pmd));
439 } else 449 }
440 pte = pte_offset_kernel(pmd, addr); 450
451 pte = pte_offset_kernel(pmd, addr);
441 452
442 if (iomap && pte_present(*pte)) 453 if (iomap && pte_present(*pte))
443 return -EFAULT; 454 return -EFAULT;
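
The reshuffle above trades an if/else (allocate-and-index vs. index) for populate-if-missing followed by one unconditional pmd_offset()/pte_offset_kernel(), so both paths share a single indexing site. A toy two-level table with the same shape:

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

struct table { long entry[ENTRIES]; };

static struct table *dir[ENTRIES];  /* top level; NULL = not populated */

/* Populate the lower level only when it is missing, then index on the
 * common path -- the shape stage2_set_pte() now has for pmd and pte. */
static long *get_entry(unsigned int hi, unsigned int lo)
{
    if (!dir[hi]) {
        dir[hi] = calloc(1, sizeof(struct table));
        if (!dir[hi])
            exit(1);
    }
    return &dir[hi]->entry[lo];
}

int main(void)
{
    *get_entry(1, 2) = 42;
    printf("%ld\n", *get_entry(1, 2));
    return 0;
}
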
@@ -446,7 +457,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
446 old_pte = *pte; 457 old_pte = *pte;
447 kvm_set_pte(pte, *new_pte); 458 kvm_set_pte(pte, *new_pte);
448 if (pte_present(old_pte)) 459 if (pte_present(old_pte))
449 kvm_tlb_flush_vmid(kvm); 460 kvm_tlb_flush_vmid_ipa(kvm, addr);
450 else 461 else
451 get_page(virt_to_page(pte)); 462 get_page(virt_to_page(pte));
452 463
@@ -473,7 +484,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
473 pfn = __phys_to_pfn(pa); 484 pfn = __phys_to_pfn(pa);
474 485
475 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { 486 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
476 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); 487 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
488 kvm_set_s2pte_writable(&pte);
477 489
478 ret = mmu_topup_memory_cache(&cache, 2, 2); 490 ret = mmu_topup_memory_cache(&cache, 2, 2);
479 if (ret) 491 if (ret)
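
Replacing the open-coded OR of L_PTE_S2_RDWR with kvm_set_s2pte_writable(), here and in user_mem_abort() below, keeps the stage-2 "writable" encoding behind a single helper so callers stay layout-agnostic across LPAE and non-LPAE formats. A toy rendering; the bit position is invented:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pte_t;

#define L_PTE_S2_RDWR (1ULL << 7)  /* invented bit position */

/* One helper owns the "writable" encoding -- the point of the
 * kvm_set_s2pte_writable() change above. */
static void set_s2pte_writable(pte_t *pte)
{
    *pte |= L_PTE_S2_RDWR;
}

int main(void)
{
    pte_t pte = 0x1000;
    set_s2pte_writable(&pte);
    printf("pte = %#llx\n", (unsigned long long)pte);
    return 0;
}
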
@@ -492,29 +504,6 @@ out:
492 return ret; 504 return ret;
493} 505}
494 506
495static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
496{
497 /*
498 * If we are going to insert an instruction page and the icache is
499 * either VIPT or PIPT, there is a potential problem where the host
500 * (or another VM) may have used the same page as this guest, and we
501 * read incorrect data from the icache. If we're using a PIPT cache,
502 * we can invalidate just that page, but if we are using a VIPT cache
503 * we need to invalidate the entire icache - damn shame - as written
504 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
505 *
506 * VIVT caches are tagged using both the ASID and the VMID and doesn't
507 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
508 */
509 if (icache_is_pipt()) {
510 unsigned long hva = gfn_to_hva(kvm, gfn);
511 __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
512 } else if (!icache_is_vivt_asid_tagged()) {
513 /* any kind of VIPT cache */
514 __flush_icache_all();
515 }
516}
517
518static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 507static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
519 gfn_t gfn, struct kvm_memory_slot *memslot, 508 gfn_t gfn, struct kvm_memory_slot *memslot,
520 unsigned long fault_status) 509 unsigned long fault_status)
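
The helper removed above encodes a three-way decision on icache type; it is presumably relocated rather than dropped, since this series moves a number of MMU helpers into asm/kvm_mmu.h. The decision table, modeled standalone:

#include <stdio.h>

enum icache_type { PIPT, VIPT, VIVT_ASID_TAGGED };

/* Decision table from the removed helper: PIPT can invalidate one page
 * by physical address; any VIPT flavour must invalidate the whole
 * icache; ASID/VMID-tagged VIVT needs nothing at all. */
static void coherent_icache_guest_page(enum icache_type t)
{
    switch (t) {
    case PIPT:
        printf("invalidate just this page\n");
        break;
    case VIPT:
        printf("invalidate the entire icache\n");
        break;
    case VIVT_ASID_TAGGED:
        printf("no flush needed\n");
        break;
    }
}

int main(void)
{
    coherent_icache_guest_page(VIPT);
    return 0;
}
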
@@ -526,7 +515,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
526 unsigned long mmu_seq; 515 unsigned long mmu_seq;
527 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; 516 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
528 517
529 write_fault = kvm_is_write_fault(vcpu->arch.hsr); 518 write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
530 if (fault_status == FSC_PERM && !write_fault) { 519 if (fault_status == FSC_PERM && !write_fault) {
531 kvm_err("Unexpected L2 read permission error\n"); 520 kvm_err("Unexpected L2 read permission error\n");
532 return -EFAULT; 521 return -EFAULT;
@@ -560,7 +549,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
560 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) 549 if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
561 goto out_unlock; 550 goto out_unlock;
562 if (writable) { 551 if (writable) {
563 pte_val(new_pte) |= L_PTE_S2_RDWR; 552 kvm_set_s2pte_writable(&new_pte);
564 kvm_set_pfn_dirty(pfn); 553 kvm_set_pfn_dirty(pfn);
565 } 554 }
566 stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); 555 stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
@@ -585,7 +574,6 @@ out_unlock:
585 */ 574 */
586int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) 575int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
587{ 576{
588 unsigned long hsr_ec;
589 unsigned long fault_status; 577 unsigned long fault_status;
590 phys_addr_t fault_ipa; 578 phys_addr_t fault_ipa;
591 struct kvm_memory_slot *memslot; 579 struct kvm_memory_slot *memslot;
@@ -593,18 +581,17 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
593 gfn_t gfn; 581 gfn_t gfn;
594 int ret, idx; 582 int ret, idx;
595 583
596 hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; 584 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
597 is_iabt = (hsr_ec == HSR_EC_IABT); 585 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
598 fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
599 586
600 trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, 587 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
601 vcpu->arch.hxfar, fault_ipa); 588 kvm_vcpu_get_hfar(vcpu), fault_ipa);
602 589
603 /* Check that the stage-2 fault is a translation or permission fault */ 590 /* Check that the stage-2 fault is a translation or permission fault */
604 fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); 591 fault_status = kvm_vcpu_trap_get_fault(vcpu);
605 if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { 592 if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
606 kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", 593 kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
607 hsr_ec, fault_status); 594 kvm_vcpu_trap_get_class(vcpu), fault_status);
608 return -EFAULT; 595 return -EFAULT;
609 } 596 }
610 597
@@ -614,7 +601,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
614 if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { 601 if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
615 if (is_iabt) { 602 if (is_iabt) {
616 /* Prefetch Abort on I/O address */ 603 /* Prefetch Abort on I/O address */
617 kvm_inject_pabt(vcpu, vcpu->arch.hxfar); 604 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
618 ret = 1; 605 ret = 1;
619 goto out_unlock; 606 goto out_unlock;
620 } 607 }
@@ -626,8 +613,13 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
626 goto out_unlock; 613 goto out_unlock;
627 } 614 }
628 615
629 /* Adjust page offset */ 616 /*
630 fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; 617 * The IPA is reported as [MAX:12], so we need to
618 * complement it with the bottom 12 bits from the
619 * faulting VA. This is always 12 bits, irrespective
620 * of the page size.
621 */
622 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
631 ret = io_mem_abort(vcpu, run, fault_ipa); 623 ret = io_mem_abort(vcpu, run, fault_ipa);
632 goto out_unlock; 624 goto out_unlock;
633 } 625 }
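
The new comment deserves a worked example: HPFAR reports only IPA bits [MAX:12], so the low 12 bits always come from the faulting VA, whatever the page size. With made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Illustrative values: a page-aligned IPA as derived from HPFAR,
     * and a faulting VA whose low 12 bits are 0xa34. */
    uint64_t fault_ipa = 0x87654000ULL;
    uint32_t hfar      = 0x40001a34UL;

    fault_ipa |= hfar & ((1 << 12) - 1);

    printf("full IPA = %#llx\n", (unsigned long long)fault_ipa);  /* 0x87654a34 */
    return 0;
}
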
@@ -682,7 +674,7 @@ static void handle_hva_to_gpa(struct kvm *kvm,
682static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) 674static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
683{ 675{
684 unmap_stage2_range(kvm, gpa, PAGE_SIZE); 676 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
685 kvm_tlb_flush_vmid(kvm); 677 kvm_tlb_flush_vmid_ipa(kvm, gpa);
686} 678}
687 679
688int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 680int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
@@ -776,7 +768,7 @@ void kvm_clear_hyp_idmap(void)
776 pmd = pmd_offset(pud, addr); 768 pmd = pmd_offset(pud, addr);
777 769
778 pud_clear(pud); 770 pud_clear(pud);
779 clean_pmd_entry(pmd); 771 kvm_clean_pmd_entry(pmd);
780 pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); 772 pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
781 } while (pgd++, addr = next, addr < end); 773 } while (pgd++, addr = next, addr < end);
782} 774}
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
index 0e4cfe123b38..17c5ac7d10ed 100644
--- a/arch/arm/kvm/vgic.c
+++ b/arch/arm/kvm/vgic.c
@@ -1477,7 +1477,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
1477 if (addr & ~KVM_PHYS_MASK) 1477 if (addr & ~KVM_PHYS_MASK)
1478 return -E2BIG; 1478 return -E2BIG;
1479 1479
1480 if (addr & ~PAGE_MASK) 1480 if (addr & (SZ_4K - 1))
1481 return -EINVAL; 1481 return -EINVAL;
1482 1482
1483 mutex_lock(&kvm->lock); 1483 mutex_lock(&kvm->lock);
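
Switching from ~PAGE_MASK to (SZ_4K - 1) ties the check to what the GIC actually requires, a 4KB-aligned base, instead of to whatever the host page size happens to be. The predicate, spelled out:

#include <stdio.h>

#define SZ_4K 0x1000ULL

/* True when addr sits on a 4KB boundary, independent of PAGE_SIZE. */
static int gic_base_aligned(unsigned long long addr)
{
    return (addr & (SZ_4K - 1)) == 0;
}

int main(void)
{
    printf("%d %d\n",
           gic_base_aligned(0x2c002000ULL),   /* 1 */
           gic_base_aligned(0x2c002400ULL));  /* 0 */
    return 0;
}
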
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 8a68f1ec66b9..577298ed5a44 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -300,7 +300,7 @@ void __init omap3xxx_check_revision(void)
300 * If the processor type is Cortex-A8 and the revision is 0x0 300 * If the processor type is Cortex-A8 and the revision is 0x0
301 * it means it's Cortex r0p0, which is 3430 ES1.0. 301 * it means it's Cortex r0p0, which is 3430 ES1.0.
302 */ 302 */
303 cpuid = read_cpuid(CPUID_ID); 303 cpuid = read_cpuid_id();
304 if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) { 304 if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
305 omap_revision = OMAP3430_REV_ES1_0; 305 omap_revision = OMAP3430_REV_ES1_0;
306 cpu_rev = "1.0"; 306 cpu_rev = "1.0";
@@ -460,7 +460,7 @@ void __init omap4xxx_check_revision(void)
460 * Use ARM register to detect the correct ES version 460 * Use ARM register to detect the correct ES version
461 */ 461 */
462 if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) { 462 if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) {
463 idcode = read_cpuid(CPUID_ID); 463 idcode = read_cpuid_id();
464 rev = (idcode & 0xf) - 1; 464 rev = (idcode & 0xf) - 1;
465 } 465 }
466 466
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index d9727218dd0a..7f5626d8fd3e 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -209,7 +209,7 @@ static void __init omap4_smp_init_cpus(void)
209 unsigned int i = 0, ncores = 1, cpu_id; 209 unsigned int i = 0, ncores = 1, cpu_id;
210 210
211 /* Use ARM cpuid check here, as SoC detection will not work so early */ 211 /* Use ARM cpuid check here, as SoC detection will not work so early */
212 cpu_id = read_cpuid(CPUID_ID) & CPU_MASK; 212 cpu_id = read_cpuid_id() & CPU_MASK;
213 if (cpu_id == CPU_CORTEX_A9) { 213 if (cpu_id == CPU_CORTEX_A9) {
214 /* 214 /*
215 * Currently we can't call ioremap here because 215 * Currently we can't call ioremap here because
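
read_cpuid_id() returns the MIDR; the callers above then pull the part number from bits [15:4] and the minor revision from bits [3:0]. A standalone decode with an illustrative Cortex-A8 value:

#include <stdio.h>

int main(void)
{
    unsigned int midr = 0x410fc083;  /* illustrative Cortex-A8 r0p3 MIDR */

    unsigned int part = (midr >> 4) & 0xfff;  /* 0xc08 = Cortex-A8 */
    unsigned int rev  = midr & 0xf;           /* minor revision */

    printf("part=%#x rev=%u\n", part, rev);
    return 0;
}
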
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 025d17328730..35955b54944c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
43 depends on !MMU 43 depends on !MMU
44 select CPU_32v4T 44 select CPU_32v4T
45 select CPU_ABRT_LV4T 45 select CPU_ABRT_LV4T
46 select CPU_CACHE_V3 # although the core is v4t 46 select CPU_CACHE_V4
47 select CPU_CP15_MPU 47 select CPU_CP15_MPU
48 select CPU_PABRT_LEGACY 48 select CPU_PABRT_LEGACY
49 help 49 help
@@ -397,6 +397,13 @@ config CPU_V7
397 select CPU_PABRT_V7 397 select CPU_PABRT_V7
398 select CPU_TLB_V7 if MMU 398 select CPU_TLB_V7 if MMU
399 399
400config CPU_THUMBONLY
401 bool
402 # There are no CPUs available with MMU that don't implement an ARM ISA:
403 depends on !MMU
404 help
405 Select this if your CPU doesn't support the 32-bit ARM instruction set.
406
400# Figure out what processor architecture version we should be using. 407# Figure out what processor architecture version we should be using.
401# This defines the compiler instruction set which depends on the machine type. 408# This defines the compiler instruction set which depends on the machine type.
402config CPU_32v3 409config CPU_32v3
@@ -469,9 +476,6 @@ config CPU_PABRT_V7
469 bool 476 bool
470 477
471# The cache model 478# The cache model
472config CPU_CACHE_V3
473 bool
474
475config CPU_CACHE_V4 479config CPU_CACHE_V4
476 bool 480 bool
477 481
@@ -608,7 +612,7 @@ config ARCH_DMA_ADDR_T_64BIT
608 bool 612 bool
609 613
610config ARM_THUMB 614config ARM_THUMB
611 bool "Support Thumb user binaries" 615 bool "Support Thumb user binaries" if !CPU_THUMBONLY
612 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON 616 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
613 default y 617 default y
614 help 618 help
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 4e333fa2756f..9e51be96f635 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
33obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o 33obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
34obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o 34obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
35 35
36obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
37obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o 36obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
38obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o 37obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
39obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o 38obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index db26e2e543f4..6f4585b89078 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -961,12 +961,14 @@ static int __init alignment_init(void)
961 return -ENOMEM; 961 return -ENOMEM;
962#endif 962#endif
963 963
964#ifdef CONFIG_CPU_CP15
964 if (cpu_is_v6_unaligned()) { 965 if (cpu_is_v6_unaligned()) {
965 cr_alignment &= ~CR_A; 966 cr_alignment &= ~CR_A;
966 cr_no_alignment &= ~CR_A; 967 cr_no_alignment &= ~CR_A;
967 set_cr(cr_alignment); 968 set_cr(cr_alignment);
968 ai_usermode = safe_usermode(ai_usermode, false); 969 ai_usermode = safe_usermode(ai_usermode, false);
969 } 970 }
971#endif
970 972
971 hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, 973 hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
972 "alignment exception"); 974 "alignment exception");
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc3..48bc3c0a87ce 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
343 outer_cache.inv_range = feroceon_l2_inv_range; 343 outer_cache.inv_range = feroceon_l2_inv_range;
344 outer_cache.clean_range = feroceon_l2_clean_range; 344 outer_cache.clean_range = feroceon_l2_clean_range;
345 outer_cache.flush_range = feroceon_l2_flush_range; 345 outer_cache.flush_range = feroceon_l2_flush_range;
346 outer_cache.inv_all = l2_inv_all;
346 347
347 enable_l2(); 348 enable_l2();
348 349
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d3..000000000000
--- a/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * linux/arch/arm/mm/cache-v3.S
3 *
4 * Copyright (C) 1997-2002 Russell king
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/linkage.h>
11#include <linux/init.h>
12#include <asm/page.h>
13#include "proc-macros.S"
14
15/*
16 * flush_icache_all()
17 *
18 * Unconditionally clean and invalidate the entire icache.
19 */
20ENTRY(v3_flush_icache_all)
21 mov pc, lr
22ENDPROC(v3_flush_icache_all)
23
24/*
25 * flush_user_cache_all()
26 *
27 * Invalidate all cache entries in a particular address
28 * space.
29 *
30 * - mm - mm_struct describing address space
31 */
32ENTRY(v3_flush_user_cache_all)
33 /* FALLTHROUGH */
34/*
35 * flush_kern_cache_all()
36 *
37 * Clean and invalidate the entire cache.
38 */
39ENTRY(v3_flush_kern_cache_all)
40 /* FALLTHROUGH */
41
42/*
43 * flush_user_cache_range(start, end, flags)
44 *
45 * Invalidate a range of cache entries in the specified
46 * address space.
47 *
48 * - start - start address (may not be aligned)
49 * - end - end address (exclusive, may not be aligned)
50 * - flags - vma_area_struct flags describing address space
51 */
52ENTRY(v3_flush_user_cache_range)
53 mov ip, #0
54 mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
55 mov pc, lr
56
57/*
58 * coherent_kern_range(start, end)
59 *
60 * Ensure coherency between the Icache and the Dcache in the
61 * region described by start. If you have non-snooping
62 * Harvard caches, you need to implement this function.
63 *
64 * - start - virtual start address
65 * - end - virtual end address
66 */
67ENTRY(v3_coherent_kern_range)
68 /* FALLTHROUGH */
69
70/*
71 * coherent_user_range(start, end)
72 *
73 * Ensure coherency between the Icache and the Dcache in the
74 * region described by start. If you have non-snooping
75 * Harvard caches, you need to implement this function.
76 *
77 * - start - virtual start address
78 * - end - virtual end address
79 */
80ENTRY(v3_coherent_user_range)
81 mov r0, #0
82 mov pc, lr
83
84/*
85 * flush_kern_dcache_area(void *page, size_t size)
86 *
87 * Ensure no D cache aliasing occurs, either with itself or
88 * the I cache
89 *
90 * - addr - kernel address
91 * - size - region size
92 */
93ENTRY(v3_flush_kern_dcache_area)
94 /* FALLTHROUGH */
95
96/*
97 * dma_flush_range(start, end)
98 *
99 * Clean and invalidate the specified virtual address range.
100 *
101 * - start - virtual start address
102 * - end - virtual end address
103 */
104ENTRY(v3_dma_flush_range)
105 mov r0, #0
106 mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
107 mov pc, lr
108
109/*
110 * dma_unmap_area(start, size, dir)
111 * - start - kernel virtual start address
112 * - size - size of region
113 * - dir - DMA direction
114 */
115ENTRY(v3_dma_unmap_area)
116 teq r2, #DMA_TO_DEVICE
117 bne v3_dma_flush_range
118 /* FALLTHROUGH */
119
120/*
121 * dma_map_area(start, size, dir)
122 * - start - kernel virtual start address
123 * - size - size of region
124 * - dir - DMA direction
125 */
126ENTRY(v3_dma_map_area)
127 mov pc, lr
128ENDPROC(v3_dma_unmap_area)
129ENDPROC(v3_dma_map_area)
130
131 .globl v3_flush_kern_cache_louis
132 .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
133
134 __INITDATA
135
136 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
137 define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 43e5d77be677..a7ba68f59f0c 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
58ENTRY(v4_flush_user_cache_range) 58ENTRY(v4_flush_user_cache_range)
59#ifdef CONFIG_CPU_CP15 59#ifdef CONFIG_CPU_CP15
60 mov ip, #0 60 mov ip, #0
61 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache 61 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
62 mov pc, lr 62 mov pc, lr
63#else 63#else
64 /* FALLTHROUGH */ 64 /* FALLTHROUGH */
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e9db6b4bf65a..ef3e0f3aac96 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -823,16 +823,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
823 if (PageHighMem(page)) { 823 if (PageHighMem(page)) {
824 if (len + offset > PAGE_SIZE) 824 if (len + offset > PAGE_SIZE)
825 len = PAGE_SIZE - offset; 825 len = PAGE_SIZE - offset;
826 vaddr = kmap_high_get(page); 826
827 if (vaddr) { 827 if (cache_is_vipt_nonaliasing()) {
828 vaddr += offset;
829 op(vaddr, len, dir);
830 kunmap_high(page);
831 } else if (cache_is_vipt()) {
832 /* unmapped pages might still be cached */
833 vaddr = kmap_atomic(page); 828 vaddr = kmap_atomic(page);
834 op(vaddr + offset, len, dir); 829 op(vaddr + offset, len, dir);
835 kunmap_atomic(vaddr); 830 kunmap_atomic(vaddr);
831 } else {
832 vaddr = kmap_high_get(page);
833 if (vaddr) {
834 op(vaddr + offset, len, dir);
835 kunmap_high(page);
836 }
836 } 837 }
837 } else { 838 } else {
838 vaddr = page_address(page) + offset; 839 vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1c8f7f564175..0d473cce501c 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
170 if (!PageHighMem(page)) { 170 if (!PageHighMem(page)) {
171 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); 171 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
172 } else { 172 } else {
173 void *addr = kmap_high_get(page); 173 void *addr;
174 if (addr) { 174
175 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 175 if (cache_is_vipt_nonaliasing()) {
176 kunmap_high(page);
177 } else if (cache_is_vipt()) {
178 /* unmapped pages might still be cached */
179 addr = kmap_atomic(page); 176 addr = kmap_atomic(page);
180 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 177 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
181 kunmap_atomic(addr); 178 kunmap_atomic(addr);
179 } else {
180 addr = kmap_high_get(page);
181 if (addr) {
182 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
183 kunmap_high(page);
184 }
182 } 185 }
183 } 186 }
184 187
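
Both highmem hunks above, dma_cache_maint_page() and __flush_dcache_page(), adopt the same policy: on a VIPT non-aliasing cache any temporary alias is as good as any other, so the cheap kmap_atomic() path is taken unconditionally; on an aliasing cache only an established kmap_high_get() mapping is flushed. A control-flow sketch with stand-in primitives:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins; only the control flow below mirrors the kernel hunks. */
static bool cache_is_vipt_nonaliasing(void) { return true; }
static void *kmap_atomic(void *page)        { return page; }
static void  kunmap_atomic(void *va)        { (void)va; }
static void *kmap_high_get(void *page)      { (void)page; return NULL; }
static void  kunmap_high(void *page)        { (void)page; }
static void  flush(void *va)                { printf("flush %p\n", va); }

static void maint_highmem_page(void *page)
{
    void *va;

    if (cache_is_vipt_nonaliasing()) {
        /* any alias is coherent with every other: cheap atomic map */
        va = kmap_atomic(page);
        flush(va);
        kunmap_atomic(va);
    } else {
        /* aliasing cache: flush only through the established mapping */
        va = kmap_high_get(page);
        if (va) {
            flush(va);
            kunmap_high(page);
        }
    }
}

int main(void)
{
    int page;
    maint_highmem_page(&page);
    return 0;
}
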
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 78978945492a..e0d8565671a6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
34#include <asm/mach/pci.h> 34#include <asm/mach/pci.h>
35 35
36#include "mm.h" 36#include "mm.h"
37#include "tcm.h"
37 38
38/* 39/*
39 * empty_zero_page is a special page that is used for 40 * empty_zero_page is a special page that is used for
@@ -112,6 +113,7 @@ static struct cachepolicy cache_policies[] __initdata = {
112 } 113 }
113}; 114};
114 115
116#ifdef CONFIG_CPU_CP15
115/* 117/*
116 * These are useful for identifying cache coherency 118 * These are useful for identifying cache coherency
117 * problems by allowing the cache or the cache and 119 * problems by allowing the cache or the cache and
@@ -210,6 +212,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
210} 212}
211#endif 213#endif
212 214
215#else /* ifdef CONFIG_CPU_CP15 */
216
217static int __init early_cachepolicy(char *p)
218{
219 pr_warning("cachepolicy kernel parameter not supported without cp15\n");
220}
221early_param("cachepolicy", early_cachepolicy);
222
223static int __init noalign_setup(char *__unused)
224{
225 pr_warning("noalign kernel parameter not supported without cp15\n");
226}
227__setup("noalign", noalign_setup);
228
229#endif /* ifdef CONFIG_CPU_CP15 / else */
230
213#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN 231#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
214#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE 232#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
215 233
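
One nit in the stub hunk above: both handlers are declared int yet, as posted, return nothing. A compilable userspace rendering with the missing returns added; the macros are stand-ins:

#include <stdio.h>

#define __init
#define pr_warning printf

static int __init early_cachepolicy(char *p)
{
    (void)p;
    pr_warning("cachepolicy kernel parameter not supported without cp15\n");
    return 0;  /* absent in the hunk as posted */
}

static int __init noalign_setup(char *unused)
{
    (void)unused;
    pr_warning("noalign kernel parameter not supported without cp15\n");
    return 0;  /* absent in the hunk as posted */
}

int main(void)
{
    early_cachepolicy(NULL);
    noalign_setup(NULL);
    return 0;
}
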
@@ -1277,6 +1295,7 @@ void __init paging_init(struct machine_desc *mdesc)
1277 dma_contiguous_remap(); 1295 dma_contiguous_remap();
1278 devicemaps_init(mdesc); 1296 devicemaps_init(mdesc);
1279 kmap_init(); 1297 kmap_init();
1298 tcm_init();
1280 1299
1281 top_pmd = pmd_off_k(0xffff0000); 1300 top_pmd = pmd_off_k(0xffff0000);
1282 1301
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index dc5de5d53f20..fde2d2a794cf 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
77 mcr p15, 0, r0, c6, c0 @ set area 0, default 77 mcr p15, 0, r0, c6, c0 @ set area 0, default
78 78
79 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM 79 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
80 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) 80 ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
81 mov r2, #10 @ 11 is the minimum (4KB) 81 mov r4, #10 @ 11 is the minimum (4KB)
821: add r2, r2, #1 @ area size *= 2 821: add r4, r4, #1 @ area size *= 2
83 mov r1, r1, lsr #1 83 movs r3, r3, lsr #1
84 bne 1b @ count not zero r-shift 84 bne 1b @ count not zero r-shift
85 orr r0, r0, r2, lsl #1 @ the area register value 85 orr r0, r0, r4, lsl #1 @ the area register value
86 orr r0, r0, #1 @ set enable bit 86 orr r0, r0, #1 @ set enable bit
87 mcr p15, 0, r0, c6, c1 @ set area 1, RAM 87 mcr p15, 0, r0, c6, c1 @ set area 1, RAM
88 88
89 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH 89 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
90 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) 90 ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
91 mov r2, #10 @ 11 is the minimum (4KB) 91 cmp r3, #0
921: add r2, r2, #1 @ area size *= 2 92 moveq r0, #0
93 mov r1, r1, lsr #1 93 beq 2f
94 mov r4, #10 @ 11 is the minimum (4KB)
951: add r4, r4, #1 @ area size *= 2
96 movs r3, r3, lsr #1
94 bne 1b @ count not zero r-shift 97 bne 1b @ count not zero r-shift
95 orr r0, r0, r2, lsl #1 @ the area register value 98 orr r0, r0, r4, lsl #1 @ the area register value
96 orr r0, r0, #1 @ set enable bit 99 orr r0, r0, #1 @ set enable bit
97 mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH 1002: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
98 101
99 mov r0, #0x06 102 mov r0, #0x06
100 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable 103 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
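
The corrected loop computes the MPU area-size field: start at 10 and increment once per right shift of (size >> 12) until it reaches zero, giving 11 for the 4KB minimum, with region size = 2^(field+1) bytes. The switch from mov to movs matters because the branch now tests the value actually being shifted. A C model:

#include <stdio.h>

/* Mirrors the assembly loop: field starts at 10 and is bumped once per
 * right shift of the region size in pages, so 4KB -> 11 (the minimum),
 * 8KB -> 12, and so on. */
static unsigned int mpu_size_field(unsigned long size_bytes)
{
    unsigned long pages = size_bytes >> 12;
    unsigned int field = 10;

    do {
        field++;
        pages >>= 1;
    } while (pages);

    return field;
}

int main(void)
{
    printf("4KB  -> %u\n", mpu_size_field(4UL << 10));   /* 11 */
    printf("64KB -> %u\n", mpu_size_field(64UL << 10));  /* 15 */
    printf("32MB -> %u\n", mpu_size_field(32UL << 20));  /* 24 */
    return 0;
}
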
@@ -137,13 +140,14 @@ __arm740_proc_info:
137 .long 0x41807400 140 .long 0x41807400
138 .long 0xfffffff0 141 .long 0xfffffff0
139 .long 0 142 .long 0
143 .long 0
140 b __arm740_setup 144 b __arm740_setup
141 .long cpu_arch_name 145 .long cpu_arch_name
142 .long cpu_elf_name 146 .long cpu_elf_name
143 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT 147 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
144 .long cpu_arm740_name 148 .long cpu_arm740_name
145 .long arm740_processor_functions 149 .long arm740_processor_functions
146 .long 0 150 .long 0
147 .long 0 151 .long 0
148 .long v3_cache_fns @ cache model 152 .long v4_cache_fns @ cache model
149 .size __arm740_proc_info, . - __arm740_proc_info 153 .size __arm740_proc_info, . - __arm740_proc_info
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5e..2556cf1c2da1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
387/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 387/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
388.globl cpu_arm920_suspend_size 388.globl cpu_arm920_suspend_size
389.equ cpu_arm920_suspend_size, 4 * 3 389.equ cpu_arm920_suspend_size, 4 * 3
390#ifdef CONFIG_PM_SLEEP 390#ifdef CONFIG_ARM_CPU_SUSPEND
391ENTRY(cpu_arm920_do_suspend) 391ENTRY(cpu_arm920_do_suspend)
392 stmfd sp!, {r4 - r6, lr} 392 stmfd sp!, {r4 - r6, lr}
393 mrc p15, 0, r4, c13, c0, 0 @ PID 393 mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e2972..344c8a548cc0 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
402/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 402/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
403.globl cpu_arm926_suspend_size 403.globl cpu_arm926_suspend_size
404.equ cpu_arm926_suspend_size, 4 * 3 404.equ cpu_arm926_suspend_size, 4 * 3
405#ifdef CONFIG_PM_SLEEP 405#ifdef CONFIG_ARM_CPU_SUSPEND
406ENTRY(cpu_arm926_do_suspend) 406ENTRY(cpu_arm926_do_suspend)
407 stmfd sp!, {r4 - r6, lr} 407 stmfd sp!, {r4 - r6, lr}
408 mrc p15, 0, r4, c13, c0, 0 @ PID 408 mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d6..0b60dd3d742a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
350 350
351.globl cpu_mohawk_suspend_size 351.globl cpu_mohawk_suspend_size
352.equ cpu_mohawk_suspend_size, 4 * 6 352.equ cpu_mohawk_suspend_size, 4 * 6
353#ifdef CONFIG_PM_SLEEP 353#ifdef CONFIG_ARM_CPU_SUSPEND
354ENTRY(cpu_mohawk_do_suspend) 354ENTRY(cpu_mohawk_do_suspend)
355 stmfd sp!, {r4 - r9, lr} 355 stmfd sp!, {r4 - r9, lr}
356 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 356 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd84..d92dfd081429 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
172 172
173.globl cpu_sa1100_suspend_size 173.globl cpu_sa1100_suspend_size
174.equ cpu_sa1100_suspend_size, 4 * 3 174.equ cpu_sa1100_suspend_size, 4 * 3
175#ifdef CONFIG_PM_SLEEP 175#ifdef CONFIG_ARM_CPU_SUSPEND
176ENTRY(cpu_sa1100_do_suspend) 176ENTRY(cpu_sa1100_do_suspend)
177 stmfd sp!, {r4 - r6, lr} 177 stmfd sp!, {r4 - r6, lr}
178 mrc p15, 0, r4, c3, c0, 0 @ domain ID 178 mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 3e6210b4d6d4..054b491ff764 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
17 17
18#ifndef MULTI_CPU 18#ifndef MULTI_CPU
19EXPORT_SYMBOL(cpu_dcache_clean_area); 19EXPORT_SYMBOL(cpu_dcache_clean_area);
20#ifdef CONFIG_MMU
20EXPORT_SYMBOL(cpu_set_pte_ext); 21EXPORT_SYMBOL(cpu_set_pte_ext);
22#endif
21#else 23#else
22EXPORT_SYMBOL(processor); 24EXPORT_SYMBOL(processor);
23#endif 25#endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bcaaa8de9325..919405e20b80 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle)
80 mov pc, lr 80 mov pc, lr
81 81
82ENTRY(cpu_v6_dcache_clean_area) 82ENTRY(cpu_v6_dcache_clean_area)
83#ifndef TLB_CAN_READ_FROM_L1_CACHE
841: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 831: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
85 add r0, r0, #D_CACHE_LINE_SIZE 84 add r0, r0, #D_CACHE_LINE_SIZE
86 subs r1, r1, #D_CACHE_LINE_SIZE 85 subs r1, r1, #D_CACHE_LINE_SIZE
87 bhi 1b 86 bhi 1b
88#endif
89 mov pc, lr 87 mov pc, lr
90 88
91/* 89/*
@@ -138,7 +136,7 @@ ENTRY(cpu_v6_set_pte_ext)
138/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 136/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
139.globl cpu_v6_suspend_size 137.globl cpu_v6_suspend_size
140.equ cpu_v6_suspend_size, 4 * 6 138.equ cpu_v6_suspend_size, 4 * 6
141#ifdef CONFIG_PM_SLEEP 139#ifdef CONFIG_ARM_CPU_SUSPEND
142ENTRY(cpu_v6_do_suspend) 140ENTRY(cpu_v6_do_suspend)
143 stmfd sp!, {r4 - r9, lr} 141 stmfd sp!, {r4 - r9, lr}
144 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 142 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 78f520bc0e99..9704097c450e 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext)
110 ARM( str r3, [r0, #2048]! ) 110 ARM( str r3, [r0, #2048]! )
111 THUMB( add r0, r0, #2048 ) 111 THUMB( add r0, r0, #2048 )
112 THUMB( str r3, [r0] ) 112 THUMB( str r3, [r0] )
113 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 113 ALT_SMP(mov pc,lr)
114 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
114#endif 115#endif
115 mov pc, lr 116 mov pc, lr
116ENDPROC(cpu_v7_set_pte_ext) 117ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 6ffd78c0f9ab..363027e811d6 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext)
73 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY 73 tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
74 orreq r2, #L_PTE_RDONLY 74 orreq r2, #L_PTE_RDONLY
751: strd r2, r3, [r0] 751: strd r2, r3, [r0]
76 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 76 ALT_SMP(mov pc, lr)
77 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
77#endif 78#endif
78 mov pc, lr 79 mov pc, lr
79ENDPROC(cpu_v7_set_pte_ext) 80ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index f584d3f5b37c..2c73a7301ff7 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle)
75ENDPROC(cpu_v7_do_idle) 75ENDPROC(cpu_v7_do_idle)
76 76
77ENTRY(cpu_v7_dcache_clean_area) 77ENTRY(cpu_v7_dcache_clean_area)
78#ifndef TLB_CAN_READ_FROM_L1_CACHE 78 ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
79 ALT_UP(W(nop))
79 dcache_line_size r2, r3 80 dcache_line_size r2, r3
801: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 811: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
81 add r0, r0, r2 82 add r0, r0, r2
82 subs r1, r1, r2 83 subs r1, r1, r2
83 bhi 1b 84 bhi 1b
84 dsb 85 dsb
85#endif
86 mov pc, lr 86 mov pc, lr
87ENDPROC(cpu_v7_dcache_clean_area) 87ENDPROC(cpu_v7_dcache_clean_area)
88 88
@@ -402,6 +402,8 @@ __v7_ca9mp_proc_info:
402 __v7_proc __v7_ca9mp_setup 402 __v7_proc __v7_ca9mp_setup
403 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 403 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
404 404
405#endif /* CONFIG_ARM_LPAE */
406
405 /* 407 /*
406 * Marvell PJ4B processor. 408 * Marvell PJ4B processor.
407 */ 409 */
@@ -411,7 +413,6 @@ __v7_pj4b_proc_info:
411 .long 0xfffffff0 413 .long 0xfffffff0
412 __v7_proc __v7_pj4b_setup 414 __v7_proc __v7_pj4b_setup
413 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info 415 .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
414#endif /* CONFIG_ARM_LPAE */
415 416
416 /* 417 /*
417 * ARM Ltd. Cortex A7 processor. 418 * ARM Ltd. Cortex A7 processor.
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f35..e8efd83b6f25 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
413 413
414.globl cpu_xsc3_suspend_size 414.globl cpu_xsc3_suspend_size
415.equ cpu_xsc3_suspend_size, 4 * 6 415.equ cpu_xsc3_suspend_size, 4 * 6
416#ifdef CONFIG_PM_SLEEP 416#ifdef CONFIG_ARM_CPU_SUSPEND
417ENTRY(cpu_xsc3_do_suspend) 417ENTRY(cpu_xsc3_do_suspend)
418 stmfd sp!, {r4 - r9, lr} 418 stmfd sp!, {r4 - r9, lr}
419 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 419 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa18..e766f889bfd6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
528 528
529.globl cpu_xscale_suspend_size 529.globl cpu_xscale_suspend_size
530.equ cpu_xscale_suspend_size, 4 * 6 530.equ cpu_xscale_suspend_size, 4 * 6
531#ifdef CONFIG_PM_SLEEP 531#ifdef CONFIG_ARM_CPU_SUSPEND
532ENTRY(cpu_xscale_do_suspend) 532ENTRY(cpu_xscale_do_suspend)
533 stmfd sp!, {r4 - r9, lr} 533 stmfd sp!, {r4 - r9, lr}
534 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 534 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h
index 8015ad434a40..8015ad434a40 100644
--- a/arch/arm/kernel/tcm.h
+++ b/arch/arm/mm/tcm.h
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 831e1fdfdb2f..a10297da122b 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -16,7 +16,7 @@
16# are merged into mainline or have been edited in the machine database 16# are merged into mainline or have been edited in the machine database
17# within the last 12 months. References to machine_is_NAME() do not count! 17# within the last 12 months. References to machine_is_NAME() do not count!
18# 18#
19# Last update: Thu Apr 26 08:44:23 2012 19# Last update: Fri Mar 22 17:24:50 2013
20# 20#
21# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 21# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
22# 22#
@@ -64,8 +64,8 @@ h7201 ARCH_H7201 H7201 161
64h7202 ARCH_H7202 H7202 162 64h7202 ARCH_H7202 H7202 162
65iq80321 ARCH_IQ80321 IQ80321 169 65iq80321 ARCH_IQ80321 IQ80321 169
66ks8695 ARCH_KS8695 KS8695 180 66ks8695 ARCH_KS8695 KS8695 180
67karo ARCH_KARO KARO 190
68smdk2410 ARCH_SMDK2410 SMDK2410 193 67smdk2410 ARCH_SMDK2410 SMDK2410 193
68ceiva ARCH_CEIVA CEIVA 200
69voiceblue MACH_VOICEBLUE VOICEBLUE 218 69voiceblue MACH_VOICEBLUE VOICEBLUE 218
70h5400 ARCH_H5400 H5400 220 70h5400 ARCH_H5400 H5400 220
71omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234 71omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234
@@ -95,6 +95,7 @@ lpd7a400 MACH_LPD7A400 LPD7A400 389
95lpd7a404 MACH_LPD7A404 LPD7A404 390 95lpd7a404 MACH_LPD7A404 LPD7A404 390
96csb337 MACH_CSB337 CSB337 399 96csb337 MACH_CSB337 CSB337 399
97mainstone MACH_MAINSTONE MAINSTONE 406 97mainstone MACH_MAINSTONE MAINSTONE 406
98lite300 MACH_LITE300 LITE300 408
98xcep MACH_XCEP XCEP 413 99xcep MACH_XCEP XCEP 413
99arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414 100arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414
100nomadik MACH_NOMADIK NOMADIK 420 101nomadik MACH_NOMADIK NOMADIK 420
@@ -131,12 +132,14 @@ kb9200 MACH_KB9200 KB9200 612
131sx1 MACH_SX1 SX1 613 132sx1 MACH_SX1 SX1 613
132ixdp465 MACH_IXDP465 IXDP465 618 133ixdp465 MACH_IXDP465 IXDP465 618
133ixdp2351 MACH_IXDP2351 IXDP2351 619 134ixdp2351 MACH_IXDP2351 IXDP2351 619
135cm4008 MACH_CM4008 CM4008 624
134iq80332 MACH_IQ80332 IQ80332 629 136iq80332 MACH_IQ80332 IQ80332 629
135gtwx5715 MACH_GTWX5715 GTWX5715 641 137gtwx5715 MACH_GTWX5715 GTWX5715 641
136csb637 MACH_CSB637 CSB637 648 138csb637 MACH_CSB637 CSB637 648
137n30 MACH_N30 N30 656 139n30 MACH_N30 N30 656
138nec_mp900 MACH_NEC_MP900 NEC_MP900 659 140nec_mp900 MACH_NEC_MP900 NEC_MP900 659
139kafa MACH_KAFA KAFA 662 141kafa MACH_KAFA KAFA 662
142cm41xx MACH_CM41XX CM41XX 672
140ts72xx MACH_TS72XX TS72XX 673 143ts72xx MACH_TS72XX TS72XX 673
141otom MACH_OTOM OTOM 680 144otom MACH_OTOM OTOM 680
142nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681 145nexcoder_2440 MACH_NEXCODER_2440 NEXCODER_2440 681
@@ -149,6 +152,7 @@ colibri MACH_COLIBRI COLIBRI 729
149gateway7001 MACH_GATEWAY7001 GATEWAY7001 731 152gateway7001 MACH_GATEWAY7001 GATEWAY7001 731
150pcm027 MACH_PCM027 PCM027 732 153pcm027 MACH_PCM027 PCM027 732
151anubis MACH_ANUBIS ANUBIS 734 154anubis MACH_ANUBIS ANUBIS 734
155xboardgp8 MACH_XBOARDGP8 XBOARDGP8 742
152akita MACH_AKITA AKITA 744 156akita MACH_AKITA AKITA 744
153e330 MACH_E330 E330 753 157e330 MACH_E330 E330 753
154nokia770 MACH_NOKIA770 NOKIA770 755 158nokia770 MACH_NOKIA770 NOKIA770 755
@@ -157,9 +161,11 @@ edb9315a MACH_EDB9315A EDB9315A 772
157stargate2 MACH_STARGATE2 STARGATE2 774 161stargate2 MACH_STARGATE2 STARGATE2 774
158intelmote2 MACH_INTELMOTE2 INTELMOTE2 775 162intelmote2 MACH_INTELMOTE2 INTELMOTE2 775
159trizeps4 MACH_TRIZEPS4 TRIZEPS4 776 163trizeps4 MACH_TRIZEPS4 TRIZEPS4 776
164pnx4008 MACH_PNX4008 PNX4008 782
160cpuat91 MACH_CPUAT91 CPUAT91 787 165cpuat91 MACH_CPUAT91 CPUAT91 787
161iq81340sc MACH_IQ81340SC IQ81340SC 799 166iq81340sc MACH_IQ81340SC IQ81340SC 799
162iq81340mc MACH_IQ81340MC IQ81340MC 801 167iq81340mc MACH_IQ81340MC IQ81340MC 801
168se4200 MACH_SE4200 SE4200 809
163micro9 MACH_MICRO9 MICRO9 811 169micro9 MACH_MICRO9 MICRO9 811
164micro9l MACH_MICRO9L MICRO9L 812 170micro9l MACH_MICRO9L MICRO9L 812
165omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817 171omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817
@@ -178,6 +184,7 @@ mx21ads MACH_MX21ADS MX21ADS 851
178ams_delta MACH_AMS_DELTA AMS_DELTA 862 184ams_delta MACH_AMS_DELTA AMS_DELTA 862
179nas100d MACH_NAS100D NAS100D 865 185nas100d MACH_NAS100D NAS100D 865
180magician MACH_MAGICIAN MAGICIAN 875 186magician MACH_MAGICIAN MAGICIAN 875
187cm4002 MACH_CM4002 CM4002 876
181nxdkn MACH_NXDKN NXDKN 880 188nxdkn MACH_NXDKN NXDKN 880
182palmtx MACH_PALMTX PALMTX 885 189palmtx MACH_PALMTX PALMTX 885
183s3c2413 MACH_S3C2413 S3C2413 887 190s3c2413 MACH_S3C2413 S3C2413 887
@@ -203,7 +210,6 @@ omap_fsample MACH_OMAP_FSAMPLE OMAP_FSAMPLE 970
203snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986 210snapper_cl15 MACH_SNAPPER_CL15 SNAPPER_CL15 986
204omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993 211omap_palmz71 MACH_OMAP_PALMZ71 OMAP_PALMZ71 993
205smdk2412 MACH_SMDK2412 SMDK2412 1009 212smdk2412 MACH_SMDK2412 SMDK2412 1009
206bkde303 MACH_BKDE303 BKDE303 1021
207smdk2413 MACH_SMDK2413 SMDK2413 1022 213smdk2413 MACH_SMDK2413 SMDK2413 1022
208aml_m5900 MACH_AML_M5900 AML_M5900 1024 214aml_m5900 MACH_AML_M5900 AML_M5900 1024
209balloon3 MACH_BALLOON3 BALLOON3 1029 215balloon3 MACH_BALLOON3 BALLOON3 1029
@@ -214,6 +220,7 @@ fsg MACH_FSG FSG 1091
214at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099 220at91sam9260ek MACH_AT91SAM9260EK AT91SAM9260EK 1099
215glantank MACH_GLANTANK GLANTANK 1100 221glantank MACH_GLANTANK GLANTANK 1100
216n2100 MACH_N2100 N2100 1101 222n2100 MACH_N2100 N2100 1101
223im42xx MACH_IM42XX IM42XX 1105
217qt2410 MACH_QT2410 QT2410 1108 224qt2410 MACH_QT2410 QT2410 1108
218kixrp435 MACH_KIXRP435 KIXRP435 1109 225kixrp435 MACH_KIXRP435 KIXRP435 1109
219cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114 226cc9p9360dev MACH_CC9P9360DEV CC9P9360DEV 1114
@@ -247,6 +254,7 @@ csb726 MACH_CSB726 CSB726 1359
247davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380 254davinci_dm6467_evm MACH_DAVINCI_DM6467_EVM DAVINCI_DM6467_EVM 1380
248davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381 255davinci_dm355_evm MACH_DAVINCI_DM355_EVM DAVINCI_DM355_EVM 1381
249littleton MACH_LITTLETON LITTLETON 1388 256littleton MACH_LITTLETON LITTLETON 1388
257im4004 MACH_IM4004 IM4004 1400
250realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407 258realview_pb11mp MACH_REALVIEW_PB11MP REALVIEW_PB11MP 1407
251mx27_3ds MACH_MX27_3DS MX27_3DS 1430 259mx27_3ds MACH_MX27_3DS MX27_3DS 1430
252halibut MACH_HALIBUT HALIBUT 1439 260halibut MACH_HALIBUT HALIBUT 1439
@@ -268,6 +276,7 @@ dns323 MACH_DNS323 DNS323 1542
268omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546 276omap3_beagle MACH_OMAP3_BEAGLE OMAP3_BEAGLE 1546
269nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548 277nokia_n810 MACH_NOKIA_N810 NOKIA_N810 1548
270pcm038 MACH_PCM038 PCM038 1551 278pcm038 MACH_PCM038 PCM038 1551
279sg310 MACH_SG310 SG310 1564
271ts209 MACH_TS209 TS209 1565 280ts209 MACH_TS209 TS209 1565
272at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566 281at91cap9adk MACH_AT91CAP9ADK AT91CAP9ADK 1566
273mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574 282mx31moboard MACH_MX31MOBOARD MX31MOBOARD 1574
@@ -371,7 +380,6 @@ pcm043 MACH_PCM043 PCM043 2072
371sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097 380sheevaplug MACH_SHEEVAPLUG SHEEVAPLUG 2097
372avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104 381avengers_lite MACH_AVENGERS_LITE AVENGERS_LITE 2104
373mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125 382mx51_babbage MACH_MX51_BABBAGE MX51_BABBAGE 2125
374tx37 MACH_TX37 TX37 2127
375rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135 383rd78x00_masa MACH_RD78X00_MASA RD78X00_MASA 2135
376dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138 384dm355_leopard MACH_DM355_LEOPARD DM355_LEOPARD 2138
377ts219 MACH_TS219 TS219 2139 385ts219 MACH_TS219 TS219 2139
@@ -380,12 +388,12 @@ davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157
380at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159 388at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159
381omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160 389omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
382magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162 390magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
383tx25 MACH_TX25 TX25 2177
384omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178 391omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
385anw6410 MACH_ANW6410 ANW6410 2183 392anw6410 MACH_ANW6410 ANW6410 2183
386imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187 393imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
387portuxg20 MACH_PORTUXG20 PORTUXG20 2191 394portuxg20 MACH_PORTUXG20 PORTUXG20 2191
388smdkc110 MACH_SMDKC110 SMDKC110 2193 395smdkc110 MACH_SMDKC110 SMDKC110 2193
396cabespresso MACH_CABESPRESSO CABESPRESSO 2194
389omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200 397omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200
390netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201 398netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201
391netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202 399netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202
@@ -404,6 +412,7 @@ bigdisk MACH_BIGDISK BIGDISK 2283
404at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288 412at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
405bcmring MACH_BCMRING BCMRING 2289 413bcmring MACH_BCMRING BCMRING 2289
406mahimahi MACH_MAHIMAHI MAHIMAHI 2304 414mahimahi MACH_MAHIMAHI MAHIMAHI 2304
415cerebric MACH_CEREBRIC CEREBRIC 2311
407smdk6442 MACH_SMDK6442 SMDK6442 2324 416smdk6442 MACH_SMDK6442 SMDK6442 2324
408openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325 417openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325
409devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330 418devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330
@@ -423,10 +432,10 @@ raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413
423raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414 432raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414
424raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415 433raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415
425tnetv107x MACH_TNETV107X TNETV107X 2418 434tnetv107x MACH_TNETV107X TNETV107X 2418
426mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428
427smdkv210 MACH_SMDKV210 SMDKV210 2456 435smdkv210 MACH_SMDKV210 SMDKV210 2456
428omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464 436omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
429omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465 437omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
438cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466
430smartq7 MACH_SMARTQ7 SMARTQ7 2479 439smartq7 MACH_SMARTQ7 SMARTQ7 2479
431watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491 440watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491
432g4evm MACH_G4EVM G4EVM 2493 441g4evm MACH_G4EVM G4EVM 2493
@@ -434,12 +443,10 @@ omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
434ts41x MACH_TS41X TS41X 2502 443ts41x MACH_TS41X TS41X 2502
435phy3250 MACH_PHY3250 PHY3250 2511 444phy3250 MACH_PHY3250 PHY3250 2511
436mini6410 MACH_MINI6410 MINI6410 2520 445mini6410 MACH_MINI6410 MINI6410 2520
437tx51 MACH_TX51 TX51 2529
438mx28evk MACH_MX28EVK MX28EVK 2531 446mx28evk MACH_MX28EVK MX28EVK 2531
439smartq5 MACH_SMARTQ5 SMARTQ5 2534 447smartq5 MACH_SMARTQ5 SMARTQ5 2534
440davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548 448davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
441mxt_td60 MACH_MXT_TD60 MXT_TD60 2550 449mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
442pca101 MACH_PCA101 PCA101 2595
443capc7117 MACH_CAPC7117 CAPC7117 2612 450capc7117 MACH_CAPC7117 CAPC7117 2612
444icontrol MACH_ICONTROL ICONTROL 2624 451icontrol MACH_ICONTROL ICONTROL 2624
445gplugd MACH_GPLUGD GPLUGD 2625 452gplugd MACH_GPLUGD GPLUGD 2625
@@ -465,6 +472,7 @@ igep0030 MACH_IGEP0030 IGEP0030 2717
465sbc3530 MACH_SBC3530 SBC3530 2722 472sbc3530 MACH_SBC3530 SBC3530 2722
466saarb MACH_SAARB SAARB 2727 473saarb MACH_SAARB SAARB 2727
467harmony MACH_HARMONY HARMONY 2731 474harmony MACH_HARMONY HARMONY 2731
475cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733
468msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741 476msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741
469cm_t3517 MACH_CM_T3517 CM_T3517 2750 477cm_t3517 MACH_CM_T3517 CM_T3517 2750
470wbd222 MACH_WBD222 WBD222 2753 478wbd222 MACH_WBD222 WBD222 2753
@@ -480,10 +488,8 @@ eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
480eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822 488eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
481eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823 489eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
482smdkc210 MACH_SMDKC210 SMDKC210 2838 490smdkc210 MACH_SMDKC210 SMDKC210 2838
483pcaal1 MACH_PCAAL1 PCAAL1 2843
484t5325 MACH_T5325 T5325 2846 491t5325 MACH_T5325 T5325 2846
485income MACH_INCOME INCOME 2849 492income MACH_INCOME INCOME 2849
486mx257sx MACH_MX257SX MX257SX 2861
487goni MACH_GONI GONI 2862 493goni MACH_GONI GONI 2862
488bv07 MACH_BV07 BV07 2882 494bv07 MACH_BV07 BV07 2882
489openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884 495openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884
@@ -491,7 +497,6 @@ devixp MACH_DEVIXP DEVIXP 2885
491miccpt MACH_MICCPT MICCPT 2886 497miccpt MACH_MICCPT MICCPT 2886
492mic256 MACH_MIC256 MIC256 2887 498mic256 MACH_MIC256 MIC256 2887
493u5500 MACH_U5500 U5500 2890 499u5500 MACH_U5500 U5500 2890
494pov15hd MACH_POV15HD POV15HD 2910
495linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913 500linkstation_lschl MACH_LINKSTATION_LSCHL LINKSTATION_LSCHL 2913
496smdkv310 MACH_SMDKV310 SMDKV310 2925 501smdkv310 MACH_SMDKV310 SMDKV310 2925
497wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928 502wm8505_7in_netbook MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK 2928
@@ -518,7 +523,6 @@ prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103
518paz00 MACH_PAZ00 PAZ00 3128 523paz00 MACH_PAZ00 PAZ00 3128
519acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129 524acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129
520ag5evm MACH_AG5EVM AG5EVM 3189 525ag5evm MACH_AG5EVM AG5EVM 3189
521tsunagi MACH_TSUNAGI TSUNAGI 3197
522ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 526ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
523wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 527wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
524trimslice MACH_TRIMSLICE TRIMSLICE 3209 528trimslice MACH_TRIMSLICE TRIMSLICE 3209
@@ -529,8 +533,6 @@ msm8960_sim MACH_MSM8960_SIM MSM8960_SIM 3230
529msm8960_rumi3 MACH_MSM8960_RUMI3 MSM8960_RUMI3 3231 533msm8960_rumi3 MACH_MSM8960_RUMI3 MSM8960_RUMI3 3231
530gsia18s MACH_GSIA18S GSIA18S 3234 534gsia18s MACH_GSIA18S GSIA18S 3234
531mx53_loco MACH_MX53_LOCO MX53_LOCO 3273 535mx53_loco MACH_MX53_LOCO MX53_LOCO 3273
532tx53 MACH_TX53 TX53 3279
533encore MACH_ENCORE ENCORE 3284
534wario MACH_WARIO WARIO 3288 536wario MACH_WARIO WARIO 3288
535cm_t3730 MACH_CM_T3730 CM_T3730 3290 537cm_t3730 MACH_CM_T3730 CM_T3730 3290
536hrefv60 MACH_HREFV60 HREFV60 3293 538hrefv60 MACH_HREFV60 HREFV60 3293
@@ -538,603 +540,24 @@ armlex4210 MACH_ARMLEX4210 ARMLEX4210 3361
538snowball MACH_SNOWBALL SNOWBALL 3363 540snowball MACH_SNOWBALL SNOWBALL 3363
539xilinx_ep107 MACH_XILINX_EP107 XILINX_EP107 3378 541xilinx_ep107 MACH_XILINX_EP107 XILINX_EP107 3378
540nuri MACH_NURI NURI 3379 542nuri MACH_NURI NURI 3379
541wtplug MACH_WTPLUG WTPLUG 3412
542veridis_a300 MACH_VERIDIS_A300 VERIDIS_A300 3448
 origen MACH_ORIGEN ORIGEN 3455
-wm8650refboard MACH_WM8650REFBOARD WM8650REFBOARD 3472
-xarina MACH_XARINA XARINA 3476
-sdvr MACH_SDVR SDVR 3478
-acer_maya MACH_ACER_MAYA ACER_MAYA 3479
-pico MACH_PICO PICO 3480
-cwmx233 MACH_CWMX233 CWMX233 3481
-cwam1808 MACH_CWAM1808 CWAM1808 3482
-cwdm365 MACH_CWDM365 CWDM365 3483
-mx51_moray MACH_MX51_MORAY MX51_MORAY 3484
-thales_cbc MACH_THALES_CBC THALES_CBC 3485
-bluepoint MACH_BLUEPOINT BLUEPOINT 3486
-dir665 MACH_DIR665 DIR665 3487
-acmerover1 MACH_ACMEROVER1 ACMEROVER1 3488
-shooter_ct MACH_SHOOTER_CT SHOOTER_CT 3489
-bliss MACH_BLISS BLISS 3490
-blissc MACH_BLISSC BLISSC 3491
-thales_adc MACH_THALES_ADC THALES_ADC 3492
-ubisys_p9d_evp MACH_UBISYS_P9D_EVP UBISYS_P9D_EVP 3493
-atdgp318 MACH_ATDGP318 ATDGP318 3494
-dma210u MACH_DMA210U DMA210U 3495
-em_t3 MACH_EM_T3 EM_T3 3496
-htx3250 MACH_HTX3250 HTX3250 3497
-g50 MACH_G50 G50 3498
-eco5 MACH_ECO5 ECO5 3499
-wintergrasp MACH_WINTERGRASP WINTERGRASP 3500
-puro MACH_PURO PURO 3501
-shooter_k MACH_SHOOTER_K SHOOTER_K 3502
 nspire MACH_NSPIRE NSPIRE 3503
-mickxx MACH_MICKXX MICKXX 3504
-lxmb MACH_LXMB LXMB 3505
-adam MACH_ADAM ADAM 3507
-b1004 MACH_B1004 B1004 3508
-oboea MACH_OBOEA OBOEA 3509
-a1015 MACH_A1015 A1015 3510
-robin_vbdt30 MACH_ROBIN_VBDT30 ROBIN_VBDT30 3511
-tegra_enterprise MACH_TEGRA_ENTERPRISE TEGRA_ENTERPRISE 3512
-rfl108200_mk10 MACH_RFL108200_MK10 RFL108200_MK10 3513
-rfl108300_mk16 MACH_RFL108300_MK16 RFL108300_MK16 3514
-rover_v7 MACH_ROVER_V7 ROVER_V7 3515
-miphone MACH_MIPHONE MIPHONE 3516
-femtobts MACH_FEMTOBTS FEMTOBTS 3517
-monopoli MACH_MONOPOLI MONOPOLI 3518
-boss MACH_BOSS BOSS 3519
-davinci_dm368_vtam MACH_DAVINCI_DM368_VTAM DAVINCI_DM368_VTAM 3520
-clcon MACH_CLCON CLCON 3521
 nokia_rm696 MACH_NOKIA_RM696 NOKIA_RM696 3522
-tahiti MACH_TAHITI TAHITI 3523
-fighter MACH_FIGHTER FIGHTER 3524
-sgh_i710 MACH_SGH_I710 SGH_I710 3525
-integreproscb MACH_INTEGREPROSCB INTEGREPROSCB 3526
-monza MACH_MONZA MONZA 3527
-calimain MACH_CALIMAIN CALIMAIN 3528
-mx6q_sabreauto MACH_MX6Q_SABREAUTO MX6Q_SABREAUTO 3529
-gma01x MACH_GMA01X GMA01X 3530
-sbc51 MACH_SBC51 SBC51 3531
-fit MACH_FIT FIT 3532
-steelhead MACH_STEELHEAD STEELHEAD 3533
-panther MACH_PANTHER PANTHER 3534
-msm8960_liquid MACH_MSM8960_LIQUID MSM8960_LIQUID 3535
-lexikonct MACH_LEXIKONCT LEXIKONCT 3536
-ns2816_stb MACH_NS2816_STB NS2816_STB 3537
-sei_mm2_lpc3250 MACH_SEI_MM2_LPC3250 SEI_MM2_LPC3250 3538
-cmimx53 MACH_CMIMX53 CMIMX53 3539
-sandwich MACH_SANDWICH SANDWICH 3540
-chief MACH_CHIEF CHIEF 3541
-pogo_e02 MACH_POGO_E02 POGO_E02 3542
 mikrap_x168 MACH_MIKRAP_X168 MIKRAP_X168 3543
-htcmozart MACH_HTCMOZART HTCMOZART 3544
-htcgold MACH_HTCGOLD HTCGOLD 3545
-mt72xx MACH_MT72XX MT72XX 3546
-mx51_ivy MACH_MX51_IVY MX51_IVY 3547
-mx51_lvd MACH_MX51_LVD MX51_LVD 3548
-omap3_wiser2 MACH_OMAP3_WISER2 OMAP3_WISER2 3549
-dreamplug MACH_DREAMPLUG DREAMPLUG 3550
-cobas_c_111 MACH_COBAS_C_111 COBAS_C_111 3551
-cobas_u_411 MACH_COBAS_U_411 COBAS_U_411 3552
-hssd MACH_HSSD HSSD 3553
-iom35x MACH_IOM35X IOM35X 3554
-psom_omap MACH_PSOM_OMAP PSOM_OMAP 3555
-iphone_2g MACH_IPHONE_2G IPHONE_2G 3556
-iphone_3g MACH_IPHONE_3G IPHONE_3G 3557
-ipod_touch_1g MACH_IPOD_TOUCH_1G IPOD_TOUCH_1G 3558
-pharos_tpc MACH_PHAROS_TPC PHAROS_TPC 3559
-mx53_hydra MACH_MX53_HYDRA MX53_HYDRA 3560
-ns2816_dev_board MACH_NS2816_DEV_BOARD NS2816_DEV_BOARD 3561
-iphone_3gs MACH_IPHONE_3GS IPHONE_3GS 3562
-iphone_4 MACH_IPHONE_4 IPHONE_4 3563
-ipod_touch_4g MACH_IPOD_TOUCH_4G IPOD_TOUCH_4G 3564
-dragon_e1100 MACH_DRAGON_E1100 DRAGON_E1100 3565
-topside MACH_TOPSIDE TOPSIDE 3566
-irisiii MACH_IRISIII IRISIII 3567
 deto_macarm9 MACH_DETO_MACARM9 DETO_MACARM9 3568
-eti_d1 MACH_ETI_D1 ETI_D1 3569
-som3530sdk MACH_SOM3530SDK SOM3530SDK 3570
-oc_engine MACH_OC_ENGINE OC_ENGINE 3571
-apq8064_sim MACH_APQ8064_SIM APQ8064_SIM 3572
-alps MACH_ALPS ALPS 3575
-tny_t3730 MACH_TNY_T3730 TNY_T3730 3576
-geryon_nfe MACH_GERYON_NFE GERYON_NFE 3577
-ns2816_ref_board MACH_NS2816_REF_BOARD NS2816_REF_BOARD 3578
-silverstone MACH_SILVERSTONE SILVERSTONE 3579
-mtt2440 MACH_MTT2440 MTT2440 3580
-ynicdb MACH_YNICDB YNICDB 3581
-bct MACH_BCT BCT 3582
-tuscan MACH_TUSCAN TUSCAN 3583
-xbt_sam9g45 MACH_XBT_SAM9G45 XBT_SAM9G45 3584
-enbw_cmc MACH_ENBW_CMC ENBW_CMC 3585
-ch104mx257 MACH_CH104MX257 CH104MX257 3587
-openpri MACH_OPENPRI OPENPRI 3588
-am335xevm MACH_AM335XEVM AM335XEVM 3589
-picodmb MACH_PICODMB PICODMB 3590
-waluigi MACH_WALUIGI WALUIGI 3591
-punicag7 MACH_PUNICAG7 PUNICAG7 3592
-ipad_1g MACH_IPAD_1G IPAD_1G 3593
-appletv_2g MACH_APPLETV_2G APPLETV_2G 3594
-mach_ecog45 MACH_MACH_ECOG45 MACH_ECOG45 3595
-ait_cam_enc_4xx MACH_AIT_CAM_ENC_4XX AIT_CAM_ENC_4XX 3596
-runnymede MACH_RUNNYMEDE RUNNYMEDE 3597
-play MACH_PLAY PLAY 3598
-hw90260 MACH_HW90260 HW90260 3599
-tagh MACH_TAGH TAGH 3600
-filbert MACH_FILBERT FILBERT 3601
-getinge_netcomv3 MACH_GETINGE_NETCOMV3 GETINGE_NETCOMV3 3602
-cw20 MACH_CW20 CW20 3603
-cinema MACH_CINEMA CINEMA 3604
-cinema_tea MACH_CINEMA_TEA CINEMA_TEA 3605
-cinema_coffee MACH_CINEMA_COFFEE CINEMA_COFFEE 3606
-cinema_juice MACH_CINEMA_JUICE CINEMA_JUICE 3607
-mx53_mirage2 MACH_MX53_MIRAGE2 MX53_MIRAGE2 3609
-mx53_efikasb MACH_MX53_EFIKASB MX53_EFIKASB 3610
-stm_b2000 MACH_STM_B2000 STM_B2000 3612
 m28evk MACH_M28EVK M28EVK 3613
-pda MACH_PDA PDA 3614
-meraki_mr58 MACH_MERAKI_MR58 MERAKI_MR58 3615
 kota2 MACH_KOTA2 KOTA2 3616
-letcool MACH_LETCOOL LETCOOL 3617
-mx27iat MACH_MX27IAT MX27IAT 3618
-apollo_td MACH_APOLLO_TD APOLLO_TD 3619
-arena MACH_ARENA ARENA 3620
-gsngateway MACH_GSNGATEWAY GSNGATEWAY 3621
-lf2000 MACH_LF2000 LF2000 3622
 bonito MACH_BONITO BONITO 3623
-asymptote MACH_ASYMPTOTE ASYMPTOTE 3624
-bst2brd MACH_BST2BRD BST2BRD 3625
-tx335s MACH_TX335S TX335S 3626
-pelco_tesla MACH_PELCO_TESLA PELCO_TESLA 3627
-rrhtestplat MACH_RRHTESTPLAT RRHTESTPLAT 3628
-vidtonic_pro MACH_VIDTONIC_PRO VIDTONIC_PRO 3629
-pl_apollo MACH_PL_APOLLO PL_APOLLO 3630
-pl_phoenix MACH_PL_PHOENIX PL_PHOENIX 3631
-m28cu3 MACH_M28CU3 M28CU3 3632
-vvbox_hd MACH_VVBOX_HD VVBOX_HD 3633
-coreware_sam9260_ MACH_COREWARE_SAM9260_ COREWARE_SAM9260_ 3634
-marmaduke MACH_MARMADUKE MARMADUKE 3635
-amg_xlcore_camera MACH_AMG_XLCORE_CAMERA AMG_XLCORE_CAMERA 3636
 omap3_egf MACH_OMAP3_EGF OMAP3_EGF 3637
 smdk4212 MACH_SMDK4212 SMDK4212 3638
-dnp9200 MACH_DNP9200 DNP9200 3639
-tf101 MACH_TF101 TF101 3640
-omap3silvio MACH_OMAP3SILVIO OMAP3SILVIO 3641
-picasso2 MACH_PICASSO2 PICASSO2 3642
-vangogh2 MACH_VANGOGH2 VANGOGH2 3643
-olpc_xo_1_75 MACH_OLPC_XO_1_75 OLPC_XO_1_75 3644
-gx400 MACH_GX400 GX400 3645
-gs300 MACH_GS300 GS300 3646
-acer_a9 MACH_ACER_A9 ACER_A9 3647
-vivow_evm MACH_VIVOW_EVM VIVOW_EVM 3648
-veloce_cxq MACH_VELOCE_CXQ VELOCE_CXQ 3649
-veloce_cxm MACH_VELOCE_CXM VELOCE_CXM 3650
-p1852 MACH_P1852 P1852 3651
-naxy100 MACH_NAXY100 NAXY100 3652
-taishan MACH_TAISHAN TAISHAN 3653
-touchlink MACH_TOUCHLINK TOUCHLINK 3654
-stm32f103ze MACH_STM32F103ZE STM32F103ZE 3655
-mcx MACH_MCX MCX 3656
-stm_nmhdk_fli7610 MACH_STM_NMHDK_FLI7610 STM_NMHDK_FLI7610 3657
-top28x MACH_TOP28X TOP28X 3658
-okl4vp_microvisor MACH_OKL4VP_MICROVISOR OKL4VP_MICROVISOR 3659
-pop MACH_POP POP 3660
-layer MACH_LAYER LAYER 3661
-trondheim MACH_TRONDHEIM TRONDHEIM 3662
-eva MACH_EVA EVA 3663
-trust_taurus MACH_TRUST_TAURUS TRUST_TAURUS 3664
-ns2816_huashan MACH_NS2816_HUASHAN NS2816_HUASHAN 3665
-ns2816_yangcheng MACH_NS2816_YANGCHENG NS2816_YANGCHENG 3666
-p852 MACH_P852 P852 3667
-flea3 MACH_FLEA3 FLEA3 3668
-bowfin MACH_BOWFIN BOWFIN 3669
-mv88de3100 MACH_MV88DE3100 MV88DE3100 3670
-pia_am35x MACH_PIA_AM35X PIA_AM35X 3671
-cedar MACH_CEDAR CEDAR 3672
-picasso_e MACH_PICASSO_E PICASSO_E 3673
-samsung_e60 MACH_SAMSUNG_E60 SAMSUNG_E60 3674
-sdvr_mini MACH_SDVR_MINI SDVR_MINI 3676
-omap3_ij3k MACH_OMAP3_IJ3K OMAP3_IJ3K 3677
-modasmc1 MACH_MODASMC1 MODASMC1 3678
-apq8064_rumi3 MACH_APQ8064_RUMI3 APQ8064_RUMI3 3679
-matrix506 MACH_MATRIX506 MATRIX506 3680
-msm9615_mtp MACH_MSM9615_MTP MSM9615_MTP 3681
-dm36x_spawndc MACH_DM36X_SPAWNDC DM36X_SPAWNDC 3682
-sff792 MACH_SFF792 SFF792 3683
-am335xiaevm MACH_AM335XIAEVM AM335XIAEVM 3684
-g3c2440 MACH_G3C2440 G3C2440 3685
-tion270 MACH_TION270 TION270 3686
-w22q7arm02 MACH_W22Q7ARM02 W22Q7ARM02 3687
-omap_cat MACH_OMAP_CAT OMAP_CAT 3688
-at91sam9n12ek MACH_AT91SAM9N12EK AT91SAM9N12EK 3689
-morrison MACH_MORRISON MORRISON 3690
-svdu MACH_SVDU SVDU 3691
-lpp01 MACH_LPP01 LPP01 3692
-ubc283 MACH_UBC283 UBC283 3693
-zeppelin MACH_ZEPPELIN ZEPPELIN 3694
-motus MACH_MOTUS MOTUS 3695
-neomainboard MACH_NEOMAINBOARD NEOMAINBOARD 3696
-devkit3250 MACH_DEVKIT3250 DEVKIT3250 3697
-devkit7000 MACH_DEVKIT7000 DEVKIT7000 3698
-fmc_uic MACH_FMC_UIC FMC_UIC 3699
-fmc_dcm MACH_FMC_DCM FMC_DCM 3700
-batwm MACH_BATWM BATWM 3701
-atlas6cb MACH_ATLAS6CB ATLAS6CB 3702
-blue MACH_BLUE BLUE 3705
-colorado MACH_COLORADO COLORADO 3706
-popc MACH_POPC POPC 3707
-promwad_jade MACH_PROMWAD_JADE PROMWAD_JADE 3708
-amp MACH_AMP AMP 3709
-gnet_amp MACH_GNET_AMP GNET_AMP 3710
-toques MACH_TOQUES TOQUES 3711
 apx4devkit MACH_APX4DEVKIT APX4DEVKIT 3712
-dct_storm MACH_DCT_STORM DCT_STORM 3713
-owl MACH_OWL OWL 3715
-cogent_csb1741 MACH_COGENT_CSB1741 COGENT_CSB1741 3716
-adillustra610 MACH_ADILLUSTRA610 ADILLUSTRA610 3718
-ecafe_na04 MACH_ECAFE_NA04 ECAFE_NA04 3719
-popct MACH_POPCT POPCT 3720
-omap3_helena MACH_OMAP3_HELENA OMAP3_HELENA 3721
-ach MACH_ACH ACH 3722
-module_dtb MACH_MODULE_DTB MODULE_DTB 3723
-oslo_elisabeth MACH_OSLO_ELISABETH OSLO_ELISABETH 3725
-tt01 MACH_TT01 TT01 3726
-msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727
-msm8930_mtp MACH_MSM8930_MTP MSM8930_MTP 3728
-msm8930_fluid MACH_MSM8930_FLUID MSM8930_FLUID 3729
-ltu11 MACH_LTU11 LTU11 3730
-am1808_spawnco MACH_AM1808_SPAWNCO AM1808_SPAWNCO 3731
-flx6410 MACH_FLX6410 FLX6410 3732
-mx6q_qsb MACH_MX6Q_QSB MX6Q_QSB 3733
-mx53_plt424 MACH_MX53_PLT424 MX53_PLT424 3734
-jasmine MACH_JASMINE JASMINE 3735
-l138_owlboard_plus MACH_L138_OWLBOARD_PLUS L138_OWLBOARD_PLUS 3736
-wr21 MACH_WR21 WR21 3737
-peaboy MACH_PEABOY PEABOY 3739
-mx28_plato MACH_MX28_PLATO MX28_PLATO 3740
-kacom2 MACH_KACOM2 KACOM2 3741
-slco MACH_SLCO SLCO 3742
-imx51pico MACH_IMX51PICO IMX51PICO 3743
-glink1 MACH_GLINK1 GLINK1 3744
-diamond MACH_DIAMOND DIAMOND 3745
-d9000 MACH_D9000 D9000 3746
-w5300e01 MACH_W5300E01 W5300E01 3747
-im6000 MACH_IM6000 IM6000 3748
-mx51_fred51 MACH_MX51_FRED51 MX51_FRED51 3749
-stm32f2 MACH_STM32F2 STM32F2 3750
-ville MACH_VILLE VILLE 3751
-ptip_murnau MACH_PTIP_MURNAU PTIP_MURNAU 3752
-ptip_classic MACH_PTIP_CLASSIC PTIP_CLASSIC 3753
-mx53grb MACH_MX53GRB MX53GRB 3754
-gagarin MACH_GAGARIN GAGARIN 3755
-nas2big MACH_NAS2BIG NAS2BIG 3757
-superfemto MACH_SUPERFEMTO SUPERFEMTO 3758
-teufel MACH_TEUFEL TEUFEL 3759
-dinara MACH_DINARA DINARA 3760
-vanquish MACH_VANQUISH VANQUISH 3761
-zipabox1 MACH_ZIPABOX1 ZIPABOX1 3762
-u9540 MACH_U9540 U9540 3763
-jet MACH_JET JET 3764
 smdk4412 MACH_SMDK4412 SMDK4412 3765
-elite MACH_ELITE ELITE 3766
-spear320_hmi MACH_SPEAR320_HMI SPEAR320_HMI 3767
-ontario MACH_ONTARIO ONTARIO 3768
-mx6q_sabrelite MACH_MX6Q_SABRELITE MX6Q_SABRELITE 3769
-vc200 MACH_VC200 VC200 3770
-msm7625a_ffa MACH_MSM7625A_FFA MSM7625A_FFA 3771
-msm7625a_surf MACH_MSM7625A_SURF MSM7625A_SURF 3772
-benthossbp MACH_BENTHOSSBP BENTHOSSBP 3773
-smdk5210 MACH_SMDK5210 SMDK5210 3774
-empq2300 MACH_EMPQ2300 EMPQ2300 3775
-minipos MACH_MINIPOS MINIPOS 3776
-omap5_sevm MACH_OMAP5_SEVM OMAP5_SEVM 3777
-shelter MACH_SHELTER SHELTER 3778
-omap3_devkit8500 MACH_OMAP3_DEVKIT8500 OMAP3_DEVKIT8500 3779
-edgetd MACH_EDGETD EDGETD 3780
-copperyard MACH_COPPERYARD COPPERYARD 3781
-edge_u MACH_EDGE_U EDGE_U 3783
-edge_td MACH_EDGE_TD EDGE_TD 3784
-wdss MACH_WDSS WDSS 3785
-dl_pb25 MACH_DL_PB25 DL_PB25 3786
-dss11 MACH_DSS11 DSS11 3787
-cpa MACH_CPA CPA 3788
-aptp2000 MACH_APTP2000 APTP2000 3789
 marzen MACH_MARZEN MARZEN 3790
-st_turbine MACH_ST_TURBINE ST_TURBINE 3791
-gtl_it3300 MACH_GTL_IT3300 GTL_IT3300 3792
-mx6_mule MACH_MX6_MULE MX6_MULE 3793
-v7pxa_dt MACH_V7PXA_DT V7PXA_DT 3794
-v7mmp_dt MACH_V7MMP_DT V7MMP_DT 3795
-dragon7 MACH_DRAGON7 DRAGON7 3796
 krome MACH_KROME KROME 3797
-oratisdante MACH_ORATISDANTE ORATISDANTE 3798
-fathom MACH_FATHOM FATHOM 3799
-dns325 MACH_DNS325 DNS325 3800
-sarnen MACH_SARNEN SARNEN 3801
-ubisys_g1 MACH_UBISYS_G1 UBISYS_G1 3802
-mx53_pf1 MACH_MX53_PF1 MX53_PF1 3803
-asanti MACH_ASANTI ASANTI 3804
-volta MACH_VOLTA VOLTA 3805
-knight MACH_KNIGHT KNIGHT 3807
-beaglebone MACH_BEAGLEBONE BEAGLEBONE 3808
-becker MACH_BECKER BECKER 3809
-fc360 MACH_FC360 FC360 3810
-pmi2_xls MACH_PMI2_XLS PMI2_XLS 3811
-taranto MACH_TARANTO TARANTO 3812
-plutux MACH_PLUTUX PLUTUX 3813
-ipmp_medcom MACH_IPMP_MEDCOM IPMP_MEDCOM 3814
-absolut MACH_ABSOLUT ABSOLUT 3815
-awpb3 MACH_AWPB3 AWPB3 3816
-nfp32xx_dt MACH_NFP32XX_DT NFP32XX_DT 3817
-dl_pb53 MACH_DL_PB53 DL_PB53 3818
-acu_ii MACH_ACU_II ACU_II 3819
-avalon MACH_AVALON AVALON 3820
-sphinx MACH_SPHINX SPHINX 3821
-titan_t MACH_TITAN_T TITAN_T 3822
-harvest_boris MACH_HARVEST_BORIS HARVEST_BORIS 3823
-mach_msm7x30_m3s MACH_MACH_MSM7X30_M3S MACH_MSM7X30_M3S 3824
-smdk5250 MACH_SMDK5250 SMDK5250 3825
-imxt_lite MACH_IMXT_LITE IMXT_LITE 3826
-imxt_std MACH_IMXT_STD IMXT_STD 3827
-imxt_log MACH_IMXT_LOG IMXT_LOG 3828
-imxt_nav MACH_IMXT_NAV IMXT_NAV 3829
-imxt_full MACH_IMXT_FULL IMXT_FULL 3830
-ag09015 MACH_AG09015 AG09015 3831
-am3517_mt_ventoux MACH_AM3517_MT_VENTOUX AM3517_MT_VENTOUX 3832
-dp1arm9 MACH_DP1ARM9 DP1ARM9 3833
-picasso_m MACH_PICASSO_M PICASSO_M 3834
-video_gadget MACH_VIDEO_GADGET VIDEO_GADGET 3835
-mtt_om3x MACH_MTT_OM3X MTT_OM3X 3836
-mx6q_arm2 MACH_MX6Q_ARM2 MX6Q_ARM2 3837
-picosam9g45 MACH_PICOSAM9G45 PICOSAM9G45 3838
-vpm_dm365 MACH_VPM_DM365 VPM_DM365 3839
-bonfire MACH_BONFIRE BONFIRE 3840
-mt2p2d MACH_MT2P2D MT2P2D 3841
-sigpda01 MACH_SIGPDA01 SIGPDA01 3842
-cn27 MACH_CN27 CN27 3843
-mx25_cwtap MACH_MX25_CWTAP MX25_CWTAP 3844
-apf28 MACH_APF28 APF28 3845
-pelco_maxwell MACH_PELCO_MAXWELL PELCO_MAXWELL 3846
-ge_phoenix MACH_GE_PHOENIX GE_PHOENIX 3847
-empc_a500 MACH_EMPC_A500 EMPC_A500 3848
-ims_arm9 MACH_IMS_ARM9 IMS_ARM9 3849
-mini2416 MACH_MINI2416 MINI2416 3850
-mini2450 MACH_MINI2450 MINI2450 3851
-mini310 MACH_MINI310 MINI310 3852
-spear_hurricane MACH_SPEAR_HURRICANE SPEAR_HURRICANE 3853
-mt7208 MACH_MT7208 MT7208 3854
-lpc178x MACH_LPC178X LPC178X 3855
-farleys MACH_FARLEYS FARLEYS 3856
-efm32gg_dk3750 MACH_EFM32GG_DK3750 EFM32GG_DK3750 3857
-zeus_board MACH_ZEUS_BOARD ZEUS_BOARD 3858
-cc51 MACH_CC51 CC51 3859
-fxi_c210 MACH_FXI_C210 FXI_C210 3860
-msm8627_cdp MACH_MSM8627_CDP MSM8627_CDP 3861
-msm8627_mtp MACH_MSM8627_MTP MSM8627_MTP 3862
 armadillo800eva MACH_ARMADILLO800EVA ARMADILLO800EVA 3863
-primou MACH_PRIMOU PRIMOU 3864
-primoc MACH_PRIMOC PRIMOC 3865
-primoct MACH_PRIMOCT PRIMOCT 3866
-a9500 MACH_A9500 A9500 3867
-pluto MACH_PLUTO PLUTO 3869
-acfx100 MACH_ACFX100 ACFX100 3870
-msm8625_rumi3 MACH_MSM8625_RUMI3 MSM8625_RUMI3 3871
-valente MACH_VALENTE VALENTE 3872
-crfs_rfeye MACH_CRFS_RFEYE CRFS_RFEYE 3873
-rfeye MACH_RFEYE RFEYE 3874
-phidget_sbc3 MACH_PHIDGET_SBC3 PHIDGET_SBC3 3875
-tcw_mika MACH_TCW_MIKA TCW_MIKA 3876
-imx28_egf MACH_IMX28_EGF IMX28_EGF 3877
-valente_wx MACH_VALENTE_WX VALENTE_WX 3878
-huangshans MACH_HUANGSHANS HUANGSHANS 3879
-bosphorus1 MACH_BOSPHORUS1 BOSPHORUS1 3880
-prima MACH_PRIMA PRIMA 3881
-evita_ulk MACH_EVITA_ULK EVITA_ULK 3884
-merisc600 MACH_MERISC600 MERISC600 3885
-dolak MACH_DOLAK DOLAK 3886
-sbc53 MACH_SBC53 SBC53 3887
-elite_ulk MACH_ELITE_ULK ELITE_ULK 3888
-pov2 MACH_POV2 POV2 3889
-ipod_touch_2g MACH_IPOD_TOUCH_2G IPOD_TOUCH_2G 3890
-da850_pqab MACH_DA850_PQAB DA850_PQAB 3891
-fermi MACH_FERMI FERMI 3892
-ccardwmx28 MACH_CCARDWMX28 CCARDWMX28 3893
-ccardmx28 MACH_CCARDMX28 CCARDMX28 3894
-fs20_fcm2050 MACH_FS20_FCM2050 FS20_FCM2050 3895
-kinetis MACH_KINETIS KINETIS 3896
-kai MACH_KAI KAI 3897
-bcthb2 MACH_BCTHB2 BCTHB2 3898
-inels3_cu MACH_INELS3_CU INELS3_CU 3899
-da850_apollo MACH_DA850_APOLLO DA850_APOLLO 3901
-tracnas MACH_TRACNAS TRACNAS 3902
-mityarm335x MACH_MITYARM335X MITYARM335X 3903
-xcgz7x MACH_XCGZ7X XCGZ7X 3904
-cubox MACH_CUBOX CUBOX 3905
-terminator MACH_TERMINATOR TERMINATOR 3906
-eye03 MACH_EYE03 EYE03 3907
-kota3 MACH_KOTA3 KOTA3 3908
-pscpe MACH_PSCPE PSCPE 3910
-akt1100 MACH_AKT1100 AKT1100 3911
-pcaaxl2 MACH_PCAAXL2 PCAAXL2 3912
-primodd_ct MACH_PRIMODD_CT PRIMODD_CT 3913
-nsbc MACH_NSBC NSBC 3914
-meson2_skt MACH_MESON2_SKT MESON2_SKT 3915
-meson2_ref MACH_MESON2_REF MESON2_REF 3916
-ccardwmx28js MACH_CCARDWMX28JS CCARDWMX28JS 3917
-ccardmx28js MACH_CCARDMX28JS CCARDMX28JS 3918
-indico MACH_INDICO INDICO 3919
-msm8960dt MACH_MSM8960DT MSM8960DT 3920
-primods MACH_PRIMODS PRIMODS 3921
-beluga_m1388 MACH_BELUGA_M1388 BELUGA_M1388 3922
-primotd MACH_PRIMOTD PRIMOTD 3923
-varan_master MACH_VARAN_MASTER VARAN_MASTER 3924
-primodd MACH_PRIMODD PRIMODD 3925
-jetduo MACH_JETDUO JETDUO 3926
 mx53_umobo MACH_MX53_UMOBO MX53_UMOBO 3927
-trats MACH_TRATS TRATS 3928
-starcraft MACH_STARCRAFT STARCRAFT 3929
-qseven_tegra2 MACH_QSEVEN_TEGRA2 QSEVEN_TEGRA2 3930
-lichee_sun4i_devbd MACH_LICHEE_SUN4I_DEVBD LICHEE_SUN4I_DEVBD 3931
-movenow MACH_MOVENOW MOVENOW 3932
-golf_u MACH_GOLF_U GOLF_U 3933
-msm7627a_evb MACH_MSM7627A_EVB MSM7627A_EVB 3934
-rambo MACH_RAMBO RAMBO 3935
-golfu MACH_GOLFU GOLFU 3936
-mango310 MACH_MANGO310 MANGO310 3937
-dns343 MACH_DNS343 DNS343 3938
-var_som_om44 MACH_VAR_SOM_OM44 VAR_SOM_OM44 3939
-naon MACH_NAON NAON 3940
-vp4000 MACH_VP4000 VP4000 3941
-impcard MACH_IMPCARD IMPCARD 3942
-smoovcam MACH_SMOOVCAM SMOOVCAM 3943
-cobham3725 MACH_COBHAM3725 COBHAM3725 3944
-cobham3730 MACH_COBHAM3730 COBHAM3730 3945
-cobham3703 MACH_COBHAM3703 COBHAM3703 3946
-quetzal MACH_QUETZAL QUETZAL 3947
-apq8064_cdp MACH_APQ8064_CDP APQ8064_CDP 3948
-apq8064_mtp MACH_APQ8064_MTP APQ8064_MTP 3949
-apq8064_fluid MACH_APQ8064_FLUID APQ8064_FLUID 3950
-apq8064_liquid MACH_APQ8064_LIQUID APQ8064_LIQUID 3951
-mango210 MACH_MANGO210 MANGO210 3952
-mango100 MACH_MANGO100 MANGO100 3953
-mango24 MACH_MANGO24 MANGO24 3954
-mango64 MACH_MANGO64 MANGO64 3955
-nsa320 MACH_NSA320 NSA320 3956
-elv_ccu2 MACH_ELV_CCU2 ELV_CCU2 3957
-triton_x00 MACH_TRITON_X00 TRITON_X00 3958
-triton_1500_2000 MACH_TRITON_1500_2000 TRITON_1500_2000 3959
-pogoplugv4 MACH_POGOPLUGV4 POGOPLUGV4 3960
-venus_cl MACH_VENUS_CL VENUS_CL 3961
-vulcano_g20 MACH_VULCANO_G20 VULCANO_G20 3962
-sgs_i9100 MACH_SGS_I9100 SGS_I9100 3963
-stsv2 MACH_STSV2 STSV2 3964
-csb1724 MACH_CSB1724 CSB1724 3965
-omapl138_lcdk MACH_OMAPL138_LCDK OMAPL138_LCDK 3966
-pvd_mx25 MACH_PVD_MX25 PVD_MX25 3968
-meson6_skt MACH_MESON6_SKT MESON6_SKT 3969
-meson6_ref MACH_MESON6_REF MESON6_REF 3970
-pxm MACH_PXM PXM 3971
-pogoplugv3 MACH_POGOPLUGV3 POGOPLUGV3 3973
-mlp89626 MACH_MLP89626 MLP89626 3974
-iomegahmndce MACH_IOMEGAHMNDCE IOMEGAHMNDCE 3975
-pogoplugv3pci MACH_POGOPLUGV3PCI POGOPLUGV3PCI 3976
-bntv250 MACH_BNTV250 BNTV250 3977
-mx53_qseven MACH_MX53_QSEVEN MX53_QSEVEN 3978
-gtl_it1100 MACH_GTL_IT1100 GTL_IT1100 3979
-mx6q_sabresd MACH_MX6Q_SABRESD MX6Q_SABRESD 3980
 mt4 MACH_MT4 MT4 3981
-jumbo_d MACH_JUMBO_D JUMBO_D 3982
-jumbo_i MACH_JUMBO_I JUMBO_I 3983
-fs20_dmp MACH_FS20_DMP FS20_DMP 3984
-dns320 MACH_DNS320 DNS320 3985
-mx28bacos MACH_MX28BACOS MX28BACOS 3986
-tl80 MACH_TL80 TL80 3987
-polatis_nic_1001 MACH_POLATIS_NIC_1001 POLATIS_NIC_1001 3988
-tely MACH_TELY TELY 3989
 u8520 MACH_U8520 U8520 3990
-manta MACH_MANTA MANTA 3991
-mpq8064_cdp MACH_MPQ8064_CDP MPQ8064_CDP 3993
-mpq8064_dtv MACH_MPQ8064_DTV MPQ8064_DTV 3995
-dm368som MACH_DM368SOM DM368SOM 3996
-gprisb2 MACH_GPRISB2 GPRISB2 3997
-chammid MACH_CHAMMID CHAMMID 3998
-seoul2 MACH_SEOUL2 SEOUL2 3999
-omap4_nooktablet MACH_OMAP4_NOOKTABLET OMAP4_NOOKTABLET 4000
-aalto MACH_AALTO AALTO 4001
-metro MACH_METRO METRO 4002
-cydm3730 MACH_CYDM3730 CYDM3730 4003
-tqma53 MACH_TQMA53 TQMA53 4004
-msm7627a_qrd3 MACH_MSM7627A_QRD3 MSM7627A_QRD3 4005
-mx28_canby MACH_MX28_CANBY MX28_CANBY 4006
-tiger MACH_TIGER TIGER 4007
-pcats_9307_type_a MACH_PCATS_9307_TYPE_A PCATS_9307_TYPE_A 4008
-pcats_9307_type_o MACH_PCATS_9307_TYPE_O PCATS_9307_TYPE_O 4009
-pcats_9307_type_r MACH_PCATS_9307_TYPE_R PCATS_9307_TYPE_R 4010
-streamplug MACH_STREAMPLUG STREAMPLUG 4011
-icechicken_dev MACH_ICECHICKEN_DEV ICECHICKEN_DEV 4012
-hedgehog MACH_HEDGEHOG HEDGEHOG 4013
-yusend_obc MACH_YUSEND_OBC YUSEND_OBC 4014
-imxninja MACH_IMXNINJA IMXNINJA 4015
-omap4_jarod MACH_OMAP4_JAROD OMAP4_JAROD 4016
-eco5_pk MACH_ECO5_PK ECO5_PK 4017
-qj2440 MACH_QJ2440 QJ2440 4018
-mx6q_mercury MACH_MX6Q_MERCURY MX6Q_MERCURY 4019
-cm6810 MACH_CM6810 CM6810 4020
-omap4_torpedo MACH_OMAP4_TORPEDO OMAP4_TORPEDO 4021
-nsa310 MACH_NSA310 NSA310 4022
-tmx536 MACH_TMX536 TMX536 4023
-ktt20 MACH_KTT20 KTT20 4024
-dragonix MACH_DRAGONIX DRAGONIX 4025
-lungching MACH_LUNGCHING LUNGCHING 4026
-bulogics MACH_BULOGICS BULOGICS 4027
-mx535_sx MACH_MX535_SX MX535_SX 4028
-ngui3250 MACH_NGUI3250 NGUI3250 4029
-salutec_dac MACH_SALUTEC_DAC SALUTEC_DAC 4030
-loco MACH_LOCO LOCO 4031
-ctera_plug_usi MACH_CTERA_PLUG_USI CTERA_PLUG_USI 4032
-scepter MACH_SCEPTER SCEPTER 4033
-sga MACH_SGA SGA 4034
-p_81_j5 MACH_P_81_J5 P_81_J5 4035
-p_81_o4 MACH_P_81_O4 P_81_O4 4036
-msm8625_surf MACH_MSM8625_SURF MSM8625_SURF 4037
-carallon_shark MACH_CARALLON_SHARK CARALLON_SHARK 4038
-ordog MACH_ORDOG ORDOG 4040
-puente_io MACH_PUENTE_IO PUENTE_IO 4041
-msm8625_evb MACH_MSM8625_EVB MSM8625_EVB 4042
-ev_am1707 MACH_EV_AM1707 EV_AM1707 4043
-ev_am1707e2 MACH_EV_AM1707E2 EV_AM1707E2 4044
-ev_am3517e2 MACH_EV_AM3517E2 EV_AM3517E2 4045
-calabria MACH_CALABRIA CALABRIA 4046
-ev_imx287 MACH_EV_IMX287 EV_IMX287 4047
-erau MACH_ERAU ERAU 4048
-sichuan MACH_SICHUAN SICHUAN 4049
-davinci_da850 MACH_DAVINCI_DA850 DAVINCI_DA850 4051
-omap138_trunarc MACH_OMAP138_TRUNARC OMAP138_TRUNARC 4052
-bcm4761 MACH_BCM4761 BCM4761 4053
-picasso_e2 MACH_PICASSO_E2 PICASSO_E2 4054
-picasso_mf MACH_PICASSO_MF PICASSO_MF 4055
-miro MACH_MIRO MIRO 4056
-at91sam9g20ewon3 MACH_AT91SAM9G20EWON3 AT91SAM9G20EWON3 4057
-yoyo MACH_YOYO YOYO 4058
-windjkl MACH_WINDJKL WINDJKL 4059
-monarudo MACH_MONARUDO MONARUDO 4060
-batan MACH_BATAN BATAN 4061
-tadao MACH_TADAO TADAO 4062
-baso MACH_BASO BASO 4063
-mahon MACH_MAHON MAHON 4064
-villec2 MACH_VILLEC2 VILLEC2 4065
-asi1230 MACH_ASI1230 ASI1230 4066
-alaska MACH_ALASKA ALASKA 4067
-swarco_shdsl2 MACH_SWARCO_SHDSL2 SWARCO_SHDSL2 4068
-oxrtu MACH_OXRTU OXRTU 4069
-omap5_panda MACH_OMAP5_PANDA OMAP5_PANDA 4070
-c8000 MACH_C8000 C8000 4072
-bje_display3_5 MACH_BJE_DISPLAY3_5 BJE_DISPLAY3_5 4073
-picomod7 MACH_PICOMOD7 PICOMOD7 4074
-picocom5 MACH_PICOCOM5 PICOCOM5 4075
-qblissa8 MACH_QBLISSA8 QBLISSA8 4076
-armstonea8 MACH_ARMSTONEA8 ARMSTONEA8 4077
-netdcu14 MACH_NETDCU14 NETDCU14 4078
-at91sam9x5_epiphan MACH_AT91SAM9X5_EPIPHAN AT91SAM9X5_EPIPHAN 4079
-p2u MACH_P2U P2U 4080
-doris MACH_DORIS DORIS 4081
-j49 MACH_J49 J49 4082
-vdss2e MACH_VDSS2E VDSS2E 4083
-vc300 MACH_VC300 VC300 4084
-ns115_pad_test MACH_NS115_PAD_TEST NS115_PAD_TEST 4085
-ns115_pad_ref MACH_NS115_PAD_REF NS115_PAD_REF 4086
-ns115_phone_test MACH_NS115_PHONE_TEST NS115_PHONE_TEST 4087
-ns115_phone_ref MACH_NS115_PHONE_REF NS115_PHONE_REF 4088
-golfc MACH_GOLFC GOLFC 4089
-xerox_olympus MACH_XEROX_OLYMPUS XEROX_OLYMPUS 4090
-mx6sl_arm2 MACH_MX6SL_ARM2 MX6SL_ARM2 4091
-csb1701_csb1726 MACH_CSB1701_CSB1726 CSB1701_CSB1726 4092
-at91sam9xeek MACH_AT91SAM9XEEK AT91SAM9XEEK 4093
-ebv210 MACH_EBV210 EBV210 4094
-msm7627a_qrd7 MACH_MSM7627A_QRD7 MSM7627A_QRD7 4095
-svthin MACH_SVTHIN SVTHIN 4096
-duovero MACH_DUOVERO DUOVERO 4097
 chupacabra MACH_CHUPACABRA CHUPACABRA 4098
 scorpion MACH_SCORPION SCORPION 4099
 davinci_he_hmi10 MACH_DAVINCI_HE_HMI10 DAVINCI_HE_HMI10 4100
@@ -1157,7 +580,6 @@ tam335x MACH_TAM335X TAM335X 4116
 grouper MACH_GROUPER GROUPER 4117
 mpcsa21_9g20 MACH_MPCSA21_9G20 MPCSA21_9G20 4118
 m6u_cpu MACH_M6U_CPU M6U_CPU 4119
-davinci_dp10 MACH_DAVINCI_DP10 DAVINCI_DP10 4120
 ginkgo MACH_GINKGO GINKGO 4121
 cgt_qmx6 MACH_CGT_QMX6 CGT_QMX6 4122
 profpga MACH_PROFPGA PROFPGA 4123
@@ -1204,3 +626,384 @@ baileys MACH_BAILEYS BAILEYS 4169
 familybox MACH_FAMILYBOX FAMILYBOX 4170
 ensemble_mx35 MACH_ENSEMBLE_MX35 ENSEMBLE_MX35 4171
 sc_sps_1 MACH_SC_SPS_1 SC_SPS_1 4172
+ucsimply_sam9260 MACH_UCSIMPLY_SAM9260 UCSIMPLY_SAM9260 4173
+unicorn MACH_UNICORN UNICORN 4174
+m9g45a MACH_M9G45A M9G45A 4175
+mtwebif MACH_MTWEBIF MTWEBIF 4176
+playstone MACH_PLAYSTONE PLAYSTONE 4177
+chelsea MACH_CHELSEA CHELSEA 4178
+bayern MACH_BAYERN BAYERN 4179
+mitwo MACH_MITWO MITWO 4180
+mx25_noah MACH_MX25_NOAH MX25_NOAH 4181
+stm_b2020 MACH_STM_B2020 STM_B2020 4182
+annax_src MACH_ANNAX_SRC ANNAX_SRC 4183
+ionics_stratus MACH_IONICS_STRATUS IONICS_STRATUS 4184
+hugo MACH_HUGO HUGO 4185
+em300 MACH_EM300 EM300 4186
+mmp3_qseven MACH_MMP3_QSEVEN MMP3_QSEVEN 4187
+bosphorus2 MACH_BOSPHORUS2 BOSPHORUS2 4188
+tt2200 MACH_TT2200 TT2200 4189
+ocelot3 MACH_OCELOT3 OCELOT3 4190
+tek_cobra MACH_TEK_COBRA TEK_COBRA 4191
+protou MACH_PROTOU PROTOU 4192
+msm8625_evt MACH_MSM8625_EVT MSM8625_EVT 4193
+mx53_sellwood MACH_MX53_SELLWOOD MX53_SELLWOOD 4194
+somiq_am35 MACH_SOMIQ_AM35 SOMIQ_AM35 4195
+somiq_am37 MACH_SOMIQ_AM37 SOMIQ_AM37 4196
+k2_plc_cl MACH_K2_PLC_CL K2_PLC_CL 4197
+tc2 MACH_TC2 TC2 4198
+dulex_j MACH_DULEX_J DULEX_J 4199
+stm_b2044 MACH_STM_B2044 STM_B2044 4200
+deluxe_j MACH_DELUXE_J DELUXE_J 4201
+mango2443 MACH_MANGO2443 MANGO2443 4202
+cp2dcg MACH_CP2DCG CP2DCG 4203
+cp2dtg MACH_CP2DTG CP2DTG 4204
+cp2dug MACH_CP2DUG CP2DUG 4205
+var_som_am33 MACH_VAR_SOM_AM33 VAR_SOM_AM33 4206
+pepper MACH_PEPPER PEPPER 4207
+mango2450 MACH_MANGO2450 MANGO2450 4208
+valente_wx_c9 MACH_VALENTE_WX_C9 VALENTE_WX_C9 4209
+minitv MACH_MINITV MINITV 4210
+u8540 MACH_U8540 U8540 4211
+iv_atlas_i_z7e MACH_IV_ATLAS_I_Z7E IV_ATLAS_I_Z7E 4212
+mach_type_sky MACH_MACH_TYPE_SKY MACH_TYPE_SKY 4214
+bluesky MACH_BLUESKY BLUESKY 4215
+ngrouter MACH_NGROUTER NGROUTER 4216
+mx53_denetim MACH_MX53_DENETIM MX53_DENETIM 4217
+opal MACH_OPAL OPAL 4218
+gnet_us3gref MACH_GNET_US3GREF GNET_US3GREF 4219
+gnet_nc3g MACH_GNET_NC3G GNET_NC3G 4220
+gnet_ge3g MACH_GNET_GE3G GNET_GE3G 4221
+adp2 MACH_ADP2 ADP2 4222
+tqma28 MACH_TQMA28 TQMA28 4223
+kacom3 MACH_KACOM3 KACOM3 4224
+rrhdemo MACH_RRHDEMO RRHDEMO 4225
+protodug MACH_PROTODUG PROTODUG 4226
+lago MACH_LAGO LAGO 4227
+ktt30 MACH_KTT30 KTT30 4228
+ts43xx MACH_TS43XX TS43XX 4229
+mx6q_denso MACH_MX6Q_DENSO MX6Q_DENSO 4230
+comsat_gsmumts8 MACH_COMSAT_GSMUMTS8 COMSAT_GSMUMTS8 4231
+dreamx MACH_DREAMX DREAMX 4232
+thunderstonem MACH_THUNDERSTONEM THUNDERSTONEM 4233
+yoyopad MACH_YOYOPAD YOYOPAD 4234
+yoyopatient MACH_YOYOPATIENT YOYOPATIENT 4235
+a10l MACH_A10L A10L 4236
+mq60 MACH_MQ60 MQ60 4237
+linkstation_lsql MACH_LINKSTATION_LSQL LINKSTATION_LSQL 4238
+am3703gateway MACH_AM3703GATEWAY AM3703GATEWAY 4239
+accipiter MACH_ACCIPITER ACCIPITER 4240
+magnidug MACH_MAGNIDUG MAGNIDUG 4242
+hydra MACH_HYDRA HYDRA 4243
+sun3i MACH_SUN3I SUN3I 4244
+stm_b2078 MACH_STM_B2078 STM_B2078 4245
+at91sam9263deskv2 MACH_AT91SAM9263DESKV2 AT91SAM9263DESKV2 4246
+deluxe_r MACH_DELUXE_R DELUXE_R 4247
+p_98_v MACH_P_98_V P_98_V 4248
+p_98_c MACH_P_98_C P_98_C 4249
+davinci_am18xx_omn MACH_DAVINCI_AM18XX_OMN DAVINCI_AM18XX_OMN 4250
+socfpga_cyclone5 MACH_SOCFPGA_CYCLONE5 SOCFPGA_CYCLONE5 4251
+cabatuin MACH_CABATUIN CABATUIN 4252
+yoyopad_ft MACH_YOYOPAD_FT YOYOPAD_FT 4253
+dan2400evb MACH_DAN2400EVB DAN2400EVB 4254
+dan3400evb MACH_DAN3400EVB DAN3400EVB 4255
+edm_sf_imx6 MACH_EDM_SF_IMX6 EDM_SF_IMX6 4256
+edm_cf_imx6 MACH_EDM_CF_IMX6 EDM_CF_IMX6 4257
+vpos3xx MACH_VPOS3XX VPOS3XX 4258
+vulcano_9x5 MACH_VULCANO_9X5 VULCANO_9X5 4259
+spmp8000 MACH_SPMP8000 SPMP8000 4260
+catalina MACH_CATALINA CATALINA 4261
+rd88f5181l_fe MACH_RD88F5181L_FE RD88F5181L_FE 4262
+mx535_mx MACH_MX535_MX MX535_MX 4263
+armadillo840 MACH_ARMADILLO840 ARMADILLO840 4264
+spc9000baseboard MACH_SPC9000BASEBOARD SPC9000BASEBOARD 4265
+iris MACH_IRIS IRIS 4266
+protodcg MACH_PROTODCG PROTODCG 4267
+palmtree MACH_PALMTREE PALMTREE 4268
+novena MACH_NOVENA NOVENA 4269
+ma_um MACH_MA_UM MA_UM 4270
+ma_am MACH_MA_AM MA_AM 4271
+ems348 MACH_EMS348 EMS348 4272
+cm_fx6 MACH_CM_FX6 CM_FX6 4273
+arndale MACH_ARNDALE ARNDALE 4274
+q5xr5 MACH_Q5XR5 Q5XR5 4275
+willow MACH_WILLOW WILLOW 4276
+omap3621_odyv3 MACH_OMAP3621_ODYV3 OMAP3621_ODYV3 4277
+omapl138_presonus MACH_OMAPL138_PRESONUS OMAPL138_PRESONUS 4278
+dvf99 MACH_DVF99 DVF99 4279
+impression_j MACH_IMPRESSION_J IMPRESSION_J 4280
+qblissa9 MACH_QBLISSA9 QBLISSA9 4281
+robin_heliview10 MACH_ROBIN_HELIVIEW10 ROBIN_HELIVIEW10 4282
+sun7i MACH_SUN7I SUN7I 4283
+mx6q_hdmidongle MACH_MX6Q_HDMIDONGLE MX6Q_HDMIDONGLE 4284
+mx6_sid2 MACH_MX6_SID2 MX6_SID2 4285
+helios_v3 MACH_HELIOS_V3 HELIOS_V3 4286
+helios_v4 MACH_HELIOS_V4 HELIOS_V4 4287
+q7_imx6 MACH_Q7_IMX6 Q7_IMX6 4288
+odroidx MACH_ODROIDX ODROIDX 4289
+robpro MACH_ROBPRO ROBPRO 4290
+research59if_mk1 MACH_RESEARCH59IF_MK1 RESEARCH59IF_MK1 4291
+bobsleigh MACH_BOBSLEIGH BOBSLEIGH 4292
+dcshgwt3 MACH_DCSHGWT3 DCSHGWT3 4293
+gld1018 MACH_GLD1018 GLD1018 4294
+ev10 MACH_EV10 EV10 4295
+nitrogen6x MACH_NITROGEN6X NITROGEN6X 4296
+p_107_bb MACH_P_107_BB P_107_BB 4297
+evita_utl MACH_EVITA_UTL EVITA_UTL 4298
+falconwing MACH_FALCONWING FALCONWING 4299
+dct3 MACH_DCT3 DCT3 4300
+cpx2e_cell MACH_CPX2E_CELL CPX2E_CELL 4301
+amiro MACH_AMIRO AMIRO 4302
+mx6q_brassboard MACH_MX6Q_BRASSBOARD MX6Q_BRASSBOARD 4303
+dalmore MACH_DALMORE DALMORE 4304
+omap3_portal7cp MACH_OMAP3_PORTAL7CP OMAP3_PORTAL7CP 4305
+tegra_pluto MACH_TEGRA_PLUTO TEGRA_PLUTO 4306
+mx6sl_evk MACH_MX6SL_EVK MX6SL_EVK 4307
+m7 MACH_M7 M7 4308
+pxm2 MACH_PXM2 PXM2 4309
+haba_knx_lite MACH_HABA_KNX_LITE HABA_KNX_LITE 4310
+tai MACH_TAI TAI 4311
+prototd MACH_PROTOTD PROTOTD 4312
+dst_tonto MACH_DST_TONTO DST_TONTO 4313
+draco MACH_DRACO DRACO 4314
+dxr2 MACH_DXR2 DXR2 4315
+rut MACH_RUT RUT 4316
+am180x_wsc MACH_AM180X_WSC AM180X_WSC 4317
+deluxe_u MACH_DELUXE_U DELUXE_U 4318
+deluxe_ul MACH_DELUXE_UL DELUXE_UL 4319
+at91sam9260medths MACH_AT91SAM9260MEDTHS AT91SAM9260MEDTHS 4320
+matrix516 MACH_MATRIX516 MATRIX516 4321
+vid401x MACH_VID401X VID401X 4322
+helios_v5 MACH_HELIOS_V5 HELIOS_V5 4323
+playpaq2 MACH_PLAYPAQ2 PLAYPAQ2 4324
+igam MACH_IGAM IGAM 4325
+amico_i MACH_AMICO_I AMICO_I 4326
+amico_e MACH_AMICO_E AMICO_E 4327
+sentient_mm3_ck MACH_SENTIENT_MM3_CK SENTIENT_MM3_CK 4328
+smx6 MACH_SMX6 SMX6 4329
+pango MACH_PANGO PANGO 4330
+ns115_stick MACH_NS115_STICK NS115_STICK 4331
+bctrm3 MACH_BCTRM3 BCTRM3 4332
+doctorws MACH_DOCTORWS DOCTORWS 4333
+m2601 MACH_M2601 M2601 4334
+vgg1111 MACH_VGG1111 VGG1111 4337
+countach MACH_COUNTACH COUNTACH 4338
+visstrim_sm20 MACH_VISSTRIM_SM20 VISSTRIM_SM20 4339
+a639 MACH_A639 A639 4340
+spacemonkey MACH_SPACEMONKEY SPACEMONKEY 4341
+zpdu_stamp MACH_ZPDU_STAMP ZPDU_STAMP 4342
+htc_g7_clone MACH_HTC_G7_CLONE HTC_G7_CLONE 4343
+ft2080_corvus MACH_FT2080_CORVUS FT2080_CORVUS 4344
+fisland MACH_FISLAND FISLAND 4345
+zpdu MACH_ZPDU ZPDU 4346
+urt MACH_URT URT 4347
+conti_ovip MACH_CONTI_OVIP CONTI_OVIP 4348
+omapl138_nagra MACH_OMAPL138_NAGRA OMAPL138_NAGRA 4349
+da850_at3kp1 MACH_DA850_AT3KP1 DA850_AT3KP1 4350
+da850_at3kp2 MACH_DA850_AT3KP2 DA850_AT3KP2 4351
+surma MACH_SURMA SURMA 4352
+stm_b2092 MACH_STM_B2092 STM_B2092 4353
+mx535_ycr MACH_MX535_YCR MX535_YCR 4354
+m7_wl MACH_M7_WL M7_WL 4355
+m7_u MACH_M7_U M7_U 4356
+omap3_stndt_evm MACH_OMAP3_STNDT_EVM OMAP3_STNDT_EVM 4357
+m7_wlv MACH_M7_WLV M7_WLV 4358
+xam3517 MACH_XAM3517 XAM3517 4359
+a220 MACH_A220 A220 4360
+aclima_odie MACH_ACLIMA_ODIE ACLIMA_ODIE 4361
+vibble MACH_VIBBLE VIBBLE 4362
+k2_u MACH_K2_U K2_U 4363
+mx53_egf MACH_MX53_EGF MX53_EGF 4364
+novpek_imx53 MACH_NOVPEK_IMX53 NOVPEK_IMX53 4365
+novpek_imx6x MACH_NOVPEK_IMX6X NOVPEK_IMX6X 4366
+mx25_smartbox MACH_MX25_SMARTBOX MX25_SMARTBOX 4367
+eicg6410 MACH_EICG6410 EICG6410 4368
+picasso_e3 MACH_PICASSO_E3 PICASSO_E3 4369
+motonavigator MACH_MOTONAVIGATOR MOTONAVIGATOR 4370
+varioconnect2 MACH_VARIOCONNECT2 VARIOCONNECT2 4371
+deluxe_tw MACH_DELUXE_TW DELUXE_TW 4372
+kore3 MACH_KORE3 KORE3 4374
+mx6s_drs MACH_MX6S_DRS MX6S_DRS 4375
+cmimx6 MACH_CMIMX6 CMIMX6 4376
+roth MACH_ROTH ROTH 4377
+eq4ux MACH_EQ4UX EQ4UX 4378
+x1plus MACH_X1PLUS X1PLUS 4379
+modimx27 MACH_MODIMX27 MODIMX27 4380
+videon_hduac MACH_VIDEON_HDUAC VIDEON_HDUAC 4381
+blackbird MACH_BLACKBIRD BLACKBIRD 4382
+runmaster MACH_RUNMASTER RUNMASTER 4383
+ceres MACH_CERES CERES 4384
+nad435 MACH_NAD435 NAD435 4385
+ns115_proto_type MACH_NS115_PROTO_TYPE NS115_PROTO_TYPE 4386
+fs20_vcc MACH_FS20_VCC FS20_VCC 4387
+meson6tv_skt MACH_MESON6TV_SKT MESON6TV_SKT 4389
+keystone MACH_KEYSTONE KEYSTONE 4390
+pcm052 MACH_PCM052 PCM052 4391
+qrd_skud_prime MACH_QRD_SKUD_PRIME QRD_SKUD_PRIME 4393
+guf_santaro MACH_GUF_SANTARO GUF_SANTARO 4395
+sheepshead MACH_SHEEPSHEAD SHEEPSHEAD 4396
+mx6_iwg15m_mxm MACH_MX6_IWG15M_MXM MX6_IWG15M_MXM 4397
+mx6_iwg15m_q7 MACH_MX6_IWG15M_Q7 MX6_IWG15M_Q7 4398
+at91sam9263if8mic MACH_AT91SAM9263IF8MIC AT91SAM9263IF8MIC 4399
+marcopolo MACH_MARCOPOLO MARCOPOLO 4401
+mx535_sdcr MACH_MX535_SDCR MX535_SDCR 4402
+mx53_csb2733 MACH_MX53_CSB2733 MX53_CSB2733 4403
+diva MACH_DIVA DIVA 4404
+ncr_7744 MACH_NCR_7744 NCR_7744 4405
+macallan MACH_MACALLAN MACALLAN 4406
+wnr3500 MACH_WNR3500 WNR3500 4407
+pgavrf MACH_PGAVRF PGAVRF 4408
+helios_v6 MACH_HELIOS_V6 HELIOS_V6 4409
+lcct MACH_LCCT LCCT 4410
+csndug MACH_CSNDUG CSNDUG 4411
+wandboard_imx6 MACH_WANDBOARD_IMX6 WANDBOARD_IMX6 4412
+omap4_jet MACH_OMAP4_JET OMAP4_JET 4413
+tegra_roth MACH_TEGRA_ROTH TEGRA_ROTH 4414
+m7dcg MACH_M7DCG M7DCG 4415
+m7dug MACH_M7DUG M7DUG 4416
+m7dtg MACH_M7DTG M7DTG 4417
+ap42x MACH_AP42X AP42X 4418
+var_som_mx6 MACH_VAR_SOM_MX6 VAR_SOM_MX6 4419
+pdlu MACH_PDLU PDLU 4420
+hydrogen MACH_HYDROGEN HYDROGEN 4421
+npa211e MACH_NPA211E NPA211E 4422
+arcadia MACH_ARCADIA ARCADIA 4423
+arcadia_l MACH_ARCADIA_L ARCADIA_L 4424
+msm8930dt MACH_MSM8930DT MSM8930DT 4425
+ktam3874 MACH_KTAM3874 KTAM3874 4426
+cec4 MACH_CEC4 CEC4 4427
+ape6evm MACH_APE6EVM APE6EVM 4428
+tx6 MACH_TX6 TX6 4429
+cfa10037 MACH_CFA10037 CFA10037 4431
+ezp1000 MACH_EZP1000 EZP1000 4433
+wgr826v MACH_WGR826V WGR826V 4434
+exuma MACH_EXUMA EXUMA 4435
+fregate MACH_FREGATE FREGATE 4436
+osirisimx508 MACH_OSIRISIMX508 OSIRISIMX508 4437
+st_exigo MACH_ST_EXIGO ST_EXIGO 4438
+pismo MACH_PISMO PISMO 4439
+atc7 MACH_ATC7 ATC7 4440
+nspireclp MACH_NSPIRECLP NSPIRECLP 4441
+nspiretp MACH_NSPIRETP NSPIRETP 4442
+nspirecx MACH_NSPIRECX NSPIRECX 4443
+maya MACH_MAYA MAYA 4444
+wecct MACH_WECCT WECCT 4445
+m2s MACH_M2S M2S 4446
+msm8625q_evbd MACH_MSM8625Q_EVBD MSM8625Q_EVBD 4447
+tiny210 MACH_TINY210 TINY210 4448
+g3 MACH_G3 G3 4449
+hurricane MACH_HURRICANE HURRICANE 4450
+mx6_pod MACH_MX6_POD MX6_POD 4451
+elondcn MACH_ELONDCN ELONDCN 4452
+cwmx535 MACH_CWMX535 CWMX535 4453
+m7_wlj MACH_M7_WLJ M7_WLJ 4454
+qsp_arm MACH_QSP_ARM QSP_ARM 4455
+msm8625q_skud MACH_MSM8625Q_SKUD MSM8625Q_SKUD 4456
+htcmondrian MACH_HTCMONDRIAN HTCMONDRIAN 4457
+watson_ead MACH_WATSON_EAD WATSON_EAD 4458
+mitwoa MACH_MITWOA MITWOA 4459
+omap3_wolverine MACH_OMAP3_WOLVERINE OMAP3_WOLVERINE 4460
+mapletree MACH_MAPLETREE MAPLETREE 4461
+msm8625_fih_sae MACH_MSM8625_FIH_SAE MSM8625_FIH_SAE 4462
+epc35 MACH_EPC35 EPC35 4463
+smartrtu MACH_SMARTRTU SMARTRTU 4464
+rcm101 MACH_RCM101 RCM101 4465
+amx_imx53_mxx MACH_AMX_IMX53_MXX AMX_IMX53_MXX 4466
+acer_a12 MACH_ACER_A12 ACER_A12 4470
+sbc6x MACH_SBC6X SBC6X 4471
+u2 MACH_U2 U2 4472
+smdk4270 MACH_SMDK4270 SMDK4270 4473
+priscillag MACH_PRISCILLAG PRISCILLAG 4474
+priscillac MACH_PRISCILLAC PRISCILLAC 4475
+priscilla MACH_PRISCILLA PRISCILLA 4476
+innova_shpu_v2 MACH_INNOVA_SHPU_V2 INNOVA_SHPU_V2 4477
+mach_type_dep2410 MACH_MACH_TYPE_DEP2410 MACH_TYPE_DEP2410 4479
+bctre3 MACH_BCTRE3 BCTRE3 4480
+omap_m100 MACH_OMAP_M100 OMAP_M100 4481
+flo MACH_FLO FLO 4482
+nanobone MACH_NANOBONE NANOBONE 4483
+stm_b2105 MACH_STM_B2105 STM_B2105 4484
+omap4_bsc_bap_v3 MACH_OMAP4_BSC_BAP_V3 OMAP4_BSC_BAP_V3 4485
+ss1pam MACH_SS1PAM SS1PAM 4486
+primominiu MACH_PRIMOMINIU PRIMOMINIU 4488
+mrt_35hd_dualnas_e MACH_MRT_35HD_DUALNAS_E MRT_35HD_DUALNAS_E 4489
+kiwi MACH_KIWI KIWI 4490
+hw90496 MACH_HW90496 HW90496 4491
+mep2440 MACH_MEP2440 MEP2440 4492
+colibri_t30 MACH_COLIBRI_T30 COLIBRI_T30 4493
+cwv1 MACH_CWV1 CWV1 4494
+nsa325 MACH_NSA325 NSA325 4495
+dpxmtc MACH_DPXMTC DPXMTC 4497
+tt_stuttgart MACH_TT_STUTTGART TT_STUTTGART 4498
+miranda_apcii MACH_MIRANDA_APCII MIRANDA_APCII 4499
+mx6q_moderox MACH_MX6Q_MODEROX MX6Q_MODEROX 4500
+mudskipper MACH_MUDSKIPPER MUDSKIPPER 4501
+urania MACH_URANIA URANIA 4502
+stm_b2112 MACH_STM_B2112 STM_B2112 4503
+mx6q_ats_phoenix MACH_MX6Q_ATS_PHOENIX MX6Q_ATS_PHOENIX 4505
+stm_b2116 MACH_STM_B2116 STM_B2116 4506
+mythology MACH_MYTHOLOGY MYTHOLOGY 4507
+fc360v1 MACH_FC360V1 FC360V1 4508
+gps_sensor MACH_GPS_SENSOR GPS_SENSOR 4509
+gazelle MACH_GAZELLE GAZELLE 4510
+mpq8064_dma MACH_MPQ8064_DMA MPQ8064_DMA 4511
+wems_asd01 MACH_WEMS_ASD01 WEMS_ASD01 4512
+apalis_t30 MACH_APALIS_T30 APALIS_T30 4513
+armstonea9 MACH_ARMSTONEA9 ARMSTONEA9 4515
+omap_blazetablet MACH_OMAP_BLAZETABLET OMAP_BLAZETABLET 4516
+ar6mxq MACH_AR6MXQ AR6MXQ 4517
+ar6mxs MACH_AR6MXS AR6MXS 4518
+gwventana MACH_GWVENTANA GWVENTANA 4520
+igep0033 MACH_IGEP0033 IGEP0033 4521
+h52c1_concerto MACH_H52C1_CONCERTO H52C1_CONCERTO 4524
+fcmbrd MACH_FCMBRD FCMBRD 4525
+pcaaxs1 MACH_PCAAXS1 PCAAXS1 4526
+ls_orca MACH_LS_ORCA LS_ORCA 4527
+pcm051lb MACH_PCM051LB PCM051LB 4528
+mx6s_lp507_gvci MACH_MX6S_LP507_GVCI MX6S_LP507_GVCI 4529
+dido MACH_DIDO DIDO 4530
+swarco_itc3_9g20 MACH_SWARCO_ITC3_9G20 SWARCO_ITC3_9G20 4531
+robo_roady MACH_ROBO_ROADY ROBO_ROADY 4532
+rskrza1 MACH_RSKRZA1 RSKRZA1 4533
+swarco_sid MACH_SWARCO_SID SWARCO_SID 4534
+mx6_iwg15s_sbc MACH_MX6_IWG15S_SBC MX6_IWG15S_SBC 4535
+mx6q_camaro MACH_MX6Q_CAMARO MX6Q_CAMARO 4536
+hb6mxs MACH_HB6MXS HB6MXS 4537
+lager MACH_LAGER LAGER 4538
+lp8x4x MACH_LP8X4X LP8X4X 4539
+tegratab7 MACH_TEGRATAB7 TEGRATAB7 4540
+andromeda MACH_ANDROMEDA ANDROMEDA 4541
+bootes MACH_BOOTES BOOTES 4542
+nethmi MACH_NETHMI NETHMI 4543
+tegratab MACH_TEGRATAB TEGRATAB 4544
+som5_evb MACH_SOM5_EVB SOM5_EVB 4545
+venaticorum MACH_VENATICORUM VENATICORUM 4546
+stm_b2110 MACH_STM_B2110 STM_B2110 4547
+elux_hathor MACH_ELUX_HATHOR ELUX_HATHOR 4548
+helios_v7 MACH_HELIOS_V7 HELIOS_V7 4549
+xc10v1 MACH_XC10V1 XC10V1 4550
+cp2u MACH_CP2U CP2U 4551
+iap_f MACH_IAP_F IAP_F 4552
+iap_g MACH_IAP_G IAP_G 4553
+aae MACH_AAE AAE 4554
+pegasus MACH_PEGASUS PEGASUS 4555
+cygnus MACH_CYGNUS CYGNUS 4556
+centaurus MACH_CENTAURUS CENTAURUS 4557
+msm8930_qrd8930 MACH_MSM8930_QRD8930 MSM8930_QRD8930 4558
+quby_tim MACH_QUBY_TIM QUBY_TIM 4559
+zedi3250a MACH_ZEDI3250A ZEDI3250A 4560
+grus MACH_GRUS GRUS 4561
+apollo3 MACH_APOLLO3 APOLLO3 4562
+cowon_r7 MACH_COWON_R7 COWON_R7 4563
+tonga3 MACH_TONGA3 TONGA3 4564
+p535 MACH_P535 P535 4565
+sa3874i MACH_SA3874I SA3874I 4566
+mx6_navico_com MACH_MX6_NAVICO_COM MX6_NAVICO_COM 4567
+proxmobil2 MACH_PROXMOBIL2 PROXMOBIL2 4568
+ubinux1 MACH_UBINUX1 UBINUX1 4569
+istos MACH_ISTOS ISTOS 4570
+benvolio4 MACH_BENVOLIO4 BENVOLIO4 4571
+eco5_bx2 MACH_ECO5_BX2 ECO5_BX2 4572
+eukrea_cpuimx28sd MACH_EUKREA_CPUIMX28SD EUKREA_CPUIMX28SD 4573
+domotab MACH_DOMOTAB DOMOTAB 4574
+pfla03 MACH_PFLA03 PFLA03 4575
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index cf60d0a9f176..fc6483f83ccc 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
 #define readw_be			__raw_readw
 #define readl_be			__raw_readl
 
+#define writeb_relaxed			writeb
+#define writew_relaxed			writew
+#define writel_relaxed			writel
+
 #define writeb_be			__raw_writeb
 #define writew_be			__raw_writew
 #define writel_be			__raw_writel
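
Context for the hunk above: the _relaxed MMIO accessors are weaker-ordered variants of writeX(); avr32 has no cheaper weakly-ordered store, so they can simply alias the plain accessors. A minimal sketch of the usage pattern these accessors exist for, assuming a hypothetical device whose STATUS and DOORBELL register offsets are invented for this example:

	/* Sketch only -- the device layout is hypothetical. */
	#include <linux/io.h>

	static void ring_doorbell(void __iomem *regs, u32 *desc, u32 val)
	{
		desc[0] = val;			/* normal memory write */
		writel_relaxed(0, regs + 0x08);	/* STATUS clear: no ordering
						 * vs. desc[] is needed */
		writel(1, regs + 0x00);		/* DOORBELL: writel() orders
						 * the desc[] store before
						 * this MMIO write */
	}

On architectures where the two differ, writel_relaxed() skips the barrier that orders MMIO against normal memory; on avr32 both map to the same operation, which is exactly what the defines above express.
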
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 256c5bf0adb7..04d69c4a5ac2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
 	subi	r12,r12,TI_FLAGS
 
4:	/* Anything else left to do? */
-	SET_DEFAULT_THREAD_PPR(r3, r9)	/* Set thread.ppr = 3 */
+	SET_DEFAULT_THREAD_PPR(r3, r10)	/* Set thread.ppr = 3 */
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 	beq	.ret_from_except_lite
 
@@ -657,7 +657,7 @@ resume_kernel:
 	/* Clear _TIF_EMULATE_STACK_STORE flag */
 	lis	r11,_TIF_EMULATE_STACK_STORE@h
 	addi	r5,r9,TI_FLAGS
-	ldarx	r4,0,r5
+0:	ldarx	r4,0,r5
 	andc	r4,r4,r11
 	stdcx.	r4,0,r5
 	bne-	0b
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 59dd545fdde1..16e77a81ab4f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
 		new->thread.regs->msr |=
 			(MSR_FP | new->thread.fpexc_mode);
 	}
+#ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
 		do_load_up_transact_altivec(&new->thread);
 		new->thread.regs->msr |= MSR_VEC;
 	}
+#endif
 	/* We may as well turn on VSX too since all the state is restored now */
 	if (msr & MSR_VSX)
 		new->thread.regs->msr |= MSR_VSX;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 3acb28e245b4..95068bf569ad 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
 		do_load_up_transact_fpu(&current->thread);
 		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
 	}
+#ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
 		do_load_up_transact_altivec(&current->thread);
 		regs->msr |= MSR_VEC;
 	}
+#endif
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 995f8543cb57..c1794286098c 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
 		do_load_up_transact_fpu(&current->thread);
 		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
 	}
+#ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
 		do_load_up_transact_altivec(&current->thread);
 		regs->msr |= MSR_VEC;
 	}
+#endif
 
 	return err;
 }
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 84dbace657ce..2da67e7a16d5 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
 	or	r5, r6, r5			/* Set MSR.FP+.VSX/.VEC */
 	mtmsr	r5
 
+#ifdef CONFIG_ALTIVEC
 	/* FP and VEC registers: These are recheckpointed from thread.fpr[]
 	 * and thread.vr[] respectively. The thread.transact_fpr[] version
 	 * is more modern, and will be loaded subsequently by any FPUnavailable
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
 	REST_32VRS(0, r5, r3)			/* r5 scratch, r3 THREAD ptr */
 	ld	r5, THREAD_VRSAVE(r3)
 	mtspr	SPRN_VRSAVE, r5
+#endif
 
 dont_restore_vec:
 	andi.	r0, r4, MSR_FP
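
The four powerpc hunks above (process.c, signal_32.c, signal_64.c, tm.S) apply the same fix: the transactional-memory restore paths touch AltiVec state unconditionally, but helpers such as do_load_up_transact_altivec() and the VR save/restore macros only exist when CONFIG_ALTIVEC is enabled, so non-AltiVec configurations would otherwise fail to build. The guard pattern, shown once in isolation (a sketch extracted from the hunks, not additional kernel code):

	#ifdef CONFIG_ALTIVEC
		if (msr & MSR_VEC) {
			do_load_up_transact_altivec(&current->thread);
			regs->msr |= MSR_VEC;
		}
	#endif
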
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 41cefd43655f..33db48a8ce24 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -26,17 +26,20 @@
 #define E500_PID_NUM   3
 #define E500_TLB_NUM   2
 
-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID		(1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP		(1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
 #define E500_TLB_TLB0		(1 << 2)
 
 struct tlbe_ref {
-	pfn_t pfn;
+	pfn_t pfn;		/* valid only for TLB0, except briefly */
 	unsigned int flags;	/* E500_TLB_* */
 };
 
 struct tlbe_priv {
-	struct tlbe_ref ref;	/* TLB0 only -- TLB1 uses tlb_refs */
+	struct tlbe_ref ref;
 };
 
 #ifdef CONFIG_KVM_E500V2
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {
 
 	unsigned int gtlb_nv[E500_TLB_NUM];
 
-	/*
-	 * information associated with each host TLB entry --
-	 * TLB1 only for now. If/when guest TLB1 entries can be
-	 * mapped with host TLB0, this will be used for that too.
-	 *
-	 * We don't want to use this for guest TLB0 because then we'd
-	 * have the overhead of doing the translation again even if
-	 * the entry is still in the guest TLB (e.g. we swapped out
-	 * and back, and our host TLB entries got evicted).
-	 */
-	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
 	unsigned int host_tlb1_nv;
 
 	u32 svr;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index a222edfb9a9b..1c6a9d729df4 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
 
 	/* Don't bother with unmapped entries */
-	if (!(ref->flags & E500_TLB_VALID))
-		return;
+	if (!(ref->flags & E500_TLB_VALID)) {
+		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+		     "%s: flags %x\n", __func__, ref->flags);
+		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
+	}
 
 	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
 		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 					 pfn_t pfn)
 {
 	ref->pfn = pfn;
-	ref->flags = E500_TLB_VALID;
+	ref->flags |= E500_TLB_VALID;
 
 	if (tlbe_is_writable(gtlbe))
 		kvm_set_pfn_dirty(pfn);
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
 {
 	if (ref->flags & E500_TLB_VALID) {
+		/* FIXME: don't log bogus pfn for TLB1 */
 		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
 		ref->flags = 0;
 	}
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
-	int tlbsel = 0;
-	int i;
-
-	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
-		kvmppc_e500_ref_release(ref);
-	}
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-	int stlbsel = 1;
+	int tlbsel;
 	int i;
 
-	kvmppc_e500_tlbil_all(vcpu_e500);
-
-	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
-		struct tlbe_ref *ref =
-			&vcpu_e500->tlb_refs[stlbsel][i];
-		kvmppc_e500_ref_release(ref);
+	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
+		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+			struct tlbe_ref *ref =
+				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
+			kvmppc_e500_ref_release(ref);
+		}
 	}
-
-	clear_tlb_privs(vcpu_e500);
 }
 
 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-	clear_tlb_refs(vcpu_e500);
+	kvmppc_e500_tlbil_all(vcpu_e500);
+	clear_tlb_privs(vcpu_e500);
 	clear_tlb1_bitmap(vcpu_e500);
 }
 
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
-	/* Drop old ref and setup new one. */
-	kvmppc_e500_ref_release(ref);
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
 		vcpu_e500->host_tlb1_nv = 0;
 
-	vcpu_e500->tlb_refs[1][sesel] = *ref;
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
 	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
 		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
 	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
+	WARN_ON(!(ref->flags & E500_TLB_VALID));
 
 	return sesel;
 }
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref ref;
+	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
 	int sesel;
 	int r;
 
-	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   &ref);
+				   ref);
 	if (r)
 		return r;
 
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 
 	/* Otherwise map into TLB1 */
-	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
 	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	case 0:
 		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 
-		/* Triggers after clear_tlb_refs or on initial mapping */
+		/* Triggers after clear_tlb_privs or on initial mapping */
 		if (!(priv->ref.flags & E500_TLB_VALID)) {
 			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 		} else {
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 		host_tlb_params[0].entries / host_tlb_params[0].ways;
 	host_tlb_params[1].sets = 1;
 
-	vcpu_e500->tlb_refs[0] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[0])
-		goto err;
-
-	vcpu_e500->tlb_refs[1] =
-		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
-			GFP_KERNEL);
-	if (!vcpu_e500->tlb_refs[1])
-		goto err;
-
 	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
 					   host_tlb_params[1].entries,
 					   GFP_KERNEL);
 	if (!vcpu_e500->h2g_tlb1_rmap)
-		goto err;
+		return -EINVAL;
 
 	return 0;
-
-err:
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
-	return -EINVAL;
 }
 
 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
 	kfree(vcpu_e500->h2g_tlb1_rmap);
-	kfree(vcpu_e500->tlb_refs[0]);
-	kfree(vcpu_e500->tlb_refs[1]);
 }
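
Net effect of the e500_mmu_host.c hunks above: the separate tlb_refs[] shadow arrays are gone, the single authoritative tlbe_ref lives in gtlb_priv[][].ref, and h2g_tlb1_rmap now stores esel + 1 so that guest entry 0 no longer collides with the "unmapped" encoding. A hedged sketch of that off-by-one convention (helper names invented for illustration; they do not exist in the kernel):

	/* 0 means "no guest TLB1 entry maps this host slot";
	 * any other value v maps to guest esel v - 1. */
	static inline void rmap_set(unsigned int *rmap, int sesel, int esel)
	{
		rmap[sesel] = esel + 1;
	}

	static inline int rmap_get(const unsigned int *rmap, int sesel)
	{
		return rmap[sesel] ? (int)rmap[sesel] - 1 : -1;
	}
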
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 1f89d26e65fb..2f4baa074b2e 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
 }
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
 	mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
-	if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
+	    __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
 		kvmppc_e500_tlbil_all(vcpu_e500);
+		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
+	}
 
 	kvmppc_load_guest_fp(vcpu);
 }
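
The e500mc.c hunk above closes a stale-shadow-TLB window: comparing oldpir against the current PIR misses the case where a different vcpu ran on this physical CPU in between and the PIR happens to match again. Tracking the last vcpu per physical CPU makes the invalidation condition explicit. A reduced sketch of the idea, using only constructs that appear in the hunk (the wrapper function itself is hypothetical):

	static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);

	static void invalidate_if_cpu_was_reused(struct kvm_vcpu *vcpu)
	{
		/* Flush shadow TLB entries unless this exact vcpu was the
		 * last to run here; another vcpu may have left mappings
		 * behind under the same logical PID. */
		if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
		    __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
			kvmppc_e500_tlbil_all(to_e500(vcpu));
			__get_cpu_var(last_vcpu_on_cpu) = vcpu;
		}
	}
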
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27cb32185ce1..379d96e2105e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 #define ioremap_nocache(addr, size)	ioremap(addr, size)
 #define ioremap_wc			ioremap_nocache
 
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	return (void __iomem *) offset;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4a5443118cfb..3cb47cf02530 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
 				 (((unsigned long)(vaddr)) &zero_page_mask))))
 #define __HAVE_COLOR_ZERO_PAGE
 
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
 #endif /* !__ASSEMBLY__ */
 
 /*
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index e26d430ce2fd..ff18e3cfb6b1 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -2,11 +2,16 @@
 
 
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += div64.h
+generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += local64.h
+generic-y += mutex.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += module.h
+generic-y += serial.h
 generic-y += trace_clock.h
+generic-y += types.h
 generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h
deleted file mode 100644
index 1a642b81e019..000000000000
--- a/arch/sparc/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_CPUTIME_H
-#define __SPARC_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SPARC_CPUTIME_H */
diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/sparc/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/sparc/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 08fcce90316b..7619f2f792af 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
 }
 
+#include <asm/tlbflush.h>
 #include <asm-generic/pgtable.h>
 
 /* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h
deleted file mode 100644
index f90d61c28059..000000000000
--- a/arch/sparc/include/asm/serial.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __SPARC_SERIAL_H
-#define __SPARC_SERIAL_H
-
-#define BASE_BAUD ( 1843200 / 16 )
-
-#endif /* __SPARC_SERIAL_H */
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index b73da3c5f10a..3c8917f054de 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
 			     unsigned long, unsigned long);
 
 void cpu_panic(void);
-extern void smp4m_irq_rotate(int cpu);
 
 /*
  * General functions that each host system must provide.
@@ -46,7 +45,6 @@ void sun4m_init_smp(void);
 void sun4d_init_smp(void);
 
 void smp_callin(void);
-void smp_boot_cpus(void);
 void smp_store_cpu_info(int);
 
 void smp_resched_interrupt(void);
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-#define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
-#define prof_counter(__cpu)		cpu_data(__cpu).counter
-
 void smp_setup_cpu_possible_map(void);
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index cad36f56fa03..c7de3323819c 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -18,8 +18,7 @@ do { \
  * and 2 stores in this critical code path.  -DaveM
  */
 #define switch_to(prev, next, last)					\
-do {	flush_tlb_pending();						\
-	save_and_clear_fpu();						\
+do {	save_and_clear_fpu();						\
 	/* If you are tempted to conditionalize the following */	\
 	/* so that ASI is only written if it changes, think again. */	\
 	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 2ef463494153..f0d6a9700f4c 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -11,24 +11,40 @@
 struct tlb_batch {
 	struct mm_struct *mm;
 	unsigned long tlb_nr;
+	unsigned long active;
 	unsigned long vaddrs[TLB_BATCH_NR];
 };
 
 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tsb_user(struct tlb_batch *tb);
+extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
-extern void flush_tlb_pending(void);
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-#define flush_tlb_range(vma,start,end)	\
-	do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_page(vma,addr)	flush_tlb_pending()
-#define flush_tlb_mm(mm)		flush_tlb_pending()
+extern void flush_tlb_pending(void);
+extern void arch_enter_lazy_mmu_mode(void);
+extern void arch_leave_lazy_mmu_mode(void);
+#define arch_flush_lazy_mmu_mode()	do { } while (0)
 
 /* Local cpu only.  */
 extern void __flush_tlb_all(void);
-
+extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
 extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
 	__flush_tlb_kernel_range(start,end); \
 } while (0)
 
+static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
+}
+
 #else /* CONFIG_SMP */
 
 extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {	flush_tsb_kernel_range(start,end); \
 	smp_flush_tlb_kernel_range(start, end); \
 } while (0)
 
+#define global_flush_tlb_page(mm, vaddr) \
+	smp_flush_tlb_page(mm, vaddr)
+
 #endif /* ! CONFIG_SMP */
 
 #endif /* _SPARC64_TLBFLUSH_H */
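
With flush_tlb_{mm,page,range}() reduced to no-ops above, correctness hinges on the lazy-MMU bracket: PTE updates made between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() are batched, and the leave path flushes whatever is pending. A hedged sketch of the calling pattern the header implies (clear_one_pte() is a hypothetical helper standing in for whatever ends up in tlb_batch_add_one()):

    struct mm_struct;

    extern void arch_enter_lazy_mmu_mode(void);
    extern void arch_leave_lazy_mmu_mode(void);

    static void clear_one_pte(struct mm_struct *mm, unsigned long va) { /* hypothetical */ }

    static void unmap_pages(struct mm_struct *mm, unsigned long *vaddrs, int n)
    {
            int i;

            arch_enter_lazy_mmu_mode();             /* tb->active = 1 */
            for (i = 0; i < n; i++)
                    clear_one_pte(mm, vaddrs[i]);   /* queued into tlb_batch */
            arch_leave_lazy_mmu_mode();             /* flush_tlb_pending() if needed */
    }

A single update made outside the bracket is not lost: tlb_batch_add_one() (see arch/sparc/mm/tlb.c below) falls back to an immediate global_flush_tlb_page().
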
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index ce175aff71b7..b5843ee09fb5 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -44,7 +44,6 @@ header-y += swab.h
 header-y += termbits.h
 header-y += termios.h
 header-y += traps.h
-header-y += types.h
 header-y += uctx.h
 header-y += unistd.h
 header-y += utrap.h
diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h
deleted file mode 100644
index 383d156cde9c..000000000000
--- a/arch/sparc/include/uapi/asm/types.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _SPARC_TYPES_H
-#define _SPARC_TYPES_H
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue.  However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-
-#if defined(__sparc__)
-
-#include <asm-generic/int-ll64.h>
-
-#endif /* defined(__sparc__) */
-
-#endif /* defined(_SPARC_TYPES_H) */
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 537eb66abd06..ca64d2a86ec0 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
 }
 
 extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_fetch_glob_regs;
 extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,23 +1074,56 @@ local_flush_and_out:
 	put_cpu();
 }
 
+struct tlb_pending_info {
+	unsigned long ctx;
+	unsigned long nr;
+	unsigned long *vaddrs;
+};
+
+static void tlb_pending_func(void *info)
+{
+	struct tlb_pending_info *t = info;
+
+	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
+}
+
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
 {
 	u32 ctx = CTX_HWBITS(mm->context);
+	struct tlb_pending_info info;
 	int cpu = get_cpu();
 
+	info.ctx = ctx;
+	info.nr = nr;
+	info.vaddrs = vaddrs;
+
 	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 	else
-		smp_cross_call_masked(&xcall_flush_tlb_pending,
-				      ctx, nr, (unsigned long) vaddrs,
-				      mm_cpumask(mm));
+		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
+				       &info, 1);
 
 	__flush_tlb_pending(ctx, nr, vaddrs);
 
 	put_cpu();
 }
 
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long context = CTX_HWBITS(mm->context);
+	int cpu = get_cpu();
+
+	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+	else
+		smp_cross_call_masked(&xcall_flush_tlb_page,
+				      context, vaddr, 0,
+				      mm_cpumask(mm));
+	__flush_tlb_page(context, vaddr);
+
+	put_cpu();
+}
+
 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	start &= PAGE_MASK;
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
index 48d00e72ce15..8ec4e9c0251a 100644
--- a/arch/sparc/lib/bitext.c
+++ b/arch/sparc/lib/bitext.c
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
 
 void bit_map_init(struct bit_map *t, unsigned long *map, int size)
 {
-
-	if ((size & 07) != 0)
-		BUG();
-	memset(map, 0, size>>3);
-
+	bitmap_zero(map, size);
 	memset(t, 0, sizeof *t);
 	spin_lock_init(&t->lock);
 	t->map = map;
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 0f4f7191fbba..28f96f27c768 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -34,7 +34,7 @@
 #define IOMMU_RNGE	IOMMU_RNGE_256MB
 #define IOMMU_START	0xF0000000
 #define IOMMU_WINSIZE	(256*1024*1024U)
-#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 265KB */
+#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
 #define IOMMU_ORDER	6				/* 4096 * (1<<6) */
 
 /* srmmu.c */
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c38bb72e3e80..036c2797dece 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
 					 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
 	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
-	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
+	srmmu_nocache_bitmap =
+		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+				SMP_CACHE_BYTES, 0UL);
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
 	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
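
Both bitmap fixes above address the same unit confusion: bit_map_init() takes its length in bits but the old memset() cleared bytes (hence the BUG() unless size was a multiple of 8), and srmmu_nocache_init() sized the allocation as bits >> 3 rather than whole longs. A standalone illustration of why rounding up to longs matters (plain C; BITS_TO_LONGS reproduced from the kernel's definition):

    #include <stdio.h>
    #include <string.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            unsigned int bits = 100;
            unsigned long map[BITS_TO_LONGS(100)];

            printf("bits >> 3:          %u bytes\n", bits >> 3);  /* 12: truncates */
            printf("BITS_TO_LONGS(...): %zu bytes\n",
                   BITS_TO_LONGS(bits) * sizeof(unsigned long));  /* 16 on LP64 */

            memset(map, 0, sizeof(map));  /* bitmap_zero() equivalent: whole longs */
            return 0;
    }
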
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index ba6ae7ffdc2c..272aa4f7657e 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 void flush_tlb_pending(void)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+	struct mm_struct *mm = tb->mm;
 
-	if (tb->tlb_nr) {
-		flush_tsb_user(tb);
+	if (!tb->tlb_nr)
+		goto out;
 
-		if (CTX_VALID(tb->mm->context)) {
+	flush_tsb_user(tb);
+
+	if (CTX_VALID(mm->context)) {
+		if (tb->tlb_nr == 1) {
+			global_flush_tlb_page(mm, tb->vaddrs[0]);
+		} else {
 #ifdef CONFIG_SMP
 			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
 					      &tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
 					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		tb->tlb_nr = 0;
 	}
 
+	tb->tlb_nr = 0;
+
+out:
 	put_cpu_var(tlb_batch);
 }
 
+void arch_enter_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+	tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+
+	if (tb->tlb_nr)
+		flush_tlb_pending();
+	tb->active = 0;
+}
+
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 			      bool exec)
 {
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 		nr = 0;
 	}
 
+	if (!tb->active) {
+		global_flush_tlb_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr);
+		return;
+	}
+
 	if (nr == 0)
 		tb->mm = mm;
 
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 428982b9becf..2cc3bce5ee91 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -7,11 +7,10 @@
 #include <linux/preempt.h>
 #include <linux/slab.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #include <asm/tsb.h>
+#include <asm/tlb.h>
 #include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
-			    unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+				  unsigned long hash_shift,
+				  unsigned long nentries)
 {
-	unsigned long i;
+	unsigned long tag, ent, hash;
 
-	for (i = 0; i < tb->tlb_nr; i++) {
-		unsigned long v = tb->vaddrs[i];
-		unsigned long tag, ent, hash;
+	v &= ~0x1UL;
+	hash = tsb_hash(v, hash_shift, nentries);
+	ent = tsb + (hash * sizeof(struct tsb));
+	tag = (v >> 22UL);
 
-		v &= ~0x1UL;
+	tsb_flush(ent, tag);
+}
 
-		hash = tsb_hash(v, hash_shift, nentries);
-		ent = tsb + (hash * sizeof(struct tsb));
-		tag = (v >> 22UL);
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
+{
+	unsigned long i;
 
-		tsb_flush(ent, tag);
-	}
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
 }
 
 void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+	}
+#endif
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
 #define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
 #define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
 
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index f8e13d421fcb..432aa0cb1b38 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
 	 nop
 
 	.align		32
+	.globl		__flush_tlb_page
+__flush_tlb_page:	/* 22 insns */
+	/* %o0 = context, %o1 = vaddr */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, %pstate
+	mov		SECONDARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	stxa		%o0, [%o4] ASI_DMMU
+	andcc		%o1, 1, %g0
+	andn		%o1, 1, %o3
+	be,pn		%icc, 1f
+	 or		%o3, 0x10, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	retl
+	 wrpr		%g7, 0x0, %pstate
+	nop
+	nop
+	nop
+	nop
+
+	.align		32
 	.globl		__flush_tlb_pending
 __flush_tlb_pending:	/* 26 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
 	retl
 	 wrpr		%g7, 0x0, %pstate
 
+__cheetah_flush_tlb_page:	/* 22 insns */
+	/* %o0 = context, %o1 = vaddr */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, 0x0, %pstate
+	wrpr		%g0, 1, %tl
+	mov		PRIMARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
+	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
+	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
+	stxa		%o0, [%o4] ASI_DMMU
+	andcc		%o1, 1, %g0
+	be,pn		%icc, 1f
+	 andn		%o1, 1, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	wrpr		%g0, 0, %tl
+	retl
+	 wrpr		%g7, 0x0, %pstate
+
 __cheetah_flush_tlb_pending:	/* 27 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 	rdpr		%pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
 	retl
 	 nop
 
+__hypervisor_flush_tlb_page: /* 11 insns */
+	/* %o0 = context, %o1 = vaddr */
+	mov		%o0, %g2
+	mov		%o1, %o0		/* ARG0: vaddr + IMMU-bit */
+	mov		%g2, %o1		/* ARG1: mmu context */
+	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
+	srlx		%o0, PAGE_SHIFT, %o0
+	sllx		%o0, PAGE_SHIFT, %o0
+	ta		HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pn		%o0, __hypervisor_tlb_tl0_error
+	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+	retl
+	 nop
+
 __hypervisor_flush_tlb_pending: /* 16 insns */
 	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 	sllx		%o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
 	call		tlb_patch_one
 	 mov		19, %o2
 
+	sethi		%hi(__flush_tlb_page), %o0
+	or		%o0, %lo(__flush_tlb_page), %o0
+	sethi		%hi(__cheetah_flush_tlb_page), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
+	call		tlb_patch_one
+	 mov		22, %o2
+
 	sethi		%hi(__flush_tlb_pending), %o0
 	or		%o0, %lo(__flush_tlb_pending), %o0
 	sethi		%hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
 	nop
 	nop
 
-	.globl		xcall_flush_tlb_pending
-xcall_flush_tlb_pending:	/* 21 insns */
-	/* %g5=context, %g1=nr, %g7=vaddrs[] */
-	sllx		%g1, 3, %g1
+	.globl		xcall_flush_tlb_page
+xcall_flush_tlb_page:	/* 17 insns */
+	/* %g5=context, %g1=vaddr */
 	mov		PRIMARY_CONTEXT, %g4
 	ldxa		[%g4] ASI_DMMU, %g2
 	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
 	or		%g5, %g4, %g5
 	mov		PRIMARY_CONTEXT, %g4
 	stxa		%g5, [%g4] ASI_DMMU
-1:	sub		%g1, (1 << 3), %g1
-	ldx		[%g7 + %g1], %g5
-	andcc		%g5, 0x1, %g0
+	andcc		%g1, 0x1, %g0
 	be,pn		%icc, 2f
-
-	 andn		%g5, 0x1, %g5
+	 andn		%g1, 0x1, %g5
 	stxa		%g0, [%g5] ASI_IMMU_DEMAP
 2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
 	membar		#Sync
-	brnz,pt		%g1, 1b
-	 nop
 	stxa		%g2, [%g4] ASI_DMMU
 	retry
 	nop
+	nop
 
 	.globl		xcall_flush_tlb_kernel_range
 xcall_flush_tlb_kernel_range:	/* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
 	membar		#Sync
 	retry
 
-	.globl		__hypervisor_xcall_flush_tlb_pending
-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
-	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
-	sllx		%g1, 3, %g1
+	.globl		__hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 17 insns */
+	/* %g5=ctx, %g1=vaddr */
 	mov		%o0, %g2
 	mov		%o1, %g3
 	mov		%o2, %g4
-1:	sub		%g1, (1 << 3), %g1
-	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
+	mov		%g1, %o0		/* ARG0: virtual address */
 	mov		%g5, %o1		/* ARG1: mmu context */
 	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
 	srlx		%o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
 	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
 	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
 	 mov		%o0, %g5
-	brnz,pt		%g1, 1b
-	 nop
 	mov		%g2, %o0
 	mov		%g3, %o1
 	mov		%g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
 	call		tlb_patch_one
 	 mov		10, %o2
 
+	sethi		%hi(__flush_tlb_page), %o0
+	or		%o0, %lo(__flush_tlb_page), %o0
+	sethi		%hi(__hypervisor_flush_tlb_page), %o1
+	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
+	call		tlb_patch_one
+	 mov		11, %o2
+
 	sethi		%hi(__flush_tlb_pending), %o0
 	or		%o0, %lo(__flush_tlb_pending), %o0
 	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
 	call		tlb_patch_one
 	 mov		21, %o2
 
-	sethi		%hi(xcall_flush_tlb_pending), %o0
-	or		%o0, %lo(xcall_flush_tlb_pending), %o0
-	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1
-	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
+	sethi		%hi(xcall_flush_tlb_page), %o0
+	or		%o0, %lo(xcall_flush_tlb_page), %o0
+	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
+	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
 	call		tlb_patch_one
-	 mov		21, %o2
+	 mov		17, %o2
 
 	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
 	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 70c0f3da0476..15b5cef4aa38 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1549,6 +1549,7 @@ config X86_SMAP
 config EFI
 	bool "EFI runtime service support"
 	depends on ACPI
+	select UCS2_STRING
 	---help---
 	  This enables the kernel to use EFI runtime services that are
 	  available (such as the EFI variable services).
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c205035a6b96..8615f7581820 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
 	*size = len;
 }
 
+static efi_status_t setup_efi_vars(struct boot_params *params)
+{
+	struct setup_data *data;
+	struct efi_var_bootdata *efidata;
+	u64 store_size, remaining_size, var_size;
+	efi_status_t status;
+
+	if (!sys_table->runtime->query_variable_info)
+		return EFI_UNSUPPORTED;
+
+	data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+	while (data && data->next)
+		data = (struct setup_data *)(unsigned long)data->next;
+
+	status = efi_call_phys4(sys_table->runtime->query_variable_info,
+				EFI_VARIABLE_NON_VOLATILE |
+				EFI_VARIABLE_BOOTSERVICE_ACCESS |
+				EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
+				&remaining_size, &var_size);
+
+	if (status != EFI_SUCCESS)
+		return status;
+
+	status = efi_call_phys3(sys_table->boottime->allocate_pool,
+				EFI_LOADER_DATA, sizeof(*efidata), &efidata);
+
+	if (status != EFI_SUCCESS)
+		return status;
+
+	efidata->data.type = SETUP_EFI_VARS;
+	efidata->data.len = sizeof(struct efi_var_bootdata) -
+		sizeof(struct setup_data);
+	efidata->data.next = 0;
+	efidata->store_size = store_size;
+	efidata->remaining_size = remaining_size;
+	efidata->max_var_size = var_size;
+
+	if (data)
+		data->next = (unsigned long)efidata;
+	else
+		params->hdr.setup_data = (unsigned long)efidata;
+
+	return status;
+}
+
 static efi_status_t setup_efi_pci(struct boot_params *params)
 {
 	efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
 
 	setup_graphics(boot_params);
 
+	setup_efi_vars(boot_params);
+
 	setup_efi_pci(boot_params);
 
 	status = efi_call_phys3(sys_table->boottime->allocate_pool,
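
setup_efi_vars() appends its node to the boot loader's singly linked setup_data chain: walk to the tail, then store the new node's physical address in data->next (or in params->hdr.setup_data when the chain is empty). The same append logic in isolation, as a sketch that assumes physical addresses are directly dereferenceable, which holds in the boot stub's identity-mapped environment; the struct is reduced to the fields that matter:

    #include <stdint.h>

    struct sd_node {
            uint64_t next;   /* physical address of next node, 0 at the tail */
            uint32_t type;
    };

    static void sd_append(uint64_t *head_pa, struct sd_node *node, uint64_t node_pa)
    {
            struct sd_node *d = (struct sd_node *)(uintptr_t)*head_pa;

            node->next = 0;
            while (d && d->next)                    /* walk to the tail */
                    d = (struct sd_node *)(uintptr_t)d->next;
            if (d)
                    d->next = node_pa;              /* hang off the last node */
            else
                    *head_pa = node_pa;             /* first node in the chain */
    }
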
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 60c89f30c727..2fb5d5884e23 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
 extern void efi_unmap_memmap(void);
 extern void efi_memory_uc(u64 addr, unsigned long size);
 
+struct efi_var_bootdata {
+	struct setup_data data;
+	u64 store_size;
+	u64 remaining_size;
+	u64 max_var_size;
+};
+
 #ifdef CONFIG_EFI
 
 static inline bool efi_is_native(void)
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c15ddaf90710..08744242b8d2 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,6 +6,7 @@
 #define SETUP_E820_EXT			1
 #define SETUP_DTB			2
 #define SETUP_PCI			3
+#define SETUP_EFI_VARS			4
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK	0x07FF
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a7d26d83fb70..8f4be53ea04b 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
 	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	/*
-	 * Xen emulates Hyper-V to support enlightened Windows.
-	 * Check to see first if we are on a Xen Hypervisor.
-	 */
-	if (xen_cpuid_base())
-		return false;
-
 	cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
 	      &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
 
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)
 
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
 		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-#if IS_ENABLED(CONFIG_HYPERV)
-	/*
-	 * Setup the IDT for hypervisor callback.
-	 */
-	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-#endif
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;
 
 void hv_register_vmbus_handler(int irq, irq_handler_t handler)
 {
+	/*
+	 * Setup the IDT for hypervisor callback.
+	 */
+	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
 	vmbus_irq = irq;
 	vmbus_isr = handler;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index dab7580c47ae..cc45deb791b0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 };
 
 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
-	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
-	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+	EVENT_EXTRA_END
+};
+
+static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
+	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 	EVENT_EXTRA_END
 };
 
@@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		x86_pmu.extra_regs = intel_snb_extra_regs;
+		if (boot_cpu_data.x86_model == 45)
+			x86_pmu.extra_regs = intel_snbep_extra_regs;
+		else
+			x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.event_constraints = intel_ivb_event_constraints;
 		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
 		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		x86_pmu.extra_regs = intel_snb_extra_regs;
+		if (boot_cpu_data.x86_model == 62)
+			x86_pmu.extra_regs = intel_snbep_extra_regs;
+		else
+			x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
 		x86_pmu.er_flags |= ERF_NO_HT_SHARING;
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 577db8417d15..833d51d6ee06 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
 	u32 eax = 0x00000000;
 	u32 ebx, ecx = 0, edx;
 
-	if (!have_cpuid_p())
-		return X86_VENDOR_UNKNOWN;
-
 	native_cpuid(&eax, &ebx, &ecx, &edx);
 
 	if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
 	return X86_VENDOR_UNKNOWN;
 }
 
+static int __cpuinit x86_family(void)
+{
+	u32 eax = 0x00000001;
+	u32 ebx, ecx = 0, edx;
+	int x86;
+
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	x86 = (eax >> 8) & 0xf;
+	if (x86 == 15)
+		x86 += (eax >> 20) & 0xff;
+
+	return x86;
+}
+
 void __init load_ucode_bsp(void)
 {
-	int vendor = x86_vendor();
+	int vendor, x86;
+
+	if (!have_cpuid_p())
+		return;
 
-	if (vendor == X86_VENDOR_INTEL)
+	vendor = x86_vendor();
+	x86 = x86_family();
+
+	if (vendor == X86_VENDOR_INTEL && x86 >= 6)
 		load_ucode_intel_bsp();
 }
 
 void __cpuinit load_ucode_ap(void)
 {
-	int vendor = x86_vendor();
+	int vendor, x86;
+
+	if (!have_cpuid_p())
+		return;
+
+	vendor = x86_vendor();
+	x86 = x86_family();
 
-	if (vendor == X86_VENDOR_INTEL)
+	if (vendor == X86_VENDOR_INTEL && x86 >= 6)
 		load_ucode_intel_ap();
 }
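
x86_family() follows the CPUID leaf 1 encoding: the base family is EAX[11:8], and only when that field reads 15 is the extended family in EAX[27:20] added on. A standalone check of the same arithmetic (uses the GCC/clang cpuid.h helper, so it is x86-only):

    #include <stdio.h>
    #include <cpuid.h>

    static int family_from_eax(unsigned int eax)
    {
            int family = (eax >> 8) & 0xf;

            if (family == 15)               /* extended family applies */
                    family += (eax >> 20) & 0xff;
            return family;
    }

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 1;
            printf("CPU family: %d\n", family_from_eax(eax));
            return 0;
    }
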
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 90d8cc930f5e..fae9134a2de9 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 /*
  * Keep the crash kernel below this limit. On 32 bits earlier kernels
  * would limit the kernel to the low 512 MiB due to mapping restrictions.
+ * On 64bit, old kexec-tools need to under 896MiB.
  */
 #ifdef CONFIG_X86_32
-# define CRASH_KERNEL_ADDR_MAX	(512 << 20)
+# define CRASH_KERNEL_ADDR_LOW_MAX	(512 << 20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX	(512 << 20)
 #else
-# define CRASH_KERNEL_ADDR_MAX	MAXMEM
+# define CRASH_KERNEL_ADDR_LOW_MAX	(896UL<<20)
+# define CRASH_KERNEL_ADDR_HIGH_MAX	MAXMEM
 #endif
 
 static void __init reserve_crashkernel_low(void)
@@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void)
 	unsigned long long low_base = 0, low_size = 0;
 	unsigned long total_low_mem;
 	unsigned long long base;
+	bool auto_set = false;
 	int ret;
 
 	total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
+	/* crashkernel=Y,low */
 	ret = parse_crashkernel_low(boot_command_line, total_low_mem,
 				&low_size, &base);
-	if (ret != 0 || low_size <= 0)
-		return;
+	if (ret != 0) {
+		/*
+		 * two parts from lib/swiotlb.c:
+		 *	swiotlb size: user specified with swiotlb= or default.
+		 *	swiotlb overflow buffer: now is hardcoded to 32k.
+		 *	We round it to 8M for other buffers that
+		 *	may need to stay low too.
+		 */
+		low_size = swiotlb_size_or_default() + (8UL<<20);
+		auto_set = true;
+	} else {
+		/* passed with crashkernel=0,low ? */
+		if (!low_size)
+			return;
+	}
 
 	low_base = memblock_find_in_range(low_size, (1ULL<<32),
 					low_size, alignment);
 
 	if (!low_base) {
-		pr_info("crashkernel low reservation failed - No suitable area found.\n");
+		if (!auto_set)
+			pr_info("crashkernel low reservation failed - No suitable area found.\n");
 
 		return;
 	}
@@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void)
 	const unsigned long long alignment = 16<<20;	/* 16M */
 	unsigned long long total_mem;
 	unsigned long long crash_size, crash_base;
+	bool high = false;
 	int ret;
 
 	total_mem = memblock_phys_mem_size();
 
+	/* crashkernel=XM */
 	ret = parse_crashkernel(boot_command_line, total_mem,
 				&crash_size, &crash_base);
-	if (ret != 0 || crash_size <= 0)
-		return;
+	if (ret != 0 || crash_size <= 0) {
+		/* crashkernel=X,high */
+		ret = parse_crashkernel_high(boot_command_line, total_mem,
+				&crash_size, &crash_base);
+		if (ret != 0 || crash_size <= 0)
+			return;
+		high = true;
+	}
 
 	/* 0 means: find the address automatically */
 	if (crash_base <= 0) {
@@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void)
 		 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
 		 */
 		crash_base = memblock_find_in_range(alignment,
-				CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
+				high ? CRASH_KERNEL_ADDR_HIGH_MAX :
+				       CRASH_KERNEL_ADDR_LOW_MAX,
+				crash_size, alignment);
 
 		if (!crash_base) {
 			pr_info("crashkernel reservation failed - No suitable area found.\n");
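
Taken together, the hunks above make crashkernel=X,high usable: a plain crashkernel=X still reserves below 896 MiB on 64-bit, while the ,high form may land above 4 GiB and then gets an automatic low chunk (swiotlb size plus 8 MiB, roughly 72 MiB with the common 64 MiB swiotlb default) unless crashkernel=0,low suppresses it. A condensed model of the selection logic (the flag arguments and the MAXMEM stand-in are assumptions for illustration, not the kernel's exact types):

    #define ADDR_LOW_MAX   (896ULL << 20)
    #define ADDR_HIGH_MAX  (~0ULL)          /* stands in for MAXMEM */

    /* Returns 0 and sets *limit when something should be reserved. */
    static int pick_crash_limit(int plain_ok, int high_ok, unsigned long long *limit)
    {
            int high = 0;

            if (!plain_ok) {                /* crashkernel=X absent or invalid */
                    if (!high_ok)
                            return -1;      /* nothing to reserve */
                    high = 1;               /* crashkernel=X,high given */
            }
            *limit = high ? ADDR_HIGH_MAX : ADDR_LOW_MAX;
            return 0;
    }
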
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 5f2ecaf3f9d8..e4a86a677ce1 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -41,6 +41,7 @@
 #include <linux/io.h>
 #include <linux/reboot.h>
 #include <linux/bcd.h>
+#include <linux/ucs2_string.h>
 
 #include <asm/setup.h>
 #include <asm/efi.h>
@@ -51,6 +52,13 @@
 
 #define EFI_DEBUG	1
 
+/*
+ * There's some additional metadata associated with each
+ * variable. Intel's reference implementation is 60 bytes - bump that
+ * to account for potential alignment constraints
+ */
+#define VAR_METADATA_SIZE	64
+
 struct efi __read_mostly efi = {
 	.mps        = EFI_INVALID_TABLE_ADDR,
 	.acpi       = EFI_INVALID_TABLE_ADDR,
@@ -69,6 +77,13 @@ struct efi_memory_map memmap;
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
+static u64 efi_var_store_size;
+static u64 efi_var_remaining_size;
+static u64 efi_var_max_var_size;
+static u64 boot_used_size;
+static u64 boot_var_size;
+static u64 active_size;
+
 unsigned long x86_efi_facility;
 
 /*
@@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg)
 }
 early_param("add_efi_memmap", setup_add_efi_memmap);
 
+static bool efi_no_storage_paranoia;
+
+static int __init setup_storage_paranoia(char *arg)
+{
+	efi_no_storage_paranoia = true;
+	return 0;
+}
+early_param("efi_no_storage_paranoia", setup_storage_paranoia);
+
 
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
@@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
 					       efi_char16_t *name,
 					       efi_guid_t *vendor)
 {
-	return efi_call_virt3(get_next_variable,
-			      name_size, name, vendor);
+	efi_status_t status;
+	static bool finished = false;
+	static u64 var_size;
+
+	status = efi_call_virt3(get_next_variable,
+				name_size, name, vendor);
+
+	if (status == EFI_NOT_FOUND) {
+		finished = true;
+		if (var_size < boot_used_size) {
+			boot_var_size = boot_used_size - var_size;
+			active_size += boot_var_size;
+		} else {
+			printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
+		}
+	}
+
+	if (boot_used_size && !finished) {
+		unsigned long size;
+		u32 attr;
+		efi_status_t s;
+		void *tmp;
+
+		s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
+
+		if (s != EFI_BUFFER_TOO_SMALL || !size)
+			return status;
+
+		tmp = kmalloc(size, GFP_ATOMIC);
+
+		if (!tmp)
+			return status;
+
+		s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
+
+		if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
+			var_size += size;
+			var_size += ucs2_strsize(name, 1024);
+			active_size += size;
+			active_size += VAR_METADATA_SIZE;
+			active_size += ucs2_strsize(name, 1024);
+		}
+
+		kfree(tmp);
+	}
+
+	return status;
 }
 
 static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
 					  unsigned long data_size,
 					  void *data)
 {
-	return efi_call_virt5(set_variable,
-			      name, vendor, attr,
-			      data_size, data);
+	efi_status_t status;
+	u32 orig_attr = 0;
+	unsigned long orig_size = 0;
+
+	status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
+				       NULL);
+
+	if (status != EFI_BUFFER_TOO_SMALL)
+		orig_size = 0;
+
+	status = efi_call_virt5(set_variable,
+				name, vendor, attr,
+				data_size, data);
+
+	if (status == EFI_SUCCESS) {
+		if (orig_size) {
+			active_size -= orig_size;
+			active_size -= ucs2_strsize(name, 1024);
+			active_size -= VAR_METADATA_SIZE;
+		}
+		if (data_size) {
+			active_size += data_size;
+			active_size += ucs2_strsize(name, 1024);
+			active_size += VAR_METADATA_SIZE;
+		}
+	}
+
+	return status;
 }
 
 static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -682,6 +776,9 @@ void __init efi_init(void)
 	char vendor[100] = "unknown";
 	int i = 0;
 	void *tmp;
+	struct setup_data *data;
+	struct efi_var_bootdata *efi_var_data;
+	u64 pa_data;
 
 #ifdef CONFIG_X86_32
 	if (boot_params.efi_info.efi_systab_hi ||
@@ -699,6 +796,22 @@ void __init efi_init(void)
 	if (efi_systab_init(efi_phys.systab))
 		return;
 
+	pa_data = boot_params.hdr.setup_data;
+	while (pa_data) {
+		data = early_ioremap(pa_data, sizeof(*efi_var_data));
+		if (data->type == SETUP_EFI_VARS) {
+			efi_var_data = (struct efi_var_bootdata *)data;
+
+			efi_var_store_size = efi_var_data->store_size;
+			efi_var_remaining_size = efi_var_data->remaining_size;
+			efi_var_max_var_size = efi_var_data->max_var_size;
+		}
+		pa_data = data->next;
+		early_iounmap(data, sizeof(*efi_var_data));
+	}
+
+	boot_used_size = efi_var_store_size - efi_var_remaining_size;
+
 	set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
 	/*
@@ -999,3 +1112,48 @@ u64 efi_mem_attributes(unsigned long phys_addr)
 	}
 	return 0;
 }
+
+/*
+ * Some firmware has serious problems when using more than 50% of the EFI
+ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
+ * we never use more than this safe limit.
+ *
+ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
+ * store.
+ */
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+	efi_status_t status;
+	u64 storage_size, remaining_size, max_size;
+
+	status = efi.query_variable_info(attributes, &storage_size,
+					 &remaining_size, &max_size);
+	if (status != EFI_SUCCESS)
+		return status;
+
+	if (!max_size && remaining_size > size)
+		printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+			    " is returning MaxVariableSize=0\n");
+	/*
+	 * Some firmware implementations refuse to boot if there's insufficient
+	 * space in the variable store. We account for that by refusing the
+	 * write if permitting it would reduce the available space to under
+	 * 50%. However, some firmware won't reclaim variable space until
+	 * after the used (not merely the actively used) space drops below
+	 * a threshold. We can approximate that case with the value calculated
+	 * above. If both the firmware and our calculations indicate that the
+	 * available space would drop below 50%, refuse the write.
+	 */
+
+	if (!storage_size || size > remaining_size ||
+	    (max_size && size > max_size))
+		return EFI_OUT_OF_RESOURCES;
+
+	if (!efi_no_storage_paranoia &&
+	    ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
+	     (remaining_size - size < storage_size / 2)))
+		return EFI_OUT_OF_RESOURCES;
+
+	return EFI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(efi_query_variable_store);
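
efi_query_variable_store() is meant to be the single gatekeeper in front of SetVariable. A hedged sketch of a call site (the wrapper itself is illustrative, not kernel code; efi.set_variable is the runtime-services hook this file already wraps):

    #include <linux/efi.h>

    static efi_status_t try_set_var(efi_char16_t *name, efi_guid_t *vendor,
                                    u32 attr, unsigned long size, void *data)
    {
            /* Refuse the write unless the 50%-free heuristic passes. */
            efi_status_t status = efi_query_variable_store(attr, size);

            if (status != EFI_SUCCESS)
                    return status;          /* typically EFI_OUT_OF_RESOURCES */

            return efi.set_variable(name, vendor, attr, size, data);
    }
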
diff --git a/block/blk-core.c b/block/blk-core.c
index 074b758efc42..7c288358a745 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,6 +39,7 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index f556f8a8b3f9..b7b7a88d9f68 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
 	struct rbd_device *rbd_dev = img_request->rbd_dev;
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	struct rbd_obj_request *obj_request;
+	struct rbd_obj_request *next_obj_request;
 
 	dout("%s: img %p\n", __func__, img_request);
-	for_each_obj_request(img_request, obj_request) {
+	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
 		int ret;
 
 		obj_request->callback = rbd_img_obj_callback;
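
The switch to the _safe iterator above matters because rbd_img_obj_callback() can complete and unlink a request while the walk is still in progress; the safe variant caches the next pointer before the loop body runs. The same idiom on a generic kernel list:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head node;
    };

    static void drain(struct list_head *head)
    {
            struct item *it, *next;

            /* Deleting 'it' inside plain list_for_each_entry() would leave
             * the cursor pointing into freed memory; _safe pre-loads next. */
            list_for_each_entry_safe(it, next, head, node) {
                    list_del(&it->node);
                    kfree(it);
            }
    }
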
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e3f9a99b8522..d784650d14f0 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 	struct hpet_dev *devp;
 	unsigned long addr;
 
-	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
-		return -EINVAL;
-
 	devp = file->private_data;
 	addr = devp->hd_hpets->hp_hpet_phys;
 
 	if (addr & (PAGE_SIZE - 1))
 		return -ENOSYS;
 
-	vma->vm_flags |= VM_IO;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
-					PAGE_SIZE, vma->vm_page_prot)) {
-		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
-			__func__);
-		return -EAGAIN;
-	}
-
-	return 0;
+	return vm_iomap_memory(vma, addr, PAGE_SIZE);
 #else
 	return -ENOSYS;
 #endif
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 6e13f262139a..88cfc61329d2 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 
 	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
-	BUG_ON(atc_chan_is_enabled(atchan));
-
 	/*
 	 * Submit queued descriptors ASAP, i.e. before we go through
 	 * the completed ones.
@@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 {
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
+	if (atc_chan_is_enabled(atchan))
+		return;
+
 	if (list_empty(&atchan->active_list) ||
 	    list_is_singular(&atchan->active_list)) {
 		atc_complete_all(atchan);
@@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan)
 		return;
 
 	spin_lock_irqsave(&atchan->lock, flags);
-	if (!atc_chan_is_enabled(atchan)) {
-		atc_advance_work(atchan);
-	}
+	atc_advance_work(atchan);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 42c759a4d047..3e532002e4d1 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -39,6 +39,7 @@ config FIRMWARE_MEMMAP
 config EFI_VARS
 	tristate "EFI Variable Support via sysfs"
 	depends on EFI
+	select UCS2_STRING
 	default n
 	help
 	  If you say Y here, you are able to get EFI (Extensible Firmware
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 7acafb80fd4c..182ce9471175 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -80,6 +80,7 @@
 #include <linux/slab.h>
 #include <linux/pstore.h>
 #include <linux/ctype.h>
+#include <linux/ucs2_string.h>
 
 #include <linux/fs.h>
 #include <linux/ramfs.h>
@@ -172,51 +173,6 @@ static void efivar_update_sysfs_entries(struct work_struct *);
 static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
 static bool efivar_wq_enabled = true;
 
-/* Return the number of unicode characters in data */
-static unsigned long
-utf16_strnlen(efi_char16_t *s, size_t maxlength)
-{
-	unsigned long length = 0;
-
-	while (*s++ != 0 && length < maxlength)
-		length++;
-	return length;
-}
-
-static inline unsigned long
-utf16_strlen(efi_char16_t *s)
-{
-	return utf16_strnlen(s, ~0UL);
-}
-
-/*
- * Return the number of bytes is the length of this string
- * Note: this is NOT the same as the number of unicode characters
- */
-static inline unsigned long
-utf16_strsize(efi_char16_t *data, unsigned long maxlength)
-{
-	return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
-}
-
-static inline int
-utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
-{
-	while (1) {
-		if (len == 0)
-			return 0;
-		if (*a < *b)
-			return -1;
-		if (*a > *b)
-			return 1;
-		if (*a == 0) /* implies *b == 0 */
-			return 0;
-		a++;
-		b++;
-		len--;
-	}
-}
-
 static bool
 validate_device_path(struct efi_variable *var, int match, u8 *buffer,
 		     unsigned long len)
@@ -268,7 +224,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
 	u16 filepathlength;
 	int i, desclength = 0, namelen;
 
-	namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+	namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
 
 	/* Either "Boot" or "Driver" followed by four digits of hex */
 	for (i = match; i < match+4; i++) {
@@ -291,7 +247,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
 	 * There's no stored length for the description, so it has to be
 	 * found by hand
 	 */
-	desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+	desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
 
 	/* Each boot entry must have a descriptor */
 	if (!desclength)
@@ -436,24 +392,12 @@ static efi_status_t
 check_var_size_locked(struct efivars *efivars, u32 attributes,
 		      unsigned long size)
 {
-	u64 storage_size, remaining_size, max_size;
-	efi_status_t status;
 	const struct efivar_operations *fops = efivars->ops;
 
-	if (!efivars->ops->query_variable_info)
+	if (!efivars->ops->query_variable_store)
 		return EFI_UNSUPPORTED;
 
-	status = fops->query_variable_info(attributes, &storage_size,
-					   &remaining_size, &max_size);
-
-	if (status != EFI_SUCCESS)
-		return status;
-
-	if (!storage_size || size > remaining_size || size > max_size ||
-	    (remaining_size - size) < (storage_size / 2))
-		return EFI_OUT_OF_RESOURCES;
-
-	return status;
+	return fops->query_variable_store(attributes, size);
 }
 
 
@@ -593,7 +537,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
 	spin_lock_irq(&efivars->lock);
 
 	status = check_var_size_locked(efivars, new_var->Attributes,
-	       new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+	       new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
597 541
598 if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) 542 if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
599 status = efivars->ops->set_variable(new_var->VariableName, 543 status = efivars->ops->set_variable(new_var->VariableName,
@@ -771,7 +715,7 @@ static ssize_t efivarfs_file_write(struct file *file,
771 * QueryVariableInfo() isn't supported by the firmware. 715 * QueryVariableInfo() isn't supported by the firmware.
772 */ 716 */
773 717
774 varsize = datasize + utf16_strsize(var->var.VariableName, 1024); 718 varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);
775 status = check_var_size(efivars, attributes, varsize); 719 status = check_var_size(efivars, attributes, varsize);
776 720
777 if (status != EFI_SUCCESS) { 721 if (status != EFI_SUCCESS) {
@@ -1223,7 +1167,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
1223 1167
1224 inode = NULL; 1168 inode = NULL;
1225 1169
1226 len = utf16_strlen(entry->var.VariableName); 1170 len = ucs2_strlen(entry->var.VariableName);
1227 1171
1228 /* name, plus '-', plus GUID, plus NUL*/ 1172 /* name, plus '-', plus GUID, plus NUL*/
1229 name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC); 1173 name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
@@ -1481,8 +1425,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
1481 1425
1482 if (efi_guidcmp(entry->var.VendorGuid, vendor)) 1426 if (efi_guidcmp(entry->var.VendorGuid, vendor))
1483 continue; 1427 continue;
1484 if (utf16_strncmp(entry->var.VariableName, efi_name, 1428 if (ucs2_strncmp(entry->var.VariableName, efi_name,
1485 utf16_strlen(efi_name))) { 1429 ucs2_strlen(efi_name))) {
1486 /* 1430 /*
1487 * Check if an old format, 1431 * Check if an old format,
1488 * which doesn't support holding 1432 * which doesn't support holding
@@ -1494,8 +1438,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
1494 for (i = 0; i < DUMP_NAME_LEN; i++) 1438 for (i = 0; i < DUMP_NAME_LEN; i++)
1495 efi_name_old[i] = name_old[i]; 1439 efi_name_old[i] = name_old[i];
1496 1440
1497 if (utf16_strncmp(entry->var.VariableName, efi_name_old, 1441 if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
1498 utf16_strlen(efi_name_old))) 1442 ucs2_strlen(efi_name_old)))
1499 continue; 1443 continue;
1500 } 1444 }
1501 1445
@@ -1573,8 +1517,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
1573 * Does this variable already exist? 1517 * Does this variable already exist?
1574 */ 1518 */
1575 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { 1519 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
1576 strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); 1520 strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
1577 strsize2 = utf16_strsize(new_var->VariableName, 1024); 1521 strsize2 = ucs2_strsize(new_var->VariableName, 1024);
1578 if (strsize1 == strsize2 && 1522 if (strsize1 == strsize2 &&
1579 !memcmp(&(search_efivar->var.VariableName), 1523 !memcmp(&(search_efivar->var.VariableName),
1580 new_var->VariableName, strsize1) && 1524 new_var->VariableName, strsize1) &&
@@ -1590,7 +1534,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
1590 } 1534 }
1591 1535
1592 status = check_var_size_locked(efivars, new_var->Attributes, 1536 status = check_var_size_locked(efivars, new_var->Attributes,
1593 new_var->DataSize + utf16_strsize(new_var->VariableName, 1024)); 1537 new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
1594 1538
1595 if (status && status != EFI_UNSUPPORTED) { 1539 if (status && status != EFI_UNSUPPORTED) {
1596 spin_unlock_irq(&efivars->lock); 1540 spin_unlock_irq(&efivars->lock);
@@ -1614,7 +1558,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
1614 1558
1615 /* Create the entry in sysfs. Locking is not required here */ 1559 /* Create the entry in sysfs. Locking is not required here */
1616 status = efivar_create_sysfs_entry(efivars, 1560 status = efivar_create_sysfs_entry(efivars,
1617 utf16_strsize(new_var->VariableName, 1561 ucs2_strsize(new_var->VariableName,
1618 1024), 1562 1024),
1619 new_var->VariableName, 1563 new_var->VariableName,
1620 &new_var->VendorGuid); 1564 &new_var->VendorGuid);
@@ -1644,8 +1588,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
1644 * Does this variable already exist? 1588 * Does this variable already exist?
1645 */ 1589 */
1646 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { 1590 list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
1647 strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); 1591 strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
1648 strsize2 = utf16_strsize(del_var->VariableName, 1024); 1592 strsize2 = ucs2_strsize(del_var->VariableName, 1024);
1649 if (strsize1 == strsize2 && 1593 if (strsize1 == strsize2 &&
1650 !memcmp(&(search_efivar->var.VariableName), 1594 !memcmp(&(search_efivar->var.VariableName),
1651 del_var->VariableName, strsize1) && 1595 del_var->VariableName, strsize1) &&
@@ -1691,9 +1635,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
1691 unsigned long strsize1, strsize2; 1635 unsigned long strsize1, strsize2;
1692 bool found = false; 1636 bool found = false;
1693 1637
1694 strsize1 = utf16_strsize(variable_name, 1024); 1638 strsize1 = ucs2_strsize(variable_name, 1024);
1695 list_for_each_entry_safe(entry, n, &efivars->list, list) { 1639 list_for_each_entry_safe(entry, n, &efivars->list, list) {
1696 strsize2 = utf16_strsize(entry->var.VariableName, 1024); 1640 strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
1697 if (strsize1 == strsize2 && 1641 if (strsize1 == strsize2 &&
1698 !memcmp(variable_name, &(entry->var.VariableName), 1642 !memcmp(variable_name, &(entry->var.VariableName),
1699 strsize2) && 1643 strsize2) &&
@@ -2131,7 +2075,7 @@ efivars_init(void)
2131 ops.get_variable = efi.get_variable; 2075 ops.get_variable = efi.get_variable;
2132 ops.set_variable = efi.set_variable; 2076 ops.set_variable = efi.set_variable;
2133 ops.get_next_variable = efi.get_next_variable; 2077 ops.get_next_variable = efi.get_next_variable;
2134 ops.query_variable_info = efi.query_variable_info; 2078 ops.query_variable_store = efi_query_variable_store;
2135 2079
2136 error = register_efivars(&__efivars, &ops, efi_kobj); 2080 error = register_efivars(&__efivars, &ops, efi_kobj);
2137 if (error) 2081 if (error)
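The utf16_* helpers deleted above were duplicated by several EFI users, so this series moves them into lib/ucs2_string.c (hence the new "select UCS2_STRING") and renames them ucs2_*, UCS-2 being the accurate name for UEFI's fixed-width 16-bit strings. (The check_var_size_locked() hunk is a separate change, delegating the free-space heuristics to the platform's query_variable_store hook.) The library versions keep the semantics of the deleted helpers; roughly:

	#include <linux/ucs2_string.h>

	/* Number of UCS-2 characters before the NUL, capped at maxlength */
	unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
	{
		unsigned long length = 0;

		while (*s++ != 0 && length < maxlength)
			length++;
		return length;
	}

	/* String size in bytes, NOT the number of characters */
	unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
	{
		return ucs2_strnlen(data, maxlength / sizeof(ucs2_char_t)) *
			sizeof(ucs2_char_t);
	}
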
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5d6675013864..1a38dd7dfe4e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
465 ICPU(0x3c, idle_cpu_hsw), 465 ICPU(0x3c, idle_cpu_hsw),
466 ICPU(0x3f, idle_cpu_hsw), 466 ICPU(0x3f, idle_cpu_hsw),
467 ICPU(0x45, idle_cpu_hsw), 467 ICPU(0x45, idle_cpu_hsw),
468 ICPU(0x46, idle_cpu_hsw),
468 {} 469 {}
469}; 470};
470MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); 471MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 1daa97913b7d..0bfd8cf25200 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
359 case 0x802: /* Intuos4 General Pen */ 359 case 0x802: /* Intuos4 General Pen */
360 case 0x804: /* Intuos4 Marker Pen */ 360 case 0x804: /* Intuos4 Marker Pen */
361 case 0x40802: /* Intuos4 Classic Pen */ 361 case 0x40802: /* Intuos4 Classic Pen */
362 case 0x18803: /* DTH2242 Grip Pen */ 362 case 0x18802: /* DTH2242 Grip Pen */
363 case 0x022: 363 case 0x022:
364 wacom->tool[idx] = BTN_TOOL_PEN; 364 wacom->tool[idx] = BTN_TOOL_PEN;
365 break; 365 break;
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
1912 { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 1912 { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047,
1913 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1913 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1914static const struct wacom_features wacom_features_0xBC = 1914static const struct wacom_features wacom_features_0xBC =
1915 { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047, 1915 { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047,
1916 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1916 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1917static const struct wacom_features wacom_features_0x26 = 1917static const struct wacom_features wacom_features_0x26 =
1918 { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, 1918 { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {
2144 { USB_DEVICE_WACOM(0x44) }, 2144 { USB_DEVICE_WACOM(0x44) },
2145 { USB_DEVICE_WACOM(0x45) }, 2145 { USB_DEVICE_WACOM(0x45) },
2146 { USB_DEVICE_WACOM(0x59) }, 2146 { USB_DEVICE_WACOM(0x59) },
2147 { USB_DEVICE_WACOM(0x5D) }, 2147 { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
2148 { USB_DEVICE_WACOM(0xB0) }, 2148 { USB_DEVICE_WACOM(0xB0) },
2149 { USB_DEVICE_WACOM(0xB1) }, 2149 { USB_DEVICE_WACOM(0xB1) },
2150 { USB_DEVICE_WACOM(0xB2) }, 2150 { USB_DEVICE_WACOM(0xB2) },
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
2209 { USB_DEVICE_WACOM(0x47) }, 2209 { USB_DEVICE_WACOM(0x47) },
2210 { USB_DEVICE_WACOM(0xF4) }, 2210 { USB_DEVICE_WACOM(0xF4) },
2211 { USB_DEVICE_WACOM(0xF8) }, 2211 { USB_DEVICE_WACOM(0xF8) },
2212 { USB_DEVICE_WACOM(0xF6) }, 2212 { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
2213 { USB_DEVICE_WACOM(0xFA) }, 2213 { USB_DEVICE_WACOM(0xFA) },
2214 { USB_DEVICE_LENOVO(0x6004) }, 2214 { USB_DEVICE_LENOVO(0x6004) },
2215 { } 2215 { }
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index a32e0d5aa45f..fc6aebf1e4b2 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d)
236 if (gic_arch_extn.irq_retrigger) 236 if (gic_arch_extn.irq_retrigger)
237 return gic_arch_extn.irq_retrigger(d); 237 return gic_arch_extn.irq_retrigger(d);
238 238
239 return -ENXIO; 239 /* the genirq layer expects 0 if we can't retrigger in hardware */
240 return 0;
240} 241}
241 242
242#ifdef CONFIG_SMP 243#ifdef CONFIG_SMP
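Returning -ENXIO here was a bug because the genirq core reads any nonzero return from ->irq_retrigger() as "retriggered in hardware" and skips its software fallback, losing the interrupt. Paraphrasing the consumer in kernel/irq/resend.c:

	/* Paraphrase of check_irq_resend(): zero means "no hardware
	 * retrigger happened", so the core replays the IRQ itself. */
	if (!chip->irq_retrigger || !chip->irq_retrigger(&desc->irq_data))
		resend_irq_in_software(desc);	/* hypothetical name for the
						 * tasklet-based replay path */
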
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 7e469260fe5e..9a0bdad9ad8f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error)
611 queue_io(md, bio); 611 queue_io(md, bio);
612 } else { 612 } else {
613 /* done with normal IO or empty flush */ 613 /* done with normal IO or empty flush */
614 trace_block_bio_complete(md->queue, bio, io_error);
614 bio_endio(bio, io_error); 615 bio_endio(bio, io_error);
615 } 616 }
616 } 617 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 24909eb13fec..f4e87bfc7567 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi)
184 return_bi = bi->bi_next; 184 return_bi = bi->bi_next;
185 bi->bi_next = NULL; 185 bi->bi_next = NULL;
186 bi->bi_size = 0; 186 bi->bi_size = 0;
187 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
188 bi, 0);
187 bio_endio(bi, 0); 189 bio_endio(bi, 0);
188 bi = return_bi; 190 bi = return_bi;
189 } 191 }
@@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error)
3914 rdev_dec_pending(rdev, conf->mddev); 3916 rdev_dec_pending(rdev, conf->mddev);
3915 3917
3916 if (!error && uptodate) { 3918 if (!error && uptodate) {
3919 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
3920 raid_bi, 0);
3917 bio_endio(raid_bi, 0); 3921 bio_endio(raid_bi, 0);
3918 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3922 if (atomic_dec_and_test(&conf->active_aligned_reads))
3919 wake_up(&conf->wait_for_stripe); 3923 wake_up(&conf->wait_for_stripe);
@@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
4382 if ( rw == WRITE ) 4386 if ( rw == WRITE )
4383 md_write_end(mddev); 4387 md_write_end(mddev);
4384 4388
4389 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
4390 bi, 0);
4385 bio_endio(bi, 0); 4391 bio_endio(bi, 0);
4386 } 4392 }
4387} 4393}
@@ -4758,8 +4764,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
4758 handled++; 4764 handled++;
4759 } 4765 }
4760 remaining = raid5_dec_bi_active_stripes(raid_bio); 4766 remaining = raid5_dec_bi_active_stripes(raid_bio);
4761 if (remaining == 0) 4767 if (remaining == 0) {
4768 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
4769 raid_bio, 0);
4762 bio_endio(raid_bio, 0); 4770 bio_endio(raid_bio, 0);
4771 }
4763 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4772 if (atomic_dec_and_test(&conf->active_aligned_reads))
4764 wake_up(&conf->wait_for_stripe); 4773 wake_up(&conf->wait_for_stripe);
4765 return handled; 4774 return handled;
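The dm.c and raid5.c hunks share one theme: these drivers complete certain bios directly, without passing back through the block-layer request path that would normally emit the completion tracepoint, which left holes in blktrace output. The recurring idiom they add:

	/* Keep blktrace accurate when ending a bio by hand */
	trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
	bio_endio(bio, error);
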
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 92ab30ab00dc..dc571ebc1aa0 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
1123} 1123}
1124#endif 1124#endif
1125 1125
1126static inline unsigned long get_vm_size(struct vm_area_struct *vma)
1127{
1128 return vma->vm_end - vma->vm_start;
1129}
1130
1131static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
1132{
1133 return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
1134}
1135
1136/*
1137 * Set a new vm offset.
1138 *
1139 * Verify that the incoming offset really works as a page offset,
1140 * and that the offset and size fit in a resource_size_t.
1141 */
1142static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
1143{
1144 pgoff_t pgoff = off >> PAGE_SHIFT;
1145 if (off != (resource_size_t) pgoff << PAGE_SHIFT)
1146 return -EINVAL;
1147 if (off + get_vm_size(vma) - 1 < off)
1148 return -EINVAL;
1149 vma->vm_pgoff = pgoff;
1150 return 0;
1151}
1152
1153/* 1126/*
1154 * set up a mapping for shared memory segments 1127 * set up a mapping for shared memory segments
1155 */ 1128 */
@@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1159 struct mtd_file_info *mfi = file->private_data; 1132 struct mtd_file_info *mfi = file->private_data;
1160 struct mtd_info *mtd = mfi->mtd; 1133 struct mtd_info *mtd = mfi->mtd;
1161 struct map_info *map = mtd->priv; 1134 struct map_info *map = mtd->priv;
1162 resource_size_t start, off;
1163 unsigned long len, vma_len;
1164 1135
1165 /* This is broken because it assumes the MTD device is map-based 1136 /* This is broken because it assumes the MTD device is map-based
1166 and that mtd->priv is a valid struct map_info. It should be 1137 and that mtd->priv is a valid struct map_info. It should be
1167 replaced with something that uses the mtd_get_unmapped_area() 1138 replaced with something that uses the mtd_get_unmapped_area()
1168 operation properly. */ 1139 operation properly. */
1169 if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { 1140 if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
1170 off = get_vm_offset(vma);
1171 start = map->phys;
1172 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
1173 start &= PAGE_MASK;
1174 vma_len = get_vm_size(vma);
1175
1176 /* Overflow in off+len? */
1177 if (vma_len + off < off)
1178 return -EINVAL;
1179 /* Does it fit in the mapping? */
1180 if (vma_len + off > len)
1181 return -EINVAL;
1182
1183 off += start;
1184 /* Did that overflow? */
1185 if (off < start)
1186 return -EINVAL;
1187 if (set_vm_offset(vma, off) < 0)
1188 return -EINVAL;
1189 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1190
1191#ifdef pgprot_noncached 1141#ifdef pgprot_noncached
1192 if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) 1142 if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
1193 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1143 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1194#endif 1144#endif
1195 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 1145 return vm_iomap_memory(vma, map->phys, map->size);
1196 vma->vm_end - vma->vm_start,
1197 vma->vm_page_prot))
1198 return -EAGAIN;
1199
1200 return 0;
1201 } 1146 }
1202 return -ENOSYS; 1147 return -ENOSYS;
1203#else 1148#else
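All of the bounds logic deleted above (get_vm_size(), get_vm_offset(), set_vm_offset() and the overflow tests) now lives in one shared place. vm_iomap_memory(), added to mm/memory.c earlier in this series, does approximately the following (paraphrased, not verbatim):

	int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start,
			    unsigned long len)
	{
		unsigned long vm_len, pfn, pages;

		/* Reject a physical range that wraps */
		if (start + len < start)
			return -EINVAL;

		len += start & ~PAGE_MASK;	/* tolerate unaligned start */
		pfn = start >> PAGE_SHIFT;
		pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (pfn + pages < pfn)
			return -EINVAL;

		/* The mapping starts vm_pgoff pages into the region... */
		if (vma->vm_pgoff > pages)
			return -EINVAL;
		pfn += vma->vm_pgoff;
		pages -= vma->vm_pgoff;

		/* ...and the vma must fit in what remains */
		vm_len = vma->vm_end - vma->vm_start;
		if (vm_len >> PAGE_SHIFT > pages)
			return -EINVAL;

		return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len,
					  vma->vm_page_prot);
	}
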
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07401a3e256b..dbbea0eec134 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
846 if (bond->dev->flags & IFF_ALLMULTI) 846 if (bond->dev->flags & IFF_ALLMULTI)
847 dev_set_allmulti(old_active->dev, -1); 847 dev_set_allmulti(old_active->dev, -1);
848 848
849 netif_addr_lock_bh(bond->dev);
849 netdev_for_each_mc_addr(ha, bond->dev) 850 netdev_for_each_mc_addr(ha, bond->dev)
850 dev_mc_del(old_active->dev, ha->addr); 851 dev_mc_del(old_active->dev, ha->addr);
852 netif_addr_unlock_bh(bond->dev);
851 } 853 }
852 854
853 if (new_active) { 855 if (new_active) {
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
858 if (bond->dev->flags & IFF_ALLMULTI) 860 if (bond->dev->flags & IFF_ALLMULTI)
859 dev_set_allmulti(new_active->dev, 1); 861 dev_set_allmulti(new_active->dev, 1);
860 862
863 netif_addr_lock_bh(bond->dev);
861 netdev_for_each_mc_addr(ha, bond->dev) 864 netdev_for_each_mc_addr(ha, bond->dev)
862 dev_mc_add(new_active->dev, ha->addr); 865 dev_mc_add(new_active->dev, ha->addr);
866 netif_addr_unlock_bh(bond->dev);
863 } 867 }
864} 868}
865 869
@@ -1901,11 +1905,29 @@ err_dest_symlinks:
1901 bond_destroy_slave_symlinks(bond_dev, slave_dev); 1905 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1902 1906
1903err_detach: 1907err_detach:
1908 if (!USES_PRIMARY(bond->params.mode)) {
1909 netif_addr_lock_bh(bond_dev);
1910 bond_mc_list_flush(bond_dev, slave_dev);
1911 netif_addr_unlock_bh(bond_dev);
1912 }
1913 bond_del_vlans_from_slave(bond, slave_dev);
1904 write_lock_bh(&bond->lock); 1914 write_lock_bh(&bond->lock);
1905 bond_detach_slave(bond, new_slave); 1915 bond_detach_slave(bond, new_slave);
1916 if (bond->primary_slave == new_slave)
1917 bond->primary_slave = NULL;
1906 write_unlock_bh(&bond->lock); 1918 write_unlock_bh(&bond->lock);
1919 if (bond->curr_active_slave == new_slave) {
1920 read_lock(&bond->lock);
1921 write_lock_bh(&bond->curr_slave_lock);
1922 bond_change_active_slave(bond, NULL);
1923 bond_select_active_slave(bond);
1924 write_unlock_bh(&bond->curr_slave_lock);
1925 read_unlock(&bond->lock);
1926 }
1927 slave_disable_netpoll(new_slave);
1907 1928
1908err_close: 1929err_close:
1930 slave_dev->priv_flags &= ~IFF_BONDING;
1909 dev_close(slave_dev); 1931 dev_close(slave_dev);
1910 1932
1911err_unset_master: 1933err_unset_master:
@@ -3168,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event,
3168 struct net_device *slave_dev) 3190 struct net_device *slave_dev)
3169{ 3191{
3170 struct slave *slave = bond_slave_get_rtnl(slave_dev); 3192 struct slave *slave = bond_slave_get_rtnl(slave_dev);
3171 struct bonding *bond = slave->bond; 3193 struct bonding *bond;
3172 struct net_device *bond_dev = slave->bond->dev; 3194 struct net_device *bond_dev;
3173 u32 old_speed; 3195 u32 old_speed;
3174 u8 old_duplex; 3196 u8 old_duplex;
3175 3197
3198 /* A netdev event can be generated while enslaving a device
3199 * before netdev_rx_handler_register is called in which case
3200 * slave will be NULL
3201 */
3202 if (!slave)
3203 return NOTIFY_DONE;
3204 bond_dev = slave->bond->dev;
3205 bond = slave->bond;
3206
3176 switch (event) { 3207 switch (event) {
3177 case NETDEV_UNREGISTER: 3208 case NETDEV_UNREGISTER:
3178 if (bond->setup_by_slave) 3209 if (bond->setup_by_slave)
@@ -3286,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3286 */ 3317 */
3287static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) 3318static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3288{ 3319{
3289 struct ethhdr *data = (struct ethhdr *)skb->data; 3320 const struct ethhdr *data;
3290 struct iphdr *iph; 3321 const struct iphdr *iph;
3291 struct ipv6hdr *ipv6h; 3322 const struct ipv6hdr *ipv6h;
3292 u32 v6hash; 3323 u32 v6hash;
3293 __be32 *s, *d; 3324 const __be32 *s, *d;
3294 3325
3295 if (skb->protocol == htons(ETH_P_IP) && 3326 if (skb->protocol == htons(ETH_P_IP) &&
3296 skb_network_header_len(skb) >= sizeof(*iph)) { 3327 pskb_network_may_pull(skb, sizeof(*iph))) {
3297 iph = ip_hdr(skb); 3328 iph = ip_hdr(skb);
3329 data = (struct ethhdr *)skb->data;
3298 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 3330 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3299 (data->h_dest[5] ^ data->h_source[5])) % count; 3331 (data->h_dest[5] ^ data->h_source[5])) % count;
3300 } else if (skb->protocol == htons(ETH_P_IPV6) && 3332 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3301 skb_network_header_len(skb) >= sizeof(*ipv6h)) { 3333 pskb_network_may_pull(skb, sizeof(*ipv6h))) {
3302 ipv6h = ipv6_hdr(skb); 3334 ipv6h = ipv6_hdr(skb);
3335 data = (struct ethhdr *)skb->data;
3303 s = &ipv6h->saddr.s6_addr32[0]; 3336 s = &ipv6h->saddr.s6_addr32[0];
3304 d = &ipv6h->daddr.s6_addr32[0]; 3337 d = &ipv6h->daddr.s6_addr32[0];
3305 v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); 3338 v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3318,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3318static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) 3351static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
3319{ 3352{
3320 u32 layer4_xor = 0; 3353 u32 layer4_xor = 0;
3321 struct iphdr *iph; 3354 const struct iphdr *iph;
3322 struct ipv6hdr *ipv6h; 3355 const struct ipv6hdr *ipv6h;
3323 __be32 *s, *d; 3356 const __be32 *s, *d;
3324 __be16 *layer4hdr; 3357 const __be16 *l4 = NULL;
3358 __be16 _l4[2];
3359 int noff = skb_network_offset(skb);
3360 int poff;
3325 3361
3326 if (skb->protocol == htons(ETH_P_IP) && 3362 if (skb->protocol == htons(ETH_P_IP) &&
3327 skb_network_header_len(skb) >= sizeof(*iph)) { 3363 pskb_may_pull(skb, noff + sizeof(*iph))) {
3328 iph = ip_hdr(skb); 3364 iph = ip_hdr(skb);
3329 if (!ip_is_fragment(iph) && 3365 poff = proto_ports_offset(iph->protocol);
3330 (iph->protocol == IPPROTO_TCP || 3366
3331 iph->protocol == IPPROTO_UDP) && 3367 if (!ip_is_fragment(iph) && poff >= 0) {
3332 (skb_headlen(skb) - skb_network_offset(skb) >= 3368 l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
3333 iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { 3369 sizeof(_l4), &_l4);
3334 layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); 3370 if (l4)
3335 layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); 3371 layer4_xor = ntohs(l4[0] ^ l4[1]);
3336 } 3372 }
3337 return (layer4_xor ^ 3373 return (layer4_xor ^
3338 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; 3374 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
3339 } else if (skb->protocol == htons(ETH_P_IPV6) && 3375 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3340 skb_network_header_len(skb) >= sizeof(*ipv6h)) { 3376 pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
3341 ipv6h = ipv6_hdr(skb); 3377 ipv6h = ipv6_hdr(skb);
3342 if ((ipv6h->nexthdr == IPPROTO_TCP || 3378 poff = proto_ports_offset(ipv6h->nexthdr);
3343 ipv6h->nexthdr == IPPROTO_UDP) && 3379 if (poff >= 0) {
3344 (skb_headlen(skb) - skb_network_offset(skb) >= 3380 l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
3345 sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { 3381 sizeof(_l4), &_l4);
3346 layer4hdr = (__be16 *)(ipv6h + 1); 3382 if (l4)
3347 layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); 3383 layer4_xor = ntohs(l4[0] ^ l4[1]);
3348 } 3384 }
3349 s = &ipv6h->saddr.s6_addr32[0]; 3385 s = &ipv6h->saddr.s6_addr32[0];
3350 d = &ipv6h->daddr.s6_addr32[0]; 3386 d = &ipv6h->daddr.s6_addr32[0];
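Both hash policies previously trusted skb_network_header_len() and skb_headlen() arithmetic on frames that may never have been validated, so the rewrite copies out exactly the bytes it is about to read. skb_header_pointer() does the work: it returns a direct pointer when the range is linear, copies into the caller's bounce buffer when it is paged, and returns NULL when the packet is simply too short. A standalone sketch (the helper name is hypothetical):

	static u32 l4_ports_xor(const struct sk_buff *skb, int l4_off, int poff)
	{
		__be16 _ports[2];	/* on-stack bounce buffer */
		const __be16 *ports;

		ports = skb_header_pointer(skb, l4_off + poff,
					   sizeof(_ports), &_ports);
		if (!ports)		/* too short: no ports to hash */
			return 0;

		return ntohs(ports[0] ^ ports[1]);
	}
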
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index f32b9fc6a983..9aa0c64c33c8 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net)
929 struct mcp251x_priv *priv = netdev_priv(net); 929 struct mcp251x_priv *priv = netdev_priv(net);
930 struct spi_device *spi = priv->spi; 930 struct spi_device *spi = priv->spi;
931 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 931 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
932 unsigned long flags;
932 int ret; 933 int ret;
933 934
934 ret = open_candev(net); 935 ret = open_candev(net);
@@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net)
945 priv->tx_skb = NULL; 946 priv->tx_skb = NULL;
946 priv->tx_len = 0; 947 priv->tx_len = 0;
947 948
949 flags = IRQF_ONESHOT;
950 if (pdata->irq_flags)
951 flags |= pdata->irq_flags;
952 else
953 flags |= IRQF_TRIGGER_FALLING;
954
948 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, 955 ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
949 pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, 956 flags, DEVICE_NAME, priv);
950 DEVICE_NAME, priv);
951 if (ret) { 957 if (ret) {
952 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); 958 dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
953 if (pdata->transceiver_enable) 959 if (pdata->transceiver_enable)
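Background for the mcp251x change: genirq rejects request_threaded_irq() calls that pass a NULL primary handler without IRQF_ONESHOT, since the line must stay masked until the IRQ thread has run. The required shape:

	unsigned long flags = IRQF_ONESHOT;

	flags |= pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING;

	ret = request_threaded_irq(spi->irq,
				   NULL,		/* no hard-IRQ handler */
				   mcp251x_can_ist,	/* threaded handler */
				   flags, DEVICE_NAME, priv);
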
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 6433b81256cd..8e0c4a001939 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
96 struct net_device *dev; 96 struct net_device *dev;
97 struct sja1000_priv *priv; 97 struct sja1000_priv *priv;
98 struct resource res; 98 struct resource res;
99 const u32 *prop; 99 u32 prop;
100 int err, irq, res_size, prop_size; 100 int err, irq, res_size;
101 void __iomem *base; 101 void __iomem *base;
102 102
103 err = of_address_to_resource(np, 0, &res); 103 err = of_address_to_resource(np, 0, &res);
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
138 priv->read_reg = sja1000_ofp_read_reg; 138 priv->read_reg = sja1000_ofp_read_reg;
139 priv->write_reg = sja1000_ofp_write_reg; 139 priv->write_reg = sja1000_ofp_write_reg;
140 140
141 prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size); 141 err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
142 if (prop && (prop_size == sizeof(u32))) 142 if (!err)
143 priv->can.clock.freq = *prop / 2; 143 priv->can.clock.freq = prop / 2;
144 else 144 else
145 priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ 145 priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
146 146
147 prop = of_get_property(np, "nxp,tx-output-mode", &prop_size); 147 err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
148 if (prop && (prop_size == sizeof(u32))) 148 if (!err)
149 priv->ocr |= *prop & OCR_MODE_MASK; 149 priv->ocr |= prop & OCR_MODE_MASK;
150 else 150 else
151 priv->ocr |= OCR_MODE_NORMAL; /* default */ 151 priv->ocr |= OCR_MODE_NORMAL; /* default */
152 152
153 prop = of_get_property(np, "nxp,tx-output-config", &prop_size); 153 err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
154 if (prop && (prop_size == sizeof(u32))) 154 if (!err)
155 priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK; 155 priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
156 else 156 else
157 priv->ocr |= OCR_TX0_PULLDOWN; /* default */ 157 priv->ocr |= OCR_TX0_PULLDOWN; /* default */
158 158
159 prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size); 159 err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
160 if (prop && (prop_size == sizeof(u32)) && *prop) { 160 if (!err && prop) {
161 u32 divider = priv->can.clock.freq * 2 / *prop; 161 u32 divider = priv->can.clock.freq * 2 / prop;
162 162
163 if (divider > 1) 163 if (divider > 1)
164 priv->cdr |= divider / 2 - 1; 164 priv->cdr |= divider / 2 - 1;
@@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
168 priv->cdr |= CDR_CLK_OFF; /* default */ 168 priv->cdr |= CDR_CLK_OFF; /* default */
169 } 169 }
170 170
171 prop = of_get_property(np, "nxp,no-comparator-bypass", NULL); 171 if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
172 if (!prop)
173 priv->cdr |= CDR_CBP; /* default */ 172 priv->cdr |= CDR_CBP; /* default */
174 173
175 priv->irq_flags = IRQF_SHARED; 174 priv->irq_flags = IRQF_SHARED;
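The conversion above replaces raw of_get_property() (a pointer into the device tree plus a manual length check) with the typed of_property_read_u32()/of_property_read_bool() accessors, which validate presence and size and handle the big-endian property encoding in one call:

	u32 prop;
	int err;

	err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
	if (!err)
		priv->can.clock.freq = prop / 2;
	else
		priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK;	/* default */

	if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
		priv->cdr |= CDR_CBP;				/* default */
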
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index cab306a9888e..e1d26433d619 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
828 struct ei_device *ei_local; 828 struct ei_device *ei_local;
829 struct ax_device *ax; 829 struct ax_device *ax;
830 struct resource *irq, *mem, *mem2; 830 struct resource *irq, *mem, *mem2;
831 resource_size_t mem_size, mem2_size = 0; 831 unsigned long mem_size, mem2_size = 0;
832 int ret = 0; 832 int ret = 0;
833 833
834 dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); 834 dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c2..57619dd4a92b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2614 } 2614 }
2615 } 2615 }
2616 2616
2617 /* initialize FW coalescing state machines in RAM */
2618 bnx2x_update_coalesce(bp);
2619
2617 /* setup the leading queue */ 2620 /* setup the leading queue */
2618 rc = bnx2x_setup_leading(bp); 2621 rc = bnx2x_setup_leading(bp);
2619 if (rc) { 2622 if (rc) {
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4580 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 4583 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4581 u32 addr = BAR_CSTRORM_INTMEM + 4584 u32 addr = BAR_CSTRORM_INTMEM +
4582 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); 4585 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4583 u16 flags = REG_RD16(bp, addr); 4586 u8 flags = REG_RD8(bp, addr);
4584 /* clear and set */ 4587 /* clear and set */
4585 flags &= ~HC_INDEX_DATA_HC_ENABLED; 4588 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4586 flags |= enable_flag; 4589 flags |= enable_flag;
4587 REG_WR16(bp, addr, flags); 4590 REG_WR8(bp, addr, flags);
4588 DP(NETIF_MSG_IFUP, 4591 DP(NETIF_MSG_IFUP,
4589 "port %x fw_sb_id %d sb_index %d disable %d\n", 4592 "port %x fw_sb_id %d sb_index %d disable %d\n",
4590 port, fw_sb_id, sb_index, disable); 4593 port, fw_sb_id, sb_index, disable);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 8e58da909f5c..c50696b396f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
9878 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); 9878 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
9879 } 9879 }
9880 } 9880 }
9881 if (!CHIP_IS_E1x(bp))
9882 /* block FW from writing to host */
9883 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
9884
9881 /* wait until BRB is empty */ 9885 /* wait until BRB is empty */
9882 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); 9886 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9883 while (timer_count) { 9887 while (timer_count) {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08e54f3d288b..2886c9b63f90 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
759 759
760 if (vlan_tx_tag_present(skb)) { 760 if (vlan_tx_tag_present(skb)) {
761 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 761 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
762 __vlan_put_tag(skb, vlan_tag); 762 skb = __vlan_put_tag(skb, vlan_tag);
763 skb->vlan_tci = 0; 763 if (skb)
764 skb->vlan_tci = 0;
764 } 765 }
765 766
766 return skb; 767 return skb;
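__vlan_put_tag() may reallocate the skb, or free it and return NULL on failure, so discarding its return value as the old code did risked a use-after-free. The safe call shape:

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = be_get_tx_vlan_tag(adapter, skb);

		skb = __vlan_put_tag(skb, vlan_tag);
		if (!skb)
			return NULL;	/* original skb already freed */
		skb->vlan_tci = 0;
	}
	return skb;
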
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index f292c3aa423f..73195f643c9c 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1002,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1002 } else { 1002 } else {
1003 if (fep->link) { 1003 if (fep->link) {
1004 fec_stop(ndev); 1004 fec_stop(ndev);
1005 fep->link = phy_dev->link;
1005 status_change = 1; 1006 status_change = 1;
1006 } 1007 }
1007 } 1008 }
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 25151401c2ab..ab577a763a20 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -284,18 +284,10 @@ struct igb_q_vector {
284enum e1000_ring_flags_t { 284enum e1000_ring_flags_t {
285 IGB_RING_FLAG_RX_SCTP_CSUM, 285 IGB_RING_FLAG_RX_SCTP_CSUM,
286 IGB_RING_FLAG_RX_LB_VLAN_BSWAP, 286 IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
287 IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
288 IGB_RING_FLAG_TX_CTX_IDX, 287 IGB_RING_FLAG_TX_CTX_IDX,
289 IGB_RING_FLAG_TX_DETECT_HANG 288 IGB_RING_FLAG_TX_DETECT_HANG
290}; 289};
291 290
292#define ring_uses_build_skb(ring) \
293 test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
294#define set_ring_build_skb_enabled(ring) \
295 set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
296#define clear_ring_build_skb_enabled(ring) \
297 clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
298
299#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) 291#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
300 292
301#define IGB_RX_DESC(R, i) \ 293#define IGB_RX_DESC(R, i) \
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8496adfc6a68..64f75291e3a5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3350 wr32(E1000_RXDCTL(reg_idx), rxdctl); 3350 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3351} 3351}
3352 3352
3353static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
3354 struct igb_ring *rx_ring)
3355{
3356#define IGB_MAX_BUILD_SKB_SIZE \
3357 (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
3358 (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
3359
3360 /* set build_skb flag */
3361 if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
3362 set_ring_build_skb_enabled(rx_ring);
3363 else
3364 clear_ring_build_skb_enabled(rx_ring);
3365}
3366
3367/** 3353/**
3368 * igb_configure_rx - Configure receive Unit after Reset 3354 * igb_configure_rx - Configure receive Unit after Reset
3369 * @adapter: board private structure 3355 * @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
3383 3369
3384 /* Setup the HW Rx Head and Tail Descriptor Pointers and 3370 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3385 * the Base and Length of the Rx Descriptor Ring */ 3371 * the Base and Length of the Rx Descriptor Ring */
3386 for (i = 0; i < adapter->num_rx_queues; i++) { 3372 for (i = 0; i < adapter->num_rx_queues; i++)
3387 struct igb_ring *rx_ring = adapter->rx_ring[i]; 3373 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
3388 igb_set_rx_buffer_len(adapter, rx_ring);
3389 igb_configure_rx_ring(adapter, rx_ring);
3390 }
3391} 3374}
3392 3375
3393/** 3376/**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6203 return igb_can_reuse_rx_page(rx_buffer, page, truesize); 6186 return igb_can_reuse_rx_page(rx_buffer, page, truesize);
6204} 6187}
6205 6188
6206static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
6207 union e1000_adv_rx_desc *rx_desc)
6208{
6209 struct igb_rx_buffer *rx_buffer;
6210 struct sk_buff *skb;
6211 struct page *page;
6212 void *page_addr;
6213 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
6214#if (PAGE_SIZE < 8192)
6215 unsigned int truesize = IGB_RX_BUFSZ;
6216#else
6217 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
6218 SKB_DATA_ALIGN(NET_SKB_PAD +
6219 NET_IP_ALIGN +
6220 size);
6221#endif
6222
6223 /* If we spanned a buffer we have a huge mess so test for it */
6224 BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
6225
6226 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6227 page = rx_buffer->page;
6228 prefetchw(page);
6229
6230 page_addr = page_address(page) + rx_buffer->page_offset;
6231
6232 /* prefetch first cache line of first page */
6233 prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
6234#if L1_CACHE_BYTES < 128
6235 prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
6236#endif
6237
6238 /* build an skb to around the page buffer */
6239 skb = build_skb(page_addr, truesize);
6240 if (unlikely(!skb)) {
6241 rx_ring->rx_stats.alloc_failed++;
6242 return NULL;
6243 }
6244
6245 /* we are reusing so sync this buffer for CPU use */
6246 dma_sync_single_range_for_cpu(rx_ring->dev,
6247 rx_buffer->dma,
6248 rx_buffer->page_offset,
6249 IGB_RX_BUFSZ,
6250 DMA_FROM_DEVICE);
6251
6252 /* update pointers within the skb to store the data */
6253 skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
6254 __skb_put(skb, size);
6255
6256 /* pull timestamp out of packet data */
6257 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
6258 igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
6259 __skb_pull(skb, IGB_TS_HDR_LEN);
6260 }
6261
6262 if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
6263 /* hand second half of page back to the ring */
6264 igb_reuse_rx_page(rx_ring, rx_buffer);
6265 } else {
6266 /* we are not reusing the buffer so unmap it */
6267 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
6268 PAGE_SIZE, DMA_FROM_DEVICE);
6269 }
6270
6271 /* clear contents of buffer_info */
6272 rx_buffer->dma = 0;
6273 rx_buffer->page = NULL;
6274
6275 return skb;
6276}
6277
6278static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, 6189static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6279 union e1000_adv_rx_desc *rx_desc, 6190 union e1000_adv_rx_desc *rx_desc,
6280 struct sk_buff *skb) 6191 struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6690 rmb(); 6601 rmb();
6691 6602
6692 /* retrieve a buffer from the ring */ 6603 /* retrieve a buffer from the ring */
6693 if (ring_uses_build_skb(rx_ring)) 6604 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
6694 skb = igb_build_rx_buffer(rx_ring, rx_desc);
6695 else
6696 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
6697 6605
6698 /* exit if we failed to retrieve a buffer */ 6606 /* exit if we failed to retrieve a buffer */
6699 if (!skb) 6607 if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6780 return true; 6688 return true;
6781} 6689}
6782 6690
6783static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
6784{
6785 if (ring_uses_build_skb(rx_ring))
6786 return NET_SKB_PAD + NET_IP_ALIGN;
6787 else
6788 return 0;
6789}
6790
6791/** 6691/**
6792 * igb_alloc_rx_buffers - Replace used receive buffers; packet split 6692 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
6793 * @adapter: address of board private structure 6693 * @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6814 * Refresh the desc even if buffer_addrs didn't change 6714 * Refresh the desc even if buffer_addrs didn't change
6815 * because each write-back erases this info. 6715 * because each write-back erases this info.
6816 */ 6716 */
6817 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + 6717 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
6818 bi->page_offset +
6819 igb_rx_offset(rx_ring));
6820 6718
6821 rx_desc++; 6719 rx_desc++;
6822 bi++; 6720 bi++;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d44b4d21268c..97e33669c0b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1049 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) 1049 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
1050 return -EINVAL; 1050 return -EINVAL;
1051 if (vlan || qos) { 1051 if (vlan || qos) {
1052 if (adapter->vfinfo[vf].pf_vlan)
1053 err = ixgbe_set_vf_vlan(adapter, false,
1054 adapter->vfinfo[vf].pf_vlan,
1055 vf);
1056 if (err)
1057 goto out;
1052 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); 1058 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
1053 if (err) 1059 if (err)
1054 goto out; 1060 goto out;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index edfba9370922..434e33c527df 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -33,6 +33,7 @@ config MV643XX_ETH
33 33
34config MVMDIO 34config MVMDIO
35 tristate "Marvell MDIO interface support" 35 tristate "Marvell MDIO interface support"
36 select PHYLIB
36 ---help--- 37 ---help---
37 This driver supports the MDIO interface found in the network 38 This driver supports the MDIO interface found in the network
38 interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, 39 interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -45,7 +46,6 @@ config MVMDIO
45config MVNETA 46config MVNETA
46 tristate "Marvell Armada 370/XP network interface support" 47 tristate "Marvell Armada 370/XP network interface support"
47 depends on MACH_ARMADA_370_XP 48 depends on MACH_ARMADA_370_XP
48 select PHYLIB
49 select MVMDIO 49 select MVMDIO
50 ---help--- 50 ---help---
51 This driver supports the network interface units in the 51 This driver supports the network interface units in the
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 1e628ce57201..a47a097c21e1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -374,7 +374,6 @@ static int rxq_number = 8;
374static int txq_number = 8; 374static int txq_number = 8;
375 375
376static int rxq_def; 376static int rxq_def;
377static int txq_def;
378 377
379#define MVNETA_DRIVER_NAME "mvneta" 378#define MVNETA_DRIVER_NAME "mvneta"
380#define MVNETA_DRIVER_VERSION "1.0" 379#define MVNETA_DRIVER_VERSION "1.0"
@@ -1475,7 +1474,8 @@ error:
1475static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) 1474static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1476{ 1475{
1477 struct mvneta_port *pp = netdev_priv(dev); 1476 struct mvneta_port *pp = netdev_priv(dev);
1478 struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; 1477 u16 txq_id = skb_get_queue_mapping(skb);
1478 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1479 struct mvneta_tx_desc *tx_desc; 1479 struct mvneta_tx_desc *tx_desc;
1480 struct netdev_queue *nq; 1480 struct netdev_queue *nq;
1481 int frags = 0; 1481 int frags = 0;
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1485 goto out; 1485 goto out;
1486 1486
1487 frags = skb_shinfo(skb)->nr_frags + 1; 1487 frags = skb_shinfo(skb)->nr_frags + 1;
1488 nq = netdev_get_tx_queue(dev, txq_def); 1488 nq = netdev_get_tx_queue(dev, txq_id);
1489 1489
1490 /* Get a descriptor for the first part of the packet */ 1490 /* Get a descriptor for the first part of the packet */
1491 tx_desc = mvneta_txq_next_desc_get(txq); 1491 tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)
2689 return -EINVAL; 2689 return -EINVAL;
2690 } 2690 }
2691 2691
2692 dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); 2692 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
2693 if (!dev) 2693 if (!dev)
2694 return -ENOMEM; 2694 return -ENOMEM;
2695 2695
@@ -2844,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO);
2844module_param(txq_number, int, S_IRUGO); 2844module_param(txq_number, int, S_IRUGO);
2845 2845
2846module_param(rxq_def, int, S_IRUGO); 2846module_param(rxq_def, int, S_IRUGO);
2847module_param(txq_def, int, S_IRUGO);
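With the txq_def module parameter gone, mvneta now honours the transmit queue the networking stack selected for each skb, and reports the real queue counts at allocation time:

	/* Register the true TX/RX queue counts... */
	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port),
				 txq_number, rxq_number);

	/* ...and respect the stack's per-skb queue choice in xmit */
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
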
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index cd5ae8813cb3..edd63f1230f3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1500 } 1500 }
1501 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); 1501 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
1502 1502
1503 /* Make sure carrier is off and queue is stopped during loopback */
1504 if (netif_running(netdev)) {
1505 netif_carrier_off(netdev);
1506 netif_stop_queue(netdev);
1507 }
1508
1503 ret = qlcnic_do_lb_test(adapter, mode); 1509 ret = qlcnic_do_lb_test(adapter, mode);
1504 1510
1505 qlcnic_83xx_clear_lb_mode(adapter, mode); 1511 qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
2780void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) 2786void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
2781{ 2787{
2782 struct qlcnic_cmd_args cmd; 2788 struct qlcnic_cmd_args cmd;
2789 struct net_device *netdev = adapter->netdev;
2783 int ret = 0; 2790 int ret = 0;
2784 2791
2785 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); 2792 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
2789 data = qlcnic_83xx_fill_stats(adapter, &cmd, data, 2796 data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
2790 QLC_83XX_STAT_TX, &ret); 2797 QLC_83XX_STAT_TX, &ret);
2791 if (ret) { 2798 if (ret) {
2792 dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); 2799 netdev_err(netdev, "Error getting Tx stats\n");
2793 goto out; 2800 goto out;
2794 } 2801 }
2795 /* Get MAC stats */ 2802 /* Get MAC stats */
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
2799 data = qlcnic_83xx_fill_stats(adapter, &cmd, data, 2806 data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
2800 QLC_83XX_STAT_MAC, &ret); 2807 QLC_83XX_STAT_MAC, &ret);
2801 if (ret) { 2808 if (ret) {
2802 dev_info(&adapter->pdev->dev, 2809 netdev_err(netdev, "Error getting MAC stats\n");
2803 "Error getting Rx stats\n");
2804 goto out; 2810 goto out;
2805 } 2811 }
2806 /* Get Rx stats */ 2812 /* Get Rx stats */
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
2810 data = qlcnic_83xx_fill_stats(adapter, &cmd, data, 2816 data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
2811 QLC_83XX_STAT_RX, &ret); 2817 QLC_83XX_STAT_RX, &ret);
2812 if (ret) 2818 if (ret)
2813 dev_info(&adapter->pdev->dev, 2819 netdev_err(netdev, "Error getting Rx stats\n");
2814 "Error getting Tx stats\n");
2815out: 2820out:
2816 qlcnic_free_mbx_args(&cmd); 2821 qlcnic_free_mbx_args(&cmd);
2817} 2822}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0e630061bff3..5fa847fe388a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -358,8 +358,7 @@ set_flags:
358 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); 358 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
359 } 359 }
360 opcode = TX_ETHER_PKT; 360 opcode = TX_ETHER_PKT;
361 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 361 if (skb_is_gso(skb)) {
362 skb_shinfo(skb)->gso_size > 0) {
363 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 362 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
364 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 363 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
365 first_desc->total_hdr_length = hdr_len; 364 first_desc->total_hdr_length = hdr_len;
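The open-coded test (TSO feature bits plus gso_size > 0) is what skb_is_gso() already expresses, and the feature check was redundant since the stack only hands a device GSO skbs it advertised support for. Roughly what include/linux/skbuff.h provides:

	static inline int skb_is_gso(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_size;
	}
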
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 987fb6f8adc3..5ef328af61d0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -200,10 +200,10 @@ beacon_err:
200 } 200 }
201 201
202 err = qlcnic_config_led(adapter, b_state, b_rate); 202 err = qlcnic_config_led(adapter, b_state, b_rate);
203 if (!err) 203 if (!err) {
204 err = len; 204 err = len;
205 else
206 ahw->beacon_state = b_state; 205 ahw->beacon_state = b_state;
206 }
207 207
208 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) 208 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
209 qlcnic_diag_free_res(adapter->netdev, max_sds_rings); 209 qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index a131d7b5d2fe..7e8d68263963 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
18 */ 18 */
19#define DRV_NAME "qlge" 19#define DRV_NAME "qlge"
20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 20#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
21#define DRV_VERSION "v1.00.00.31" 21#define DRV_VERSION "v1.00.00.32"
22 22
23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 23#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
24 24
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 6f316ab23257..0780e039b271 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,
379 379
380 ecmd->supported = SUPPORTED_10000baseT_Full; 380 ecmd->supported = SUPPORTED_10000baseT_Full;
381 ecmd->advertising = ADVERTISED_10000baseT_Full; 381 ecmd->advertising = ADVERTISED_10000baseT_Full;
382 ecmd->autoneg = AUTONEG_ENABLE;
383 ecmd->transceiver = XCVR_EXTERNAL; 382 ecmd->transceiver = XCVR_EXTERNAL;
384 if ((qdev->link_status & STS_LINK_TYPE_MASK) == 383 if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
385 STS_LINK_TYPE_10GBASET) { 384 STS_LINK_TYPE_10GBASET) {
386 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); 385 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
387 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); 386 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
388 ecmd->port = PORT_TP; 387 ecmd->port = PORT_TP;
388 ecmd->autoneg = AUTONEG_ENABLE;
389 } else { 389 } else {
390 ecmd->supported |= SUPPORTED_FIBRE; 390 ecmd->supported |= SUPPORTED_FIBRE;
391 ecmd->advertising |= ADVERTISED_FIBRE; 391 ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b13ab544a7eb..8033555e53c2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1434,11 +1434,13 @@ map_error:
1434} 1434}
1435 1435
1436/* Categorizing receive firmware frame errors */ 1436/* Categorizing receive firmware frame errors */
1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) 1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438 struct rx_ring *rx_ring)
1438{ 1439{
1439 struct nic_stats *stats = &qdev->nic_stats; 1440 struct nic_stats *stats = &qdev->nic_stats;
1440 1441
1441 stats->rx_err_count++; 1442 stats->rx_err_count++;
1443 rx_ring->rx_errors++;
1442 1444
1443 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { 1445 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1444 case IB_MAC_IOCB_RSP_ERR_CODE_ERR: 1446 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
 	struct napi_struct *napi = &rx_ring->napi;
 
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+		put_page(lbq_desc->p.pg_chunk.page);
+		return;
+	}
 	napi->dev = qdev->ndev;
 
 	skb = napi_get_frags(napi);
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
 	addr = lbq_desc->p.pg_chunk.va;
 	prefetch(addr);
 
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+		goto err_out;
+	}
+
 	/* The max framesize filter on this chip is set higher than
 	 * MTU since FCoE uses 2k frames.
 	 */
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 	memcpy(skb_put(new_skb, length), skb->data, length);
 	skb = new_skb;
 
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
 	/* loopback self test for ethtool */
 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
 		ql_check_lb_frame(qdev, skb);
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
 		return;
 	}
 
+	/* Frame error, so drop the packet. */
+	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
 	/* The max framesize filter on this chip is set higher than
 	 * MTU since FCoE uses 2k frames.
 	 */
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
 
 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
 
-	/* Frame error, so drop the packet. */
-	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
-		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
-		return (unsigned long)length;
-	}
-
 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
 		/* The data and headers are split into
 		 * separate buffers.
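
Note on the qlge hunks above: the error check used to live only in ql_process_mac_rx_intr(), after the receive buffer had already been consumed, so an errored frame leaked whatever page or skb the active path owned. Moving the check into each path lets that path release its own buffer and, via the new rx_ring argument, account the drop per ring. A self-contained sketch of the shared shape (all demo_* names are hypothetical, not part of the driver):

#include <stdbool.h>

struct demo_ring { unsigned long rx_errors; };

static void demo_count_rx_err(struct demo_ring *ring)
{
	ring->rx_errors++;	/* per-ring, as the new rx_ring argument allows */
}

static bool demo_rx(struct demo_ring *ring, unsigned char err_flags)
{
	if (err_flags) {
		demo_count_rx_err(ring);
		/* release the buffer this path owns (page, skb, ...) here */
		return false;	/* frame dropped */
	}
	/* normal receive processing continues */
	return true;
}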
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 0c74a702d461..50617c5a0bdb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
 	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
 	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actually supported).
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 80cad06e5eb2..4781d3d8e182 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
 
 		if (data->dual_emac) {
-			if (of_property_read_u32(node, "dual_emac_res_vlan",
+			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
 						 &prop)) {
 				pr_err("Missing dual_emac_res_vlan in DT.\n");
 				slave_data->dual_emac_res_vlan = i+1;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b7c457adc0dc..729ed533bb33 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		if (tun->flags & TUN_TAP_MQ &&
 		    (tun->numqueues + tun->numdisabled > 1))
-			return err;
+			return -EBUSY;
 	}
 	else {
 		char *name;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 16c842997291..6bd91676d2cb 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 		goto error;
 
 	if (skb) {
-		if (skb->len <= sizeof(ETH_HLEN))
+		if (skb->len <= ETH_HLEN)
 			goto error;
 
 		/* mapping VLANs to MBIM sessions:
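
The bug fixed here is easy to misread: ETH_HLEN is 14, but sizeof(ETH_HLEN) is the size of the constant's type, typically 4, so the old test only rejected frames of 4 bytes or less. A minimal userspace demonstration:

#include <stdio.h>

#define ETH_HLEN 14	/* value as defined in <linux/if_ether.h> */

int main(void)
{
	/* sizeof applies to the type of its operand, not the value:
	 * ETH_HLEN is an int constant, so sizeof(ETH_HLEN) == sizeof(int).
	 */
	printf("ETH_HLEN = %d, sizeof(ETH_HLEN) = %zu\n",
	       ETH_HLEN, sizeof(ETH_HLEN));	/* prints 14 and, typically, 4 */
	return 0;
}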
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 968d5d50751d..2a3579f67910 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
+#include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/usb.h>
 #include <linux/usb/cdc.h>
@@ -52,6 +53,96 @@ struct qmi_wwan_state {
 	struct usb_interface *data;
 };
 
+/* default ethernet address used by the modem */
+static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+
+/* Make up an ethernet header if the packet doesn't have one.
+ *
+ * A firmware bug common among several devices causes them to send raw
+ * IP packets under some circumstances. There is no way for the
+ * driver/host to know when this will happen. And even when the bug
+ * hits, some packets will still arrive with an intact header.
+ *
+ * The supported devices are only capable of sending IPv4, IPv6 and
+ * ARP packets on a point-to-point link. Any packet with an ethernet
+ * header will have either our address or a broadcast/multicast
+ * address as destination. ARP packets will always have a header.
+ *
+ * This means that this function will reliably add the appropriate
+ * header iff necessary, provided our hardware address does not start
+ * with 4 or 6.
+ *
+ * Another common firmware bug results in all packets being addressed
+ * to 00:a0:c6:00:00:00 despite the host address being different.
+ * This function will also fixup such packets.
+ */
+static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+	__be16 proto;
+
+	/* usbnet rx_complete guarantees that skb->len is at least
+	 * hard_header_len, so we can inspect the dest address without
+	 * checking skb->len
+	 */
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		proto = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		proto = htons(ETH_P_IPV6);
+		break;
+	case 0x00:
+		if (is_multicast_ether_addr(skb->data))
+			return 1;
+		/* possibly bogus destination - rewrite just in case */
+		skb_reset_mac_header(skb);
+		goto fix_dest;
+	default:
+		/* pass along other packets without modifications */
+		return 1;
+	}
+	if (skb_headroom(skb) < ETH_HLEN)
+		return 0;
+	skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+	eth_hdr(skb)->h_proto = proto;
+	memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+fix_dest:
+	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+	return 1;
+}
+
+/* very simplistic detection of IPv4 or IPv6 headers */
+static bool possibly_iphdr(const char *data)
+{
+	return (data[0] & 0xd0) == 0x40;
+}
+
+/* disallow addresses which may be confused with IP headers */
+static int qmi_wwan_mac_addr(struct net_device *dev, void *p)
+{
+	int ret;
+	struct sockaddr *addr = p;
+
+	ret = eth_prepare_mac_addr_change(dev, p);
+	if (ret < 0)
+		return ret;
+	if (possibly_iphdr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	eth_commit_mac_addr_change(dev, p);
+	return 0;
+}
+
+static const struct net_device_ops qmi_wwan_netdev_ops = {
+	.ndo_open		= usbnet_open,
+	.ndo_stop		= usbnet_stop,
+	.ndo_start_xmit		= usbnet_start_xmit,
+	.ndo_tx_timeout		= usbnet_tx_timeout,
+	.ndo_change_mtu		= usbnet_change_mtu,
+	.ndo_set_mac_address	= qmi_wwan_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
 /* using a counter to merge subdriver requests with our own into a combined state */
 static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 {
@@ -229,6 +320,18 @@ next_desc:
 		usb_driver_release_interface(driver, info->data);
 	}
 
+	/* Never use the same address on both ends of the link, even
+	 * if the buggy firmware told us to.
+	 */
+	if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
+		eth_hw_addr_random(dev->net);
+
+	/* make MAC addr easily distinguishable from an IP header */
+	if (possibly_iphdr(dev->net->dev_addr)) {
+		dev->net->dev_addr[0] |= 0x02;	/* set local assignment bit */
+		dev->net->dev_addr[0] &= 0xbf;	/* clear "IP" bit */
+	}
+	dev->net->netdev_ops = &qmi_wwan_netdev_ops;
 err:
 	return status;
 }
@@ -307,6 +410,7 @@ static const struct driver_info qmi_wwan_info = {
 	.bind		= qmi_wwan_bind,
 	.unbind		= qmi_wwan_unbind,
 	.manage_power	= qmi_wwan_manage_power,
+	.rx_fixup	= qmi_wwan_rx_fixup,
 };
 
 #define HUAWEI_VENDOR_ID	0x12D1
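
The whole fixup hinges on one observation: on this point-to-point link the first byte of a raw packet is an IP version nibble (0x4x or 0x6x), while the first byte of a real Ethernet frame is the destination MAC, which the driver keeps out of that range. The 0xd0 mask in possibly_iphdr() covers both version nibbles at once. A standalone illustration of the same test:

#include <stdbool.h>
#include <stdio.h>

/* same test as possibly_iphdr() above: matches first bytes 0x40-0x4f
 * and 0x60-0x6f, i.e. the version nibbles of IPv4 and IPv6
 */
static bool looks_like_ip(unsigned char first_byte)
{
	return (first_byte & 0xd0) == 0x40;
}

int main(void)
{
	printf("0x45 (IPv4):  %d\n", looks_like_ip(0x45));	/* 1 */
	printf("0x60 (IPv6):  %d\n", looks_like_ip(0x60));	/* 1 */
	printf("0x02 (local MAC): %d\n", looks_like_ip(0x02));	/* 0 */
	return 0;
}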
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 28fd99203f64..bdee2ed67219 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
 	{0x00008258, 0x00000000},
 	{0x0000825c, 0x40000000},
 	{0x00008260, 0x00080922},
-	{0x00008264, 0x9bc00010},
+	{0x00008264, 0x9d400010},
 	{0x00008268, 0xffffffff},
 	{0x0000826c, 0x0000ffff},
 	{0x00008270, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index 467b60014b7b..73fe8d6db566 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
 	u32 sz, i;
 	struct channel_detector *cd;
 
-	cd = kmalloc(sizeof(*cd), GFP_KERNEL);
+	cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
 	if (cd == NULL)
 		goto fail;
 
 	INIT_LIST_HEAD(&cd->head);
 	cd->freq = freq;
 	sz = sizeof(cd->detectors) * dpd->num_radar_types;
-	cd->detectors = kzalloc(sz, GFP_KERNEL);
+	cd->detectors = kzalloc(sz, GFP_ATOMIC);
 	if (cd->detectors == NULL)
 		goto fail;
 
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
index 91b8dceeadb1..5e48c5515b8c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
 {
 	struct pulse_elem *p = pool_get_pulse_elem();
 	if (p == NULL) {
-		p = kmalloc(sizeof(*p), GFP_KERNEL);
+		p = kmalloc(sizeof(*p), GFP_ATOMIC);
 		if (p == NULL) {
 			DFS_POOL_STAT_INC(pulse_alloc_error);
 			return false;
@@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
 		ps.deadline_ts = ps.first_ts + ps.dur;
 		new_ps = pool_get_pseq_elem();
 		if (new_ps == NULL) {
-			new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
+			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
 			if (new_ps == NULL) {
 				DFS_POOL_STAT_INC(pseq_alloc_error);
 				return false;
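
Both ath9k DFS files make the same fix: the pattern detector runs from the radar-pulse processing path, which may execute in atomic context, where a GFP_KERNEL allocation can sleep and is therefore illegal. GFP_ATOMIC never sleeps but fails more readily, which is why the NULL paths above matter. A sketch of the rule using demo types only (not the driver's own):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_pulse { u64 ts; };

static DEFINE_SPINLOCK(demo_lock);

/* Allocation while a spinlock is held (or in interrupt context) must
 * not use GFP_KERNEL, since GFP_KERNEL may sleep; GFP_ATOMIC never
 * sleeps, so the failure case must be handled gracefully.
 */
static struct demo_pulse *demo_alloc_pulse(u64 ts)
{
	struct demo_pulse *p;

	spin_lock_bh(&demo_lock);
	p = kmalloc(sizeof(*p), GFP_ATOMIC);	/* GFP_KERNEL would be a bug here */
	if (p)
		p->ts = ts;
	spin_unlock_bh(&demo_lock);
	return p;
}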
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 716058b67557..a47f5e05fc04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
 	 * required version.
 	 */
 	if (priv->fw_version_major != MAJOR_VERSION_REQ ||
-	    priv->fw_version_minor != MINOR_VERSION_REQ) {
+	    priv->fw_version_minor < MINOR_VERSION_REQ) {
 		dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
 			MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
 		return -EINVAL;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index e8486c1e091a..b70f220bc4b3 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
 #endif
 #ifdef CONFIG_B43_SSB
 	case B43_BUS_SSB:
-		/* FIXME */
+		ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
+					    avoid);
 		break;
 #endif
 	}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index ec46ffff5409..78da3eff75e8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4128,10 +4128,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
 	},
 	{
 		.max = 1,
-		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
-	},
-	{
-		.max = 1,
 		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
 			 BIT(NL80211_IFTYPE_P2P_GO)
 	},
@@ -4187,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
 				 BIT(NL80211_IFTYPE_ADHOC) |
 				 BIT(NL80211_IFTYPE_AP) |
 				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
-				 BIT(NL80211_IFTYPE_P2P_GO) |
-				 BIT(NL80211_IFTYPE_P2P_DEVICE);
+				 BIT(NL80211_IFTYPE_P2P_GO);
 	wiphy->iface_combinations = brcmf_iface_combos;
 	wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
 	wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index c6451c61407a..e2340b231aa1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
 	}
 }
 
+/**
+ * This function frees the WL per-device resources.
+ *
+ * This function frees resources owned by the WL device pointed to
+ * by the wl parameter.
+ *
+ * precondition: can both be called locked and unlocked
+ *
+ */
+static void brcms_free(struct brcms_info *wl)
+{
+	struct brcms_timer *t, *next;
+
+	/* free ucode data */
+	if (wl->fw.fw_cnt)
+		brcms_ucode_data_free(&wl->ucode);
+	if (wl->irq)
+		free_irq(wl->irq, wl);
+
+	/* kill dpc */
+	tasklet_kill(&wl->tasklet);
+
+	if (wl->pub) {
+		brcms_debugfs_detach(wl->pub);
+		brcms_c_module_unregister(wl->pub, "linux", wl);
+	}
+
+	/* free common resources */
+	if (wl->wlc) {
+		brcms_c_detach(wl->wlc);
+		wl->wlc = NULL;
+		wl->pub = NULL;
+	}
+
+	/* virtual interface deletion is deferred so we cannot spinwait */
+
+	/* wait for all pending callbacks to complete */
+	while (atomic_read(&wl->callbacks) > 0)
+		schedule();
+
+	/* free timers */
+	for (t = wl->timers; t; t = next) {
+		next = t->next;
+#ifdef DEBUG
+		kfree(t->name);
+#endif
+		kfree(t);
+	}
+}
+
+/*
+* called from both kernel as from this kernel module (error flow on attach)
+* precondition: perimeter lock is not acquired.
+*/
+static void brcms_remove(struct bcma_device *pdev)
+{
+	struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+	struct brcms_info *wl = hw->priv;
+
+	if (wl->wlc) {
+		wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
+		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+		ieee80211_unregister_hw(hw);
+	}
+
+	brcms_free(wl);
+
+	bcma_set_drvdata(pdev, NULL);
+	ieee80211_free_hw(hw);
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static void brcms_release_fw(struct brcms_info *wl)
+{
+	int i;
+	for (i = 0; i < MAX_FW_IMAGES; i++) {
+		release_firmware(wl->fw.fw_bin[i]);
+		release_firmware(wl->fw.fw_hdr[i]);
+	}
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
+{
+	int status;
+	struct device *device = &pdev->dev;
+	char fw_name[100];
+	int i;
+
+	memset(&wl->fw, 0, sizeof(struct brcms_firmware));
+	for (i = 0; i < MAX_FW_IMAGES; i++) {
+		if (brcms_firmwares[i] == NULL)
+			break;
+		sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
+			UCODE_LOADER_API_VER);
+		status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
+		if (status) {
+			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+				  KBUILD_MODNAME, fw_name);
+			return status;
+		}
+		sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
+			UCODE_LOADER_API_VER);
+		status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
+		if (status) {
+			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+				  KBUILD_MODNAME, fw_name);
+			return status;
+		}
+		wl->fw.hdr_num_entries[i] =
+			wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
+	}
+	wl->fw.fw_cnt = i;
+	status = brcms_ucode_data_init(wl, &wl->ucode);
+	brcms_release_fw(wl);
+	return status;
+}
+
 static void brcms_ops_tx(struct ieee80211_hw *hw,
 			 struct ieee80211_tx_control *control,
 			 struct sk_buff *skb)
@@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
 	if (!blocked)
 		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
 
+	if (!wl->ucode.bcm43xx_bomminor) {
+		err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+		if (err) {
+			brcms_remove(wl->wlc->hw->d11core);
+			return -ENOENT;
+		}
+	}
+
 	spin_lock_bh(&wl->lock);
 	/* avoid acknowledging frames before a non-monitor device is added */
 	wl->mute_tx = true;
@@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data)
 	wake_up(&wl->tx_flush_wq);
 }
 
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
-{
-	int status;
-	struct device *device = &pdev->dev;
-	char fw_name[100];
-	int i;
-
-	memset(&wl->fw, 0, sizeof(struct brcms_firmware));
-	for (i = 0; i < MAX_FW_IMAGES; i++) {
-		if (brcms_firmwares[i] == NULL)
-			break;
-		sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
-			UCODE_LOADER_API_VER);
-		status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
-		if (status) {
-			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-				  KBUILD_MODNAME, fw_name);
-			return status;
-		}
-		sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
-			UCODE_LOADER_API_VER);
-		status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
-		if (status) {
-			wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
-				  KBUILD_MODNAME, fw_name);
-			return status;
-		}
-		wl->fw.hdr_num_entries[i] =
-			wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
-	}
-	wl->fw.fw_cnt = i;
-	return brcms_ucode_data_init(wl, &wl->ucode);
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static void brcms_release_fw(struct brcms_info *wl)
-{
-	int i;
-	for (i = 0; i < MAX_FW_IMAGES; i++) {
-		release_firmware(wl->fw.fw_bin[i]);
-		release_firmware(wl->fw.fw_hdr[i]);
-	}
-}
-
-/**
- * This function frees the WL per-device resources.
- *
- * This function frees resources owned by the WL device pointed to
- * by the wl parameter.
- *
- * precondition: can both be called locked and unlocked
- *
- */
-static void brcms_free(struct brcms_info *wl)
-{
-	struct brcms_timer *t, *next;
-
-	/* free ucode data */
-	if (wl->fw.fw_cnt)
-		brcms_ucode_data_free(&wl->ucode);
-	if (wl->irq)
-		free_irq(wl->irq, wl);
-
-	/* kill dpc */
-	tasklet_kill(&wl->tasklet);
-
-	if (wl->pub) {
-		brcms_debugfs_detach(wl->pub);
-		brcms_c_module_unregister(wl->pub, "linux", wl);
-	}
-
-	/* free common resources */
-	if (wl->wlc) {
-		brcms_c_detach(wl->wlc);
-		wl->wlc = NULL;
-		wl->pub = NULL;
-	}
-
-	/* virtual interface deletion is deferred so we cannot spinwait */
-
-	/* wait for all pending callbacks to complete */
-	while (atomic_read(&wl->callbacks) > 0)
-		schedule();
-
-	/* free timers */
-	for (t = wl->timers; t; t = next) {
-		next = t->next;
-#ifdef DEBUG
-		kfree(t->name);
-#endif
-		kfree(t);
-	}
-}
-
-/*
-* called from both kernel as from this kernel module (error flow on attach)
-* precondition: perimeter lock is not acquired.
-*/
-static void brcms_remove(struct bcma_device *pdev)
-{
-	struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
-	struct brcms_info *wl = hw->priv;
-
-	if (wl->wlc) {
-		wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
-		wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
-		ieee80211_unregister_hw(hw);
-	}
-
-	brcms_free(wl);
-
-	bcma_set_drvdata(pdev, NULL);
-	ieee80211_free_hw(hw);
-}
-
 static irqreturn_t brcms_isr(int irq, void *dev_id)
 {
 	struct brcms_info *wl;
@@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 	spin_lock_init(&wl->lock);
 	spin_lock_init(&wl->isr_lock);
 
-	/* prepare ucode */
-	if (brcms_request_fw(wl, pdev) < 0) {
-		wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
-			  "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
-		brcms_release_fw(wl);
-		brcms_remove(pdev);
-		return NULL;
-	}
-
 	/* common load-time initialization */
 	wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
-	brcms_release_fw(wl);
 	if (!wl->wlc) {
 		wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
 			  KBUILD_MODNAME, err);
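
Taken together, the brcmsmac hunks move the firmware helpers ahead of brcms_ops_start() and defer request_firmware() from attach (probe) time to the first mac80211 start, when the filesystem holding the images can be expected to be mounted. A minimal standalone sketch of that load-on-first-start pattern (the demo_* names and the fw_loaded flag are hypothetical):

#include <stdbool.h>

struct demo_dev {
	bool fw_loaded;		/* hypothetical "already loaded" flag */
};

/* stands in for the request_firmware() sequence above */
static int demo_request_fw(struct demo_dev *dev)
{
	(void)dev;
	return 0;
}

static int demo_ops_start(struct demo_dev *dev)
{
	int err;

	if (!dev->fw_loaded) {		/* only on the first start */
		err = demo_request_fw(dev);
		if (err)
			return err;	/* device stays unusable, as above */
		dev->fw_loaded = true;
	}
	return 0;
}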
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 45cacf79f3a7..1a779bbfb87d 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = {
 	{ KE_KEY, 0x2142, { KEY_MEDIA } },
 	{ KE_KEY, 0x213b, { KEY_INFO } },
 	{ KE_KEY, 0x2169, { KEY_DIRECTION } },
-	{ KE_KEY, 0x216a, { KEY_SETUP } },
 	{ KE_KEY, 0x231b, { KEY_HELP } },
 	{ KE_END, 0 }
 };
@@ -925,9 +924,6 @@ static int __init hp_wmi_init(void)
 		err = hp_wmi_input_setup();
 		if (err)
 			return err;
-
-		//Enable magic for hotkeys that run on the SMBus
-		ec_write(0xe6,0x6e);
 	}
 
 	if (bios_capable) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9a907567f41e..edec135b1685 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1964,9 +1964,6 @@ struct tp_nvram_state {
 /* kthread for the hotkey poller */
 static struct task_struct *tpacpi_hotkey_task;
 
-/* Acquired while the poller kthread is running, use to sync start/stop */
-static struct mutex hotkey_thread_mutex;
-
 /*
  * Acquire mutex to write poller control variables as an
  * atomic block.
@@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data)
 	unsigned int poll_freq;
 	bool was_frozen;
 
-	mutex_lock(&hotkey_thread_mutex);
-
 	if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
 		goto exit;
 
@@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data)
 	}
 
 exit:
-	mutex_unlock(&hotkey_thread_mutex);
 	return 0;
 }
 
@@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void)
 	if (tpacpi_hotkey_task) {
 		kthread_stop(tpacpi_hotkey_task);
 		tpacpi_hotkey_task = NULL;
-		mutex_lock(&hotkey_thread_mutex);
-		/* at this point, the thread did exit */
-		mutex_unlock(&hotkey_thread_mutex);
 	}
 }
 
@@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
 	mutex_init(&hotkey_mutex);
 
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
-	mutex_init(&hotkey_thread_mutex);
 	mutex_init(&hotkey_thread_data_mutex);
 #endif
 
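
hotkey_thread_mutex only mirrored a guarantee the kthread API already provides: kthread_stop() wakes the thread and blocks until the thread function has returned, so no extra lock is needed to know the poller is gone. A minimal sketch of the canonical pattern, under that assumption:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *demo_task;

static int demo_poller(void *data)
{
	while (!kthread_should_stop()) {
		/* ... poll hardware ... */
		msleep(100);
	}
	return 0;
}

static void demo_start(void)
{
	demo_task = kthread_run(demo_poller, NULL, "demo_poller");
	if (IS_ERR(demo_task))
		demo_task = NULL;
}

static void demo_stop(void)
{
	if (demo_task) {
		kthread_stop(demo_task);	/* waits for demo_poller() to return */
		demo_task = NULL;
	}
}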
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 1a9d1e3ce64c..c1441ed282eb 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
+static void reset_one_i2c(struct bbc_i2c_bus *bp)
 {
 	writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0);
 	writeb(bp->own, bp->i2c_control_regs + 0x1);
@@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
 	writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0);
 }
 
-static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index)
+static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index)
 {
 	struct bbc_i2c_bus *bp;
 	struct device_node *dp;
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 4c0f6d883dd3..7b0bce936762 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
 		return 0;
 	}
 }
+
+void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
+{
+	u32 pmu_ctl = 0;
+
+	switch (cc->dev->bus->chip_id) {
+	case 0x4322:
+		ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070);
+		ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a);
+		ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854);
+		if (spuravoid == 1)
+			ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828);
+		else
+			ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828);
+		pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
+		break;
+	case 43222:
+		/* TODO: BCM43222 requires updating PLLs too */
+		return;
+	default:
+		ssb_printk(KERN_ERR PFX
+			   "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+			   cc->dev->bus->chip_id);
+		return;
+	}
+
+	chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl);
+}
+EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate);
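
This is the helper the b43 hunk earlier calls into from b43_nphy_pmu_spur_avoid(). A hedged caller-side sketch (demo_set_spuravoid is hypothetical; the chipco member is taken from the b43 call above):

#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>

/* Hand the bus's ChipCommon core to the new export and say whether
 * spur avoidance is wanted (1) or not (0).
 */
static void demo_set_spuravoid(struct ssb_device *sdev, bool avoid)
{
	ssb_pmu_spuravoid_pllupdate(&sdev->bus->chipco, avoid ? 1 : 0);
}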
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 7c254084b6a0..86291dcd964a 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
 	struct fb_info *info = file_fb_info(file);
 	struct fb_ops *fb;
-	unsigned long off;
+	unsigned long mmio_pgoff;
 	unsigned long start;
 	u32 len;
 
 	if (!info)
 		return -ENODEV;
-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
-		return -EINVAL;
-	off = vma->vm_pgoff << PAGE_SHIFT;
 	fb = info->fbops;
 	if (!fb)
 		return -ENODEV;
@@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 		return res;
 	}
 
-	/* frame buffer memory */
+	/*
+	 * Ugh. This can be either the frame buffer mapping, or
+	 * if pgoff points past it, the mmio mapping.
+	 */
 	start = info->fix.smem_start;
-	len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
-	if (off >= len) {
-		/* memory mapped io */
-		off -= len;
-		if (info->var.accel_flags) {
-			mutex_unlock(&info->mm_lock);
-			return -EINVAL;
-		}
+	len = info->fix.smem_len;
+	mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
+	if (vma->vm_pgoff >= mmio_pgoff) {
+		vma->vm_pgoff -= mmio_pgoff;
 		start = info->fix.mmio_start;
-		len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
+		len = info->fix.mmio_len;
 	}
 	mutex_unlock(&info->mm_lock);
-	start &= PAGE_MASK;
-	if ((vma->vm_end - vma->vm_start + off) > len)
-		return -EINVAL;
-	off += start;
-	vma->vm_pgoff = off >> PAGE_SHIFT;
-	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
+
 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	fb_pgprotect(file, vma, off);
-	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
-			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
-		return -EAGAIN;
-	return 0;
+	fb_pgprotect(file, vma, start);
+
+	return vm_iomap_memory(vma, start, len);
 }
 
 static int
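
The rewrite works because vm_iomap_memory() (prototype added to include/linux/mm.h below) centralizes the overflow, offset and length checks that fb_mmap() used to open-code: it takes a physical base and length and validates the vma against them before remapping. A hedged sketch of roughly what such a helper must do; the real implementation lives in mm/memory.c and may differ in detail:

#include <linux/mm.h>

static int demo_iomap_memory(struct vm_area_struct *vma,
			     phys_addr_t start, unsigned long len)
{
	unsigned long pfn, pages, vm_len;

	if (start + len < start)		/* physical range overflow */
		return -EINVAL;
	len += start & ~PAGE_MASK;		/* round to whole pages */
	pfn = start >> PAGE_SHIFT;
	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;

	if (vma->vm_pgoff > pages)		/* offset past the region? */
		return -EINVAL;
	pfn += vma->vm_pgoff;			/* start vm_pgoff pages in */
	pages -= vma->vm_pgoff;

	vm_len = vma->vm_end - vma->vm_start;	/* requested mapping length */
	if (vm_len >> PAGE_SHIFT > pages)	/* mapping longer than region? */
		return -EINVAL;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len,
				  vma->vm_page_prot);
}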
diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c
index 9ed83419038b..84de2632857a 100644
--- a/drivers/video/mmp/core.c
+++ b/drivers/video/mmp/core.c
@@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path)
 
 	kfree(path);
 	mutex_unlock(&disp_lock);
-
-	dev_info(path->dev, "de-register %s\n", path->name);
 }
 EXPORT_SYMBOL_GPL(mmp_unregister_path);
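
The removed dev_info() dereferenced path after kfree(path), a use-after-free. If the message were worth keeping, it would have to move before the free; a minimal illustration of the correct ordering (assuming the driver's own mmp_path type and header):

#include <linux/device.h>
#include <linux/slab.h>
#include <video/mmp_disp.h>	/* assumed header for struct mmp_path */

static void demo_unregister(struct mmp_path *path)
{
	dev_info(path->dev, "de-register %s\n", path->name);	/* before free */
	kfree(path);
	/* path must not be touched from here on */
}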
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 3939829f6c5c..86af964c2425 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1137,6 +1137,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
 			goto whole;
 		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
 			goto whole;
+		return 0;
 	}
 
 	/* Do not dump I/O mapped devices or special mappings */
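
Without the added return, a hugetlb VMA that matched neither hugetlb filter fell through into the checks meant for ordinary mappings and could be dumped by the wrong rules; the early return makes the hugetlb branch self-contained. A standalone sketch of the fixed control flow (all names hypothetical):

#include <stdbool.h>
#include <stddef.h>

struct demo_vma { bool hugetlb; bool shared; };

static bool demo_filter_matches(const struct demo_vma *v)
{
	return v->shared;	/* stands in for the FILTER() checks */
}

static size_t demo_dump_size(const struct demo_vma *v, size_t whole)
{
	if (v->hugetlb) {
		if (demo_filter_matches(v))
			return whole;
		return 0;	/* the line the hunk above adds */
	}
	/* generic, non-hugetlb filtering would continue here */
	return whole;
}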
diff --git a/fs/bio.c b/fs/bio.c
index bb5768f59b32..b96fc6ce4855 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1428,8 +1428,6 @@ void bio_endio(struct bio *bio, int error)
 	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = -EIO;
 
-	trace_block_bio_complete(bio, error);
-
 	if (bio->bi_end_io)
 		bio->bi_end_io(bio, error);
 }
diff --git a/fs/exec.c b/fs/exec.c
index a96a4885bbbf..87e731f020fb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -613,7 +613,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * when the old and new regions overlap clear from new_end.
 		 */
 		free_pgd_range(&tlb, new_end, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : 0);
+			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	} else {
 		/*
 		 * otherwise, clean from old_start; this is done to not touch
@@ -622,7 +622,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * for the others it's just a little faster.
 		 */
 		free_pgd_range(&tlb, old_start, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : 0);
+			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 	}
 	tlb_finish_mmu(&tlb, new_end, old_end);
 
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index a94f0f779d5e..fe0a76213d9e 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode)
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 	void *fsdata;
-	u32 size = inode->i_size;
+	loff_t size = inode->i_size;
 
 	res = pagecache_write_begin(NULL, mapping, size, 0,
 				    AOP_FLAG_UNINTERRUPTIBLE,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 84e3d856e91d..523464e62849 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	 * way when do_mmap_pgoff unwinds (may be important on powerpc
 	 * and ia64).
 	 */
-	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
 	vma->vm_ops = &hugetlb_vm_ops;
 
 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f7ed9ee46eb9..cbd0f1b324b9 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -143,6 +143,7 @@ static const char * const task_state_array[] = {
143 "x (dead)", /* 64 */ 143 "x (dead)", /* 64 */
144 "K (wakekill)", /* 128 */ 144 "K (wakekill)", /* 128 */
145 "W (waking)", /* 256 */ 145 "W (waking)", /* 256 */
146 "P (parked)", /* 512 */
146}; 147};
147 148
148static inline const char *get_task_state(struct task_struct *tsk) 149static inline const char *get_task_state(struct task_struct *tsk)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bfd87685fc1f..a59ff51b0166 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -7,6 +7,16 @@
 #include <linux/mm_types.h>
 #include <linux/bug.h>
 
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING	0UL
+#endif
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pte_t *ptep,
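
The define is a fallback: the fs/exec.c hunks above now pass USER_PGTABLES_CEILING instead of a literal 0 as the ceiling of free_pgd_range(), and an architecture that must not free page tables all the way to the top of the address space can override it before this header is included. An illustrative override (the value shown is an assumption, not taken from any particular arch):

/* in an architecture's own pgtable.h, before asm-generic/pgtable.h:
 * stop free_pgtables() at the end of the user address range instead
 * of letting it run up to 0 (illustrative value only).
 */
#define USER_PGTABLES_CEILING	TASK_SIZE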
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 0ea61e07a91c..7c2e030e72f1 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -12,7 +12,6 @@
 
 struct blk_trace {
 	int trace_state;
-	bool rq_based;
 	struct rchan *rchan;
 	unsigned long __percpu *sequence;
 	unsigned char __percpu *msg_data;
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 9bf2f1fcae27..3d7df3d32c66 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -333,6 +333,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
 					   unsigned long count,
 					   u64 *max_size,
 					   int *reset_type);
+typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
 
 /*
  * EFI Configuration Table and GUID definitions
@@ -575,9 +576,15 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
 #ifdef CONFIG_X86
 extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
+extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
 #else
 static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
+
+static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+	return EFI_SUCCESS;
+}
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 extern u64 efi_get_iobase (void);
@@ -731,7 +738,7 @@ struct efivar_operations {
 	efi_get_variable_t *get_variable;
 	efi_get_next_variable_t *get_next_variable;
 	efi_set_variable_t *set_variable;
-	efi_query_variable_info_t *query_variable_info;
+	efi_query_variable_store_t *query_variable_store;
 };
 
 struct efivars {
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d2e6927bbaae..d78d28a733b1 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -200,6 +200,8 @@ extern size_t vmcoreinfo_max_size;
 
 int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
 		unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+		unsigned long long *crash_size, unsigned long long *crash_base);
 int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
 		unsigned long long *crash_size, unsigned long long *crash_base);
 int crash_shrink_memory(unsigned long new_size);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e19ff30ad0a2..e2091b88d24c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1611,6 +1611,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn);
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+
 
 struct page *follow_page_mask(struct vm_area_struct *vma,
 			      unsigned long address, unsigned int foll_flags,
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index 01d25e6fc792..0214c4c146fa 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -291,6 +291,7 @@ ip_set_hash_destroy(struct ip_set *set)
 #define type_pf_data_tlist	TOKEN(TYPE, PF, _data_tlist)
 #define type_pf_data_next	TOKEN(TYPE, PF, _data_next)
 #define type_pf_data_flags	TOKEN(TYPE, PF, _data_flags)
+#define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags)
 #ifdef IP_SET_HASH_WITH_NETS
 #define type_pf_data_match	TOKEN(TYPE, PF, _data_match)
 #else
@@ -385,9 +386,9 @@ type_pf_resize(struct ip_set *set, bool retried)
 	struct ip_set_hash *h = set->data;
 	struct htable *t, *orig = h->table;
 	u8 htable_bits = orig->htable_bits;
-	const struct type_pf_elem *data;
+	struct type_pf_elem *data;
 	struct hbucket *n, *m;
-	u32 i, j;
+	u32 i, j, flags = 0;
 	int ret;
 
 retry:
@@ -412,9 +413,16 @@ retry:
 		n = hbucket(orig, i);
 		for (j = 0; j < n->pos; j++) {
 			data = ahash_data(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+			flags = 0;
+			type_pf_data_reset_flags(data, &flags);
+#endif
 			m = hbucket(t, HKEY(data, h->initval, htable_bits));
-			ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0);
+			ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags);
 			if (ret < 0) {
+#ifdef IP_SET_HASH_WITH_NETS
+				type_pf_data_flags(data, flags);
+#endif
 				read_unlock_bh(&set->lock);
 				ahash_destroy(t);
 				if (ret == -EAGAIN)
@@ -836,9 +844,9 @@ type_pf_tresize(struct ip_set *set, bool retried)
 	struct ip_set_hash *h = set->data;
 	struct htable *t, *orig = h->table;
 	u8 htable_bits = orig->htable_bits;
-	const struct type_pf_elem *data;
+	struct type_pf_elem *data;
 	struct hbucket *n, *m;
-	u32 i, j;
+	u32 i, j, flags = 0;
 	int ret;
 
 	/* Try to cleanup once */
@@ -873,10 +881,17 @@ retry:
 		n = hbucket(orig, i);
 		for (j = 0; j < n->pos; j++) {
 			data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+			flags = 0;
+			type_pf_data_reset_flags(data, &flags);
+#endif
 			m = hbucket(t, HKEY(data, h->initval, htable_bits));
-			ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
+			ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags,
 				ip_set_timeout_get(type_pf_data_timeout(data)));
 			if (ret < 0) {
+#ifdef IP_SET_HASH_WITH_NETS
+				type_pf_data_flags(data, flags);
+#endif
 				read_unlock_bh(&set->lock);
 				ahash_destroy(t);
 				if (ret == -EAGAIN)
@@ -1187,6 +1202,7 @@ type_pf_gc_init(struct ip_set *set)
 #undef type_pf_data_tlist
 #undef type_pf_data_next
 #undef type_pf_data_flags
+#undef type_pf_data_reset_flags
 #undef type_pf_data_match
 
 #undef type_pf_elem
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d35d2b6ddbfb..e692a022527b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #define TASK_DEAD		64
 #define TASK_WAKEKILL		128
 #define TASK_WAKING		256
-#define TASK_STATE_MAX		512
+#define TASK_PARKED		512
+#define TASK_STATE_MAX		1024
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
 
 extern char ___assert_task_state[1 - 2*!!(
 		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
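
TASK_STATE_TO_CHAR_STR has to stay in lockstep with the state bits, which is exactly what the build-time assert below the define enforces: the string length must equal ilog2(TASK_STATE_MAX)+1. A standalone illustration of the mapping, showing that TASK_PARKED (512 == 1 << 9) lands on the new 'P':

#include <stdio.h>

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* index 0 is running; otherwise the index is the 1-based position of
 * the (single) set state bit
 */
static char state_char(unsigned long state)
{
	int i = 0;

	while (state) {
		state >>= 1;
		i++;
	}
	return TASK_STATE_TO_CHAR_STR[i];
}

int main(void)
{
	printf("%c %c %c\n", state_char(0), state_char(1), state_char(512));
	/* prints: R S P */
	return 0;
}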
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
index 9e492be5244b..6fcfe99bd999 100644
--- a/include/linux/ssb/ssb_driver_chipcommon.h
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -219,6 +219,7 @@
 #define SSB_CHIPCO_PMU_CTL			0x0600 /* PMU control */
 #define  SSB_CHIPCO_PMU_CTL_ILP_DIV		0xFFFF0000 /* ILP div mask */
 #define  SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT	16
+#define  SSB_CHIPCO_PMU_CTL_PLL_UPD		0x00000400
 #define  SSB_CHIPCO_PMU_CTL_NOILPONW		0x00000200 /* No ILP on wait */
 #define  SSB_CHIPCO_PMU_CTL_HTREQEN		0x00000100 /* HT req enable */
 #define  SSB_CHIPCO_PMU_CTL_ALPREQEN		0x00000080 /* ALP req enable */
@@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id {
 void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
 			     enum ssb_pmu_ldo_volt_id id, u32 voltage);
 void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
+void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);
 
 #endif /* LINUX_SSB_CHIPCO_H_ */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 2de42f9401d2..a5ffd32642fd 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -25,6 +25,7 @@ extern int swiotlb_force;
 extern void swiotlb_init(int verbose);
 int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
 extern unsigned long swiotlb_nr_tbl(void);
+unsigned long swiotlb_size_or_default(void);
 extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 
 /*
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
new file mode 100644
index 000000000000..cbb20afdbc01
--- /dev/null
+++ b/include/linux/ucs2_string.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_UCS2_STRING_H_
+#define _LINUX_UCS2_STRING_H_
+
+#include <linux/types.h>	/* for size_t */
+#include <linux/stddef.h>	/* for NULL */
+
+typedef u16 ucs2_char_t;
+
+unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength);
+unsigned long ucs2_strlen(const ucs2_char_t *s);
+unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+
+#endif /* _LINUX_UCS2_STRING_H_ */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 40be2a0d8ae1..84a6440f1f19 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -199,6 +199,7 @@ extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
 /* Device notifier */
 extern int register_inet6addr_notifier(struct notifier_block *nb);
 extern int unregister_inet6addr_notifier(struct notifier_block *nb);
+extern int inet6addr_notifier_call_chain(unsigned long val, void *v);
 
 extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
 					 struct ipv6_devconf *devconf);
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
index f74109144d3f..f132924cc9da 100644
--- a/include/net/irda/irlmp.h
+++ b/include/net/irda/irlmp.h
@@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self)
 	return (self && self->lap) ? self->lap->daddr : 0;
 }
 
-extern const char *irlmp_reasons[];
+const char *irlmp_reason_str(LM_REASON reason);
+
 extern int sysctl_discovery_timeout;
 extern int sysctl_discovery_slots;
 extern int sysctl_discovery;
diff --git a/include/net/scm.h b/include/net/scm.h
index 975cca01048b..b11708105681 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
 	scm->pid  = get_pid(pid);
 	scm->cred = cred ? get_cred(cred) : NULL;
 	scm->creds.pid = pid_vnr(pid);
-	scm->creds.uid = cred ? cred->euid : INVALID_UID;
-	scm->creds.gid = cred ? cred->egid : INVALID_GID;
+	scm->creds.uid = cred ? cred->uid : INVALID_UID;
+	scm->creds.gid = cred ? cred->gid : INVALID_GID;
 }
 
 static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
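
The semantic fix here: SCM_CREDENTIALS ancillary data now carries the sender's real uid/gid rather than the effective ones. Nothing changes structurally for the receiver; with SO_PASSCRED enabled the credentials still arrive as a struct ucred. A userspace receiver sketch:

#define _GNU_SOURCE		/* for struct ucred on glibc */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Walk the control messages of a received msghdr and print the peer's
 * credentials; per the fix above, uid/gid are the real ids.
 */
static void print_peer_creds(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDENTIALS) {
			struct ucred *uc = (struct ucred *)CMSG_DATA(cmsg);

			printf("pid=%d uid=%d gid=%d\n",
			       (int)uc->pid, (int)uc->uid, (int)uc->gid);
		}
	}
}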
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 9961726523d0..9c1467357b03 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -257,6 +257,7 @@ TRACE_EVENT(block_bio_bounce,
 
 /**
  * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
  * @bio: block operation completed
  * @error: io error value
  *
@@ -265,9 +266,9 @@ TRACE_EVENT(block_bio_bounce,
  */
 TRACE_EVENT(block_bio_complete,
 
-	TP_PROTO(struct bio *bio, int error),
+	TP_PROTO(struct request_queue *q, struct bio *bio, int error),
 
-	TP_ARGS(bio, error),
+	TP_ARGS(q, bio, error),
 
 	TP_STRUCT__entry(
 		__field( dev_t,		dev			)
@@ -278,8 +279,7 @@ TRACE_EVENT(block_bio_complete,
 	),
 
 	TP_fast_assign(
-		__entry->dev		= bio->bi_bdev ?
-					  bio->bi_bdev->bd_dev : 0;
+		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_sector;
 		__entry->nr_sector	= bio->bi_size >> 9;
 		__entry->error		= error;
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 5a8671e8a67f..e5586caff67a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch,
 		__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
 				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
 				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
-				{ 128, "W" }) : "R",
+				{ 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
 		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
 		__entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 4c43b4448792..706d035fa748 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -95,15 +95,10 @@
 #ifndef _LINUX_FUSE_H
 #define _LINUX_FUSE_H
 
-#ifdef __linux__
+#ifdef __KERNEL__
 #include <linux/types.h>
 #else
 #include <stdint.h>
-#define __u64 uint64_t
-#define __s64 int64_t
-#define __u32 uint32_t
-#define __s32 int32_t
-#define __u16 uint16_t
 #endif
 
 /*
@@ -139,42 +134,42 @@
139 userspace works under 64bit kernels */ 134 userspace works under 64bit kernels */
140 135
141struct fuse_attr { 136struct fuse_attr {
142 __u64 ino; 137 uint64_t ino;
143 __u64 size; 138 uint64_t size;
144 __u64 blocks; 139 uint64_t blocks;
145 __u64 atime; 140 uint64_t atime;
146 __u64 mtime; 141 uint64_t mtime;
147 __u64 ctime; 142 uint64_t ctime;
148 __u32 atimensec; 143 uint32_t atimensec;
149 __u32 mtimensec; 144 uint32_t mtimensec;
150 __u32 ctimensec; 145 uint32_t ctimensec;
151 __u32 mode; 146 uint32_t mode;
152 __u32 nlink; 147 uint32_t nlink;
153 __u32 uid; 148 uint32_t uid;
154 __u32 gid; 149 uint32_t gid;
155 __u32 rdev; 150 uint32_t rdev;
156 __u32 blksize; 151 uint32_t blksize;
157 __u32 padding; 152 uint32_t padding;
158}; 153};
159 154
160struct fuse_kstatfs { 155struct fuse_kstatfs {
161 __u64 blocks; 156 uint64_t blocks;
162 __u64 bfree; 157 uint64_t bfree;
163 __u64 bavail; 158 uint64_t bavail;
164 __u64 files; 159 uint64_t files;
165 __u64 ffree; 160 uint64_t ffree;
166 __u32 bsize; 161 uint32_t bsize;
167 __u32 namelen; 162 uint32_t namelen;
168 __u32 frsize; 163 uint32_t frsize;
169 __u32 padding; 164 uint32_t padding;
170 __u32 spare[6]; 165 uint32_t spare[6];
171}; 166};
172 167
173struct fuse_file_lock { 168struct fuse_file_lock {
174 __u64 start; 169 uint64_t start;
175 __u64 end; 170 uint64_t end;
176 __u32 type; 171 uint32_t type;
177 __u32 pid; /* tgid */ 172 uint32_t pid; /* tgid */
178}; 173};
179 174
180/** 175/**
@@ -364,143 +359,143 @@ enum fuse_notify_code {
364#define FUSE_COMPAT_ENTRY_OUT_SIZE 120 359#define FUSE_COMPAT_ENTRY_OUT_SIZE 120
365 360
366struct fuse_entry_out { 361struct fuse_entry_out {
367 __u64 nodeid; /* Inode ID */ 362 uint64_t nodeid; /* Inode ID */
368 __u64 generation; /* Inode generation: nodeid:gen must 363 uint64_t generation; /* Inode generation: nodeid:gen must
369 be unique for the fs's lifetime */ 364 be unique for the fs's lifetime */
370 __u64 entry_valid; /* Cache timeout for the name */ 365 uint64_t entry_valid; /* Cache timeout for the name */
371 __u64 attr_valid; /* Cache timeout for the attributes */ 366 uint64_t attr_valid; /* Cache timeout for the attributes */
372 __u32 entry_valid_nsec; 367 uint32_t entry_valid_nsec;
373 __u32 attr_valid_nsec; 368 uint32_t attr_valid_nsec;
374 struct fuse_attr attr; 369 struct fuse_attr attr;
375}; 370};
376 371
377struct fuse_forget_in { 372struct fuse_forget_in {
378 __u64 nlookup; 373 uint64_t nlookup;
379}; 374};
380 375
381struct fuse_forget_one { 376struct fuse_forget_one {
382 __u64 nodeid; 377 uint64_t nodeid;
383 __u64 nlookup; 378 uint64_t nlookup;
384}; 379};
385 380
386struct fuse_batch_forget_in { 381struct fuse_batch_forget_in {
387 __u32 count; 382 uint32_t count;
388 __u32 dummy; 383 uint32_t dummy;
389}; 384};
390 385
391struct fuse_getattr_in { 386struct fuse_getattr_in {
392 __u32 getattr_flags; 387 uint32_t getattr_flags;
393 __u32 dummy; 388 uint32_t dummy;
394 __u64 fh; 389 uint64_t fh;
395}; 390};
396 391
397#define FUSE_COMPAT_ATTR_OUT_SIZE 96 392#define FUSE_COMPAT_ATTR_OUT_SIZE 96
398 393
399struct fuse_attr_out { 394struct fuse_attr_out {
400 __u64 attr_valid; /* Cache timeout for the attributes */ 395 uint64_t attr_valid; /* Cache timeout for the attributes */
401 __u32 attr_valid_nsec; 396 uint32_t attr_valid_nsec;
402 __u32 dummy; 397 uint32_t dummy;
403 struct fuse_attr attr; 398 struct fuse_attr attr;
404}; 399};
405 400
406#define FUSE_COMPAT_MKNOD_IN_SIZE 8 401#define FUSE_COMPAT_MKNOD_IN_SIZE 8
407 402
408struct fuse_mknod_in { 403struct fuse_mknod_in {
409 __u32 mode; 404 uint32_t mode;
410 __u32 rdev; 405 uint32_t rdev;
411 __u32 umask; 406 uint32_t umask;
412 __u32 padding; 407 uint32_t padding;
413}; 408};
414 409
415struct fuse_mkdir_in { 410struct fuse_mkdir_in {
416 __u32 mode; 411 uint32_t mode;
417 __u32 umask; 412 uint32_t umask;
418}; 413};
419 414
420struct fuse_rename_in { 415struct fuse_rename_in {
421 __u64 newdir; 416 uint64_t newdir;
422}; 417};
423 418
424struct fuse_link_in { 419struct fuse_link_in {
425 __u64 oldnodeid; 420 uint64_t oldnodeid;
426}; 421};
427 422
428struct fuse_setattr_in { 423struct fuse_setattr_in {
429 __u32 valid; 424 uint32_t valid;
430 __u32 padding; 425 uint32_t padding;
431 __u64 fh; 426 uint64_t fh;
432 __u64 size; 427 uint64_t size;
433 __u64 lock_owner; 428 uint64_t lock_owner;
434 __u64 atime; 429 uint64_t atime;
435 __u64 mtime; 430 uint64_t mtime;
436 __u64 unused2; 431 uint64_t unused2;
437 __u32 atimensec; 432 uint32_t atimensec;
438 __u32 mtimensec; 433 uint32_t mtimensec;
439 __u32 unused3; 434 uint32_t unused3;
440 __u32 mode; 435 uint32_t mode;
441 __u32 unused4; 436 uint32_t unused4;
442 __u32 uid; 437 uint32_t uid;
443 __u32 gid; 438 uint32_t gid;
444 __u32 unused5; 439 uint32_t unused5;
445}; 440};
446 441
447struct fuse_open_in { 442struct fuse_open_in {
448 __u32 flags; 443 uint32_t flags;
449 __u32 unused; 444 uint32_t unused;
450}; 445};
451 446
452struct fuse_create_in { 447struct fuse_create_in {
453 __u32 flags; 448 uint32_t flags;
454 __u32 mode; 449 uint32_t mode;
455 __u32 umask; 450 uint32_t umask;
456 __u32 padding; 451 uint32_t padding;
457}; 452};
458 453
459struct fuse_open_out { 454struct fuse_open_out {
460 __u64 fh; 455 uint64_t fh;
461 __u32 open_flags; 456 uint32_t open_flags;
462 __u32 padding; 457 uint32_t padding;
463}; 458};
464 459
465struct fuse_release_in { 460struct fuse_release_in {
466 __u64 fh; 461 uint64_t fh;
467 __u32 flags; 462 uint32_t flags;
468 __u32 release_flags; 463 uint32_t release_flags;
469 __u64 lock_owner; 464 uint64_t lock_owner;
470}; 465};
471 466
472struct fuse_flush_in { 467struct fuse_flush_in {
473 __u64 fh; 468 uint64_t fh;
474 __u32 unused; 469 uint32_t unused;
475 __u32 padding; 470 uint32_t padding;
476 __u64 lock_owner; 471 uint64_t lock_owner;
477}; 472};
478 473
479struct fuse_read_in { 474struct fuse_read_in {
480 __u64 fh; 475 uint64_t fh;
481 __u64 offset; 476 uint64_t offset;
482 __u32 size; 477 uint32_t size;
483 __u32 read_flags; 478 uint32_t read_flags;
484 __u64 lock_owner; 479 uint64_t lock_owner;
485 __u32 flags; 480 uint32_t flags;
486 __u32 padding; 481 uint32_t padding;
487}; 482};
488 483
489#define FUSE_COMPAT_WRITE_IN_SIZE 24 484#define FUSE_COMPAT_WRITE_IN_SIZE 24
490 485
491struct fuse_write_in { 486struct fuse_write_in {
492 __u64 fh; 487 uint64_t fh;
493 __u64 offset; 488 uint64_t offset;
494 __u32 size; 489 uint32_t size;
495 __u32 write_flags; 490 uint32_t write_flags;
496 __u64 lock_owner; 491 uint64_t lock_owner;
497 __u32 flags; 492 uint32_t flags;
498 __u32 padding; 493 uint32_t padding;
499}; 494};
500 495
501struct fuse_write_out { 496struct fuse_write_out {
502 __u32 size; 497 uint32_t size;
503 __u32 padding; 498 uint32_t padding;
504}; 499};
505 500
506#define FUSE_COMPAT_STATFS_SIZE 48 501#define FUSE_COMPAT_STATFS_SIZE 48
@@ -510,32 +505,32 @@ struct fuse_statfs_out {
510}; 505};
511 506
512struct fuse_fsync_in { 507struct fuse_fsync_in {
513 __u64 fh; 508 uint64_t fh;
514 __u32 fsync_flags; 509 uint32_t fsync_flags;
515 __u32 padding; 510 uint32_t padding;
516}; 511};
517 512
518struct fuse_setxattr_in { 513struct fuse_setxattr_in {
519 __u32 size; 514 uint32_t size;
520 __u32 flags; 515 uint32_t flags;
521}; 516};
522 517
523struct fuse_getxattr_in { 518struct fuse_getxattr_in {
524 __u32 size; 519 uint32_t size;
525 __u32 padding; 520 uint32_t padding;
526}; 521};
527 522
528struct fuse_getxattr_out { 523struct fuse_getxattr_out {
529 __u32 size; 524 uint32_t size;
530 __u32 padding; 525 uint32_t padding;
531}; 526};
532 527
533struct fuse_lk_in { 528struct fuse_lk_in {
534 __u64 fh; 529 uint64_t fh;
535 __u64 owner; 530 uint64_t owner;
536 struct fuse_file_lock lk; 531 struct fuse_file_lock lk;
537 __u32 lk_flags; 532 uint32_t lk_flags;
538 __u32 padding; 533 uint32_t padding;
539}; 534};
540 535
541struct fuse_lk_out { 536struct fuse_lk_out {
@@ -543,134 +538,135 @@ struct fuse_lk_out {
543}; 538};
544 539
545struct fuse_access_in { 540struct fuse_access_in {
546 __u32 mask; 541 uint32_t mask;
547 __u32 padding; 542 uint32_t padding;
548}; 543};
549 544
550struct fuse_init_in { 545struct fuse_init_in {
551 __u32 major; 546 uint32_t major;
552 __u32 minor; 547 uint32_t minor;
553 __u32 max_readahead; 548 uint32_t max_readahead;
554 __u32 flags; 549 uint32_t flags;
555}; 550};
556 551
557struct fuse_init_out { 552struct fuse_init_out {
558 __u32 major; 553 uint32_t major;
559 __u32 minor; 554 uint32_t minor;
560 __u32 max_readahead; 555 uint32_t max_readahead;
561 __u32 flags; 556 uint32_t flags;
562 __u16 max_background; 557 uint16_t max_background;
563 __u16 congestion_threshold; 558 uint16_t congestion_threshold;
564 __u32 max_write; 559 uint32_t max_write;
565}; 560};
566 561
567#define CUSE_INIT_INFO_MAX 4096 562#define CUSE_INIT_INFO_MAX 4096
568 563
569struct cuse_init_in { 564struct cuse_init_in {
570 __u32 major; 565 uint32_t major;
571 __u32 minor; 566 uint32_t minor;
572 __u32 unused; 567 uint32_t unused;
573 __u32 flags; 568 uint32_t flags;
574}; 569};
575 570
576struct cuse_init_out { 571struct cuse_init_out {
577 __u32 major; 572 uint32_t major;
578 __u32 minor; 573 uint32_t minor;
579 __u32 unused; 574 uint32_t unused;
580 __u32 flags; 575 uint32_t flags;
581 __u32 max_read; 576 uint32_t max_read;
582 __u32 max_write; 577 uint32_t max_write;
583 __u32 dev_major; /* chardev major */ 578 uint32_t dev_major; /* chardev major */
584 __u32 dev_minor; /* chardev minor */ 579 uint32_t dev_minor; /* chardev minor */
585 __u32 spare[10]; 580 uint32_t spare[10];
586}; 581};
587 582
588struct fuse_interrupt_in { 583struct fuse_interrupt_in {
589 __u64 unique; 584 uint64_t unique;
590}; 585};
591 586
592struct fuse_bmap_in { 587struct fuse_bmap_in {
593 __u64 block; 588 uint64_t block;
594 __u32 blocksize; 589 uint32_t blocksize;
595 __u32 padding; 590 uint32_t padding;
596}; 591};
597 592
598struct fuse_bmap_out { 593struct fuse_bmap_out {
599 __u64 block; 594 uint64_t block;
600}; 595};
601 596
602struct fuse_ioctl_in { 597struct fuse_ioctl_in {
603 __u64 fh; 598 uint64_t fh;
604 __u32 flags; 599 uint32_t flags;
605 __u32 cmd; 600 uint32_t cmd;
606 __u64 arg; 601 uint64_t arg;
607 __u32 in_size; 602 uint32_t in_size;
608 __u32 out_size; 603 uint32_t out_size;
609}; 604};
610 605
611struct fuse_ioctl_iovec { 606struct fuse_ioctl_iovec {
612 __u64 base; 607 uint64_t base;
613 __u64 len; 608 uint64_t len;
614}; 609};
615 610
616struct fuse_ioctl_out { 611struct fuse_ioctl_out {
617 __s32 result; 612 int32_t result;
618 __u32 flags; 613 uint32_t flags;
619 __u32 in_iovs; 614 uint32_t in_iovs;
620 __u32 out_iovs; 615 uint32_t out_iovs;
621}; 616};
622 617
623struct fuse_poll_in { 618struct fuse_poll_in {
624 __u64 fh; 619 uint64_t fh;
625 __u64 kh; 620 uint64_t kh;
626 __u32 flags; 621 uint32_t flags;
627 __u32 events; 622 uint32_t events;
628}; 623};
629 624
630struct fuse_poll_out { 625struct fuse_poll_out {
631 __u32 revents; 626 uint32_t revents;
632 __u32 padding; 627 uint32_t padding;
633}; 628};
634 629
635struct fuse_notify_poll_wakeup_out { 630struct fuse_notify_poll_wakeup_out {
636 __u64 kh; 631 uint64_t kh;
637}; 632};
638 633
639struct fuse_fallocate_in { 634struct fuse_fallocate_in {
640 __u64 fh; 635 uint64_t fh;
641 __u64 offset; 636 uint64_t offset;
642 __u64 length; 637 uint64_t length;
643 __u32 mode; 638 uint32_t mode;
644 __u32 padding; 639 uint32_t padding;
645}; 640};
646 641
647struct fuse_in_header { 642struct fuse_in_header {
648 __u32 len; 643 uint32_t len;
649 __u32 opcode; 644 uint32_t opcode;
650 __u64 unique; 645 uint64_t unique;
651 __u64 nodeid; 646 uint64_t nodeid;
652 __u32 uid; 647 uint32_t uid;
653 __u32 gid; 648 uint32_t gid;
654 __u32 pid; 649 uint32_t pid;
655 __u32 padding; 650 uint32_t padding;
656}; 651};
657 652
658struct fuse_out_header { 653struct fuse_out_header {
659 __u32 len; 654 uint32_t len;
660 __s32 error; 655 int32_t error;
661 __u64 unique; 656 uint64_t unique;
662}; 657};
663 658
664struct fuse_dirent { 659struct fuse_dirent {
665 __u64 ino; 660 uint64_t ino;
666 __u64 off; 661 uint64_t off;
667 __u32 namelen; 662 uint32_t namelen;
668 __u32 type; 663 uint32_t type;
669 char name[]; 664 char name[];
670}; 665};
671 666
672#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) 667#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
673#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1)) 668#define FUSE_DIRENT_ALIGN(x) \
669 (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
674#define FUSE_DIRENT_SIZE(d) \ 670#define FUSE_DIRENT_SIZE(d) \
675 FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) 671 FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
676 672
@@ -685,47 +681,47 @@ struct fuse_direntplus {
685 FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen) 681 FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen)
686 682
687struct fuse_notify_inval_inode_out { 683struct fuse_notify_inval_inode_out {
688 __u64 ino; 684 uint64_t ino;
689 __s64 off; 685 int64_t off;
690 __s64 len; 686 int64_t len;
691}; 687};
692 688
693struct fuse_notify_inval_entry_out { 689struct fuse_notify_inval_entry_out {
694 __u64 parent; 690 uint64_t parent;
695 __u32 namelen; 691 uint32_t namelen;
696 __u32 padding; 692 uint32_t padding;
697}; 693};
698 694
699struct fuse_notify_delete_out { 695struct fuse_notify_delete_out {
700 __u64 parent; 696 uint64_t parent;
701 __u64 child; 697 uint64_t child;
702 __u32 namelen; 698 uint32_t namelen;
703 __u32 padding; 699 uint32_t padding;
704}; 700};
705 701
706struct fuse_notify_store_out { 702struct fuse_notify_store_out {
707 __u64 nodeid; 703 uint64_t nodeid;
708 __u64 offset; 704 uint64_t offset;
709 __u32 size; 705 uint32_t size;
710 __u32 padding; 706 uint32_t padding;
711}; 707};
712 708
713struct fuse_notify_retrieve_out { 709struct fuse_notify_retrieve_out {
714 __u64 notify_unique; 710 uint64_t notify_unique;
715 __u64 nodeid; 711 uint64_t nodeid;
716 __u64 offset; 712 uint64_t offset;
717 __u32 size; 713 uint32_t size;
718 __u32 padding; 714 uint32_t padding;
719}; 715};
720 716
721/* Matches the size of fuse_write_in */ 717/* Matches the size of fuse_write_in */
722struct fuse_notify_retrieve_in { 718struct fuse_notify_retrieve_in {
723 __u64 dummy1; 719 uint64_t dummy1;
724 __u64 offset; 720 uint64_t offset;
725 __u32 size; 721 uint32_t size;
726 __u32 dummy2; 722 uint32_t dummy2;
727 __u64 dummy3; 723 uint64_t dummy3;
728 __u64 dummy4; 724 uint64_t dummy4;
729}; 725};
730 726
731#endif /* _LINUX_FUSE_H */ 727#endif /* _LINUX_FUSE_H */
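
Spelling the types as standard <stdint.h> names removes the __u64/__s32
macro shims, so the uapi header no longer redefines kernel type names in
userspace builds. A minimal userspace sketch that compiles against the
updated header (26 is FUSE_INIT in the protocol's opcode enum):

	#include <stdio.h>
	#include <linux/fuse.h>

	int main(void)
	{
		struct fuse_in_header hdr = {
			.len	= sizeof(hdr),
			.opcode	= 26,	/* FUSE_INIT */
		};

		printf("fuse_in_header: %zu bytes, opcode %u\n",
		       sizeof(hdr), hdr.opcode);
		return 0;
	}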
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7e0962ed7f8a..4d3124b39277 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5331,7 +5331,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
5331 5331
5332static int perf_swevent_init(struct perf_event *event) 5332static int perf_swevent_init(struct perf_event *event)
5333{ 5333{
5334 int event_id = event->attr.config; 5334 u64 event_id = event->attr.config;
5335 5335
5336 if (event->attr.type != PERF_TYPE_SOFTWARE) 5336 if (event->attr.type != PERF_TYPE_SOFTWARE)
5337 return -ENOENT; 5337 return -ENOENT;
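
attr.config is a __u64, so storing it in an int dropped the upper 32 bits
before the range check ran. A sketch of the aliasing the fix prevents
(the config value is illustrative):

	u64 config = 0x100000000ULL;	/* far out of range */
	int event_id = config;		/* old code: truncates to 0 */

	/* 0 == PERF_COUNT_SW_CPU_CLOCK, so the bogus config slipped past
	 * the "event_id >= PERF_COUNT_SW_MAX" check; with event_id kept
	 * as u64 the comparison sees the full value and -ENOENT wins. */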
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cc47812d3feb..14be27feda49 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -63,6 +63,7 @@
63DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = 63DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
64{ 64{
65 65
66 .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
66 .clock_base = 67 .clock_base =
67 { 68 {
68 { 69 {
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
1642 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); 1643 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1643 int i; 1644 int i;
1644 1645
1645 raw_spin_lock_init(&cpu_base->lock);
1646
1647 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 1646 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1648 cpu_base->clock_base[i].cpu_base = cpu_base; 1647 cpu_base->clock_base[i].cpu_base = cpu_base;
1649 timerqueue_init_head(&cpu_base->clock_base[i].active); 1648 timerqueue_init_head(&cpu_base->clock_base[i].active);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index bddd3d7a74b6..ffd4e111fd67 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -55,7 +55,7 @@ struct resource crashk_res = {
55 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 55 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
56}; 56};
57struct resource crashk_low_res = { 57struct resource crashk_low_res = {
58 .name = "Crash kernel low", 58 .name = "Crash kernel",
59 .start = 0, 59 .start = 0,
60 .end = 0, 60 .end = 0,
61 .flags = IORESOURCE_BUSY | IORESOURCE_MEM 61 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
@@ -1368,35 +1368,114 @@ static int __init parse_crashkernel_simple(char *cmdline,
1368 return 0; 1368 return 0;
1369} 1369}
1370 1370
1371#define SUFFIX_HIGH 0
1372#define SUFFIX_LOW 1
1373#define SUFFIX_NULL 2
1374static __initdata char *suffix_tbl[] = {
1375 [SUFFIX_HIGH] = ",high",
1376 [SUFFIX_LOW] = ",low",
1377 [SUFFIX_NULL] = NULL,
1378};
1379
1371/* 1380/*
1372 * That function is the entry point for command line parsing and should be 1381 * That function parses "suffix" crashkernel command lines like
1373 * called from the arch-specific code. 1382 *
1383 * crashkernel=size,[high|low]
1384 *
1385 * It returns 0 on success and -EINVAL on failure.
1374 */ 1386 */
1387static int __init parse_crashkernel_suffix(char *cmdline,
1388 unsigned long long *crash_size,
1389 unsigned long long *crash_base,
1390 const char *suffix)
1391{
1392 char *cur = cmdline;
1393
1394 *crash_size = memparse(cmdline, &cur);
1395 if (cmdline == cur) {
1396 pr_warn("crashkernel: memory value expected\n");
1397 return -EINVAL;
1398 }
1399
1400 /* check with suffix */
1401 if (strncmp(cur, suffix, strlen(suffix))) {
1402 pr_warn("crashkernel: unrecognized char\n");
1403 return -EINVAL;
1404 }
1405 cur += strlen(suffix);
1406 if (*cur != ' ' && *cur != '\0') {
1407 pr_warn("crashkernel: unrecognized char\n");
1408 return -EINVAL;
1409 }
1410
1411 return 0;
1412}
1413
1414static __init char *get_last_crashkernel(char *cmdline,
1415 const char *name,
1416 const char *suffix)
1417{
1418 char *p = cmdline, *ck_cmdline = NULL;
1419
1420 /* find crashkernel and use the last one if there are more */
1421 p = strstr(p, name);
1422 while (p) {
1423 char *end_p = strchr(p, ' ');
1424 char *q;
1425
1426 if (!end_p)
1427 end_p = p + strlen(p);
1428
1429 if (!suffix) {
1430 int i;
1431
1432 /* skip the one with any known suffix */
1433 for (i = 0; suffix_tbl[i]; i++) {
1434 q = end_p - strlen(suffix_tbl[i]);
1435 if (!strncmp(q, suffix_tbl[i],
1436 strlen(suffix_tbl[i])))
1437 goto next;
1438 }
1439 ck_cmdline = p;
1440 } else {
1441 q = end_p - strlen(suffix);
1442 if (!strncmp(q, suffix, strlen(suffix)))
1443 ck_cmdline = p;
1444 }
1445next:
1446 p = strstr(p+1, name);
1447 }
1448
1449 if (!ck_cmdline)
1450 return NULL;
1451
1452 return ck_cmdline;
1453}
1454
1375static int __init __parse_crashkernel(char *cmdline, 1455static int __init __parse_crashkernel(char *cmdline,
1376 unsigned long long system_ram, 1456 unsigned long long system_ram,
1377 unsigned long long *crash_size, 1457 unsigned long long *crash_size,
1378 unsigned long long *crash_base, 1458 unsigned long long *crash_base,
1379 const char *name) 1459 const char *name,
1460 const char *suffix)
1380{ 1461{
1381 char *p = cmdline, *ck_cmdline = NULL;
1382 char *first_colon, *first_space; 1462 char *first_colon, *first_space;
1463 char *ck_cmdline;
1383 1464
1384 BUG_ON(!crash_size || !crash_base); 1465 BUG_ON(!crash_size || !crash_base);
1385 *crash_size = 0; 1466 *crash_size = 0;
1386 *crash_base = 0; 1467 *crash_base = 0;
1387 1468
1388 /* find crashkernel and use the last one if there are more */ 1469 ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1389 p = strstr(p, name);
1390 while (p) {
1391 ck_cmdline = p;
1392 p = strstr(p+1, name);
1393 }
1394 1470
1395 if (!ck_cmdline) 1471 if (!ck_cmdline)
1396 return -EINVAL; 1472 return -EINVAL;
1397 1473
1398 ck_cmdline += strlen(name); 1474 ck_cmdline += strlen(name);
1399 1475
1476 if (suffix)
1477 return parse_crashkernel_suffix(ck_cmdline, crash_size,
1478 crash_base, suffix);
1400 /* 1479 /*
1401 * if the commandline contains a ':', then that's the extended 1480 * if the commandline contains a ':', then that's the extended
1402 * syntax -- if not, it must be the classic syntax 1481 * syntax -- if not, it must be the classic syntax
@@ -1413,13 +1492,26 @@ static int __init __parse_crashkernel(char *cmdline,
1413 return 0; 1492 return 0;
1414} 1493}
1415 1494
1495/*
1496 * That function is the entry point for command line parsing and should be
1497 * called from the arch-specific code.
1498 */
1416int __init parse_crashkernel(char *cmdline, 1499int __init parse_crashkernel(char *cmdline,
1417 unsigned long long system_ram, 1500 unsigned long long system_ram,
1418 unsigned long long *crash_size, 1501 unsigned long long *crash_size,
1419 unsigned long long *crash_base) 1502 unsigned long long *crash_base)
1420{ 1503{
1421 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, 1504 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1422 "crashkernel="); 1505 "crashkernel=", NULL);
1506}
1507
1508int __init parse_crashkernel_high(char *cmdline,
1509 unsigned long long system_ram,
1510 unsigned long long *crash_size,
1511 unsigned long long *crash_base)
1512{
1513 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1514 "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1423} 1515}
1424 1516
1425int __init parse_crashkernel_low(char *cmdline, 1517int __init parse_crashkernel_low(char *cmdline,
@@ -1428,7 +1520,7 @@ int __init parse_crashkernel_low(char *cmdline,
1428 unsigned long long *crash_base) 1520 unsigned long long *crash_base)
1429{ 1521{
1430 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, 1522 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1431 "crashkernel_low="); 1523 "crashkernel=", suffix_tbl[SUFFIX_LOW]);
1432} 1524}
1433 1525
1434static void update_vmcoreinfo_note(void) 1526static void update_vmcoreinfo_note(void)
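
The new suffixes let the main crash reservation live above 4G while an
optional ",low" region keeps DMA-reachable bounce buffers available. A
hedged sketch of the arch-side call sequence (x86 does something close to
this; the identifiers are existing kernel symbols):

	unsigned long long crash_size, crash_base;
	int ret;

	/* e.g. booted with "crashkernel=512M,high crashkernel=72M,low" */
	ret = parse_crashkernel_high(boot_command_line,
				     memblock_phys_mem_size(),
				     &crash_size, &crash_base);
	if (ret == 0 && crash_size > 0) {
		/* reserve crash_size above 4G, then call
		 * parse_crashkernel_low() for the companion region */
	}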
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e35be53f6613..3fed7f0cbcdf 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -794,16 +794,16 @@ out:
794} 794}
795 795
796#ifdef CONFIG_SYSCTL 796#ifdef CONFIG_SYSCTL
797/* This should be called with kprobe_mutex locked */
798static void __kprobes optimize_all_kprobes(void) 797static void __kprobes optimize_all_kprobes(void)
799{ 798{
800 struct hlist_head *head; 799 struct hlist_head *head;
801 struct kprobe *p; 800 struct kprobe *p;
802 unsigned int i; 801 unsigned int i;
803 802
803 mutex_lock(&kprobe_mutex);
804 /* If optimization is already allowed, just return */ 804 /* If optimization is already allowed, just return */
805 if (kprobes_allow_optimization) 805 if (kprobes_allow_optimization)
806 return; 806 goto out;
807 807
808 kprobes_allow_optimization = true; 808 kprobes_allow_optimization = true;
809 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 809 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -813,18 +813,22 @@ static void __kprobes optimize_all_kprobes(void)
813 optimize_kprobe(p); 813 optimize_kprobe(p);
814 } 814 }
815 printk(KERN_INFO "Kprobes globally optimized\n"); 815 printk(KERN_INFO "Kprobes globally optimized\n");
816out:
817 mutex_unlock(&kprobe_mutex);
816} 818}
817 819
818/* This should be called with kprobe_mutex locked */
819static void __kprobes unoptimize_all_kprobes(void) 820static void __kprobes unoptimize_all_kprobes(void)
820{ 821{
821 struct hlist_head *head; 822 struct hlist_head *head;
822 struct kprobe *p; 823 struct kprobe *p;
823 unsigned int i; 824 unsigned int i;
824 825
826 mutex_lock(&kprobe_mutex);
825 /* If optimization is already prohibited, just return */ 827 /* If optimization is already prohibited, just return */
826 if (!kprobes_allow_optimization) 828 if (!kprobes_allow_optimization) {
829 mutex_unlock(&kprobe_mutex);
827 return; 830 return;
831 }
828 832
829 kprobes_allow_optimization = false; 833 kprobes_allow_optimization = false;
830 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 834 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -834,11 +838,14 @@ static void __kprobes unoptimize_all_kprobes(void)
834 unoptimize_kprobe(p, false); 838 unoptimize_kprobe(p, false);
835 } 839 }
836 } 840 }
841 mutex_unlock(&kprobe_mutex);
842
837 /* Wait for unoptimizing completion */ 843 /* Wait for unoptimizing completion */
838 wait_for_kprobe_optimizer(); 844 wait_for_kprobe_optimizer();
839 printk(KERN_INFO "Kprobes globally unoptimized\n"); 845 printk(KERN_INFO "Kprobes globally unoptimized\n");
840} 846}
841 847
848static DEFINE_MUTEX(kprobe_sysctl_mutex);
842int sysctl_kprobes_optimization; 849int sysctl_kprobes_optimization;
843int proc_kprobes_optimization_handler(struct ctl_table *table, int write, 850int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
844 void __user *buffer, size_t *length, 851 void __user *buffer, size_t *length,
@@ -846,7 +853,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
846{ 853{
847 int ret; 854 int ret;
848 855
849 mutex_lock(&kprobe_mutex); 856 mutex_lock(&kprobe_sysctl_mutex);
850 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 857 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
851 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 858 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
852 859
@@ -854,7 +861,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
854 optimize_all_kprobes(); 861 optimize_all_kprobes();
855 else 862 else
856 unoptimize_all_kprobes(); 863 unoptimize_all_kprobes();
857 mutex_unlock(&kprobe_mutex); 864 mutex_unlock(&kprobe_sysctl_mutex);
858 865
859 return ret; 866 return ret;
860} 867}
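
Moving the kprobe_mutex acquisition inside the two functions removes the
double lock on the sysctl path; writers are now serialized by a dedicated
kprobe_sysctl_mutex instead. The resulting ordering, as a sketch:

	/*
	 * proc_kprobes_optimization_handler()
	 *   mutex_lock(&kprobe_sysctl_mutex)	serializes sysctl writers
	 *     (un)optimize_all_kprobes()
	 *       mutex_lock(&kprobe_mutex)	taken internally now
	 *     wait_for_kprobe_optimizer()	runs after kprobe_mutex is
	 *					dropped, so the optimizer
	 *					work can take it itself
	 */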
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 691dc2ef9baf..9eb7fed0bbaa 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
124 124
125static void __kthread_parkme(struct kthread *self) 125static void __kthread_parkme(struct kthread *self)
126{ 126{
127 __set_current_state(TASK_INTERRUPTIBLE); 127 __set_current_state(TASK_PARKED);
128 while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { 128 while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
129 if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) 129 if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
130 complete(&self->parked); 130 complete(&self->parked);
131 schedule(); 131 schedule();
132 __set_current_state(TASK_INTERRUPTIBLE); 132 __set_current_state(TASK_PARKED);
133 } 133 }
134 clear_bit(KTHREAD_IS_PARKED, &self->flags); 134 clear_bit(KTHREAD_IS_PARKED, &self->flags);
135 __set_current_state(TASK_RUNNING); 135 __set_current_state(TASK_RUNNING);
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
256} 256}
257EXPORT_SYMBOL(kthread_create_on_node); 257EXPORT_SYMBOL(kthread_create_on_node);
258 258
259static void __kthread_bind(struct task_struct *p, unsigned int cpu) 259static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
260{ 260{
261 /* Must have done schedule() in kthread() before we set_task_cpu */
262 if (!wait_task_inactive(p, state)) {
263 WARN_ON(1);
264 return;
265 }
261 /* It's safe because the task is inactive. */ 266 /* It's safe because the task is inactive. */
262 do_set_cpus_allowed(p, cpumask_of(cpu)); 267 do_set_cpus_allowed(p, cpumask_of(cpu));
263 p->flags |= PF_THREAD_BOUND; 268 p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
274 */ 279 */
275void kthread_bind(struct task_struct *p, unsigned int cpu) 280void kthread_bind(struct task_struct *p, unsigned int cpu)
276{ 281{
277 /* Must have done schedule() in kthread() before we set_task_cpu */ 282 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
278 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
279 WARN_ON(1);
280 return;
281 }
282 __kthread_bind(p, cpu);
283} 283}
284EXPORT_SYMBOL(kthread_bind); 284EXPORT_SYMBOL(kthread_bind);
285 285
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
324 return NULL; 324 return NULL;
325} 325}
326 326
327static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
328{
329 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
330 /*
331 * We clear the IS_PARKED bit here as we don't wait
332 * until the task has left the park code. So if we'd
333 * park before that happens we'd see the IS_PARKED bit
334 * which might be about to be cleared.
335 */
336 if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
337 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
338 __kthread_bind(k, kthread->cpu, TASK_PARKED);
339 wake_up_state(k, TASK_PARKED);
340 }
341}
342
327/** 343/**
328 * kthread_unpark - unpark a thread created by kthread_create(). 344 * kthread_unpark - unpark a thread created by kthread_create().
329 * @k: thread created by kthread_create(). 345 * @k: thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
336{ 352{
337 struct kthread *kthread = task_get_live_kthread(k); 353 struct kthread *kthread = task_get_live_kthread(k);
338 354
339 if (kthread) { 355 if (kthread)
340 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 356 __kthread_unpark(k, kthread);
341 /*
342 * We clear the IS_PARKED bit here as we don't wait
343 * until the task has left the park code. So if we'd
344 * park before that happens we'd see the IS_PARKED bit
345 * which might be about to be cleared.
346 */
347 if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
348 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
349 __kthread_bind(k, kthread->cpu);
350 wake_up_process(k);
351 }
352 }
353 put_task_struct(k); 357 put_task_struct(k);
354} 358}
355 359
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
407 trace_sched_kthread_stop(k); 411 trace_sched_kthread_stop(k);
408 if (kthread) { 412 if (kthread) {
409 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); 413 set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
410 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 414 __kthread_unpark(k, kthread);
411 wake_up_process(k); 415 wake_up_process(k);
412 wait_for_completion(&kthread->exited); 416 wait_for_completion(&kthread->exited);
413 } 417 }
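
With a dedicated TASK_PARKED state a parked thread can no longer be woken
by a stray wake_up_process(); only wake_up_state(k, TASK_PARKED) from
__kthread_unpark() resumes it. A minimal sketch of the API from a caller's
side (worker_fn is a placeholder; error handling elided):

	struct task_struct *tsk;

	tsk = kthread_create(worker_fn, NULL, "worker/%u", cpu);
	kthread_bind(tsk, cpu);
	wake_up_process(tsk);

	kthread_park(tsk);	/* waits in __kthread_parkme(), TASK_PARKED */
	kthread_unpark(tsk);	/* rebinds per-CPU threads, then wakes */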
diff --git a/kernel/signal.c b/kernel/signal.c
index dd72567767d9..598dc06be421 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2948,7 +2948,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2948 2948
2949static int do_tkill(pid_t tgid, pid_t pid, int sig) 2949static int do_tkill(pid_t tgid, pid_t pid, int sig)
2950{ 2950{
2951 struct siginfo info; 2951 struct siginfo info = {};
2952 2952
2953 info.si_signo = sig; 2953 info.si_signo = sig;
2954 info.si_errno = 0; 2954 info.si_errno = 0;
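
The empty initializer matters because struct siginfo is mostly a union
plus padding and do_tkill() fills in only a few fields; the rest used to
carry stale kernel stack bytes out to userspace. The leak path, sketched:

	/*
	 * do_tkill()
	 *   struct siginfo info;	union/padding left uninitialized
	 *   info.si_signo = sig; ...	only a handful of fields written
	 *   do_send_specific()		queues the whole struct
	 *     -> copy_siginfo_to_user() at delivery copies the stale
	 *	  bytes into the handler's siginfo_t
	 */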
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 8eaed9aa9cf0..02fc5c933673 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
185 } 185 }
186 get_task_struct(tsk); 186 get_task_struct(tsk);
187 *per_cpu_ptr(ht->store, cpu) = tsk; 187 *per_cpu_ptr(ht->store, cpu) = tsk;
188 if (ht->create) 188 if (ht->create) {
189 ht->create(cpu); 189 /*
190 * Make sure that the task has actually scheduled out
191 * into park position, before calling the create
192 * callback. At least the migration thread callback
193 * requires that the task is off the runqueue.
194 */
195 if (!wait_task_inactive(tsk, TASK_PARKED))
196 WARN_ON(1);
197 else
198 ht->create(cpu);
199 }
190 return 0; 200 return 0;
191} 201}
192 202
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 9e5b8c272eec..5a0f781cd729 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -739,12 +739,6 @@ static void blk_add_trace_rq_complete(void *ignore,
739 struct request_queue *q, 739 struct request_queue *q,
740 struct request *rq) 740 struct request *rq)
741{ 741{
742 struct blk_trace *bt = q->blk_trace;
743
744 /* if control ever passes through here, it's a request based driver */
745 if (unlikely(bt && !bt->rq_based))
746 bt->rq_based = true;
747
748 blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); 742 blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
749} 743}
750 744
@@ -780,24 +774,10 @@ static void blk_add_trace_bio_bounce(void *ignore,
780 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); 774 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
781} 775}
782 776
783static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error) 777static void blk_add_trace_bio_complete(void *ignore,
778 struct request_queue *q, struct bio *bio,
779 int error)
784{ 780{
785 struct request_queue *q;
786 struct blk_trace *bt;
787
788 if (!bio->bi_bdev)
789 return;
790
791 q = bdev_get_queue(bio->bi_bdev);
792 bt = q->blk_trace;
793
794 /*
795 * Request based drivers will generate both rq and bio completions.
796 * Ignore bio ones.
797 */
798 if (likely(!bt) || bt->rq_based)
799 return;
800
801 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); 781 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
802} 782}
803 783
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index a54f26f82eb2..e134d8f365dd 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -25,7 +25,8 @@
25 25
26static struct kmem_cache *user_ns_cachep __read_mostly; 26static struct kmem_cache *user_ns_cachep __read_mostly;
27 27
28static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, 28static bool new_idmap_permitted(const struct file *file,
29 struct user_namespace *ns, int cap_setid,
29 struct uid_gid_map *map); 30 struct uid_gid_map *map);
30 31
31static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) 32static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
@@ -612,10 +613,10 @@ static ssize_t map_write(struct file *file, const char __user *buf,
612 if (map->nr_extents != 0) 613 if (map->nr_extents != 0)
613 goto out; 614 goto out;
614 615
615 /* Require the appropriate privilege CAP_SETUID or CAP_SETGID 616 /*
616 * over the user namespace in order to set the id mapping. 617 * Adjusting namespace settings requires capabilities on the target.
617 */ 618 */
618 if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) 619 if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
619 goto out; 620 goto out;
620 621
621 /* Get a buffer */ 622 /* Get a buffer */
@@ -700,7 +701,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
700 701
701 ret = -EPERM; 702 ret = -EPERM;
702 /* Validate the user is allowed to use user id's mapped to. */ 703 /* Validate the user is allowed to use user id's mapped to. */
703 if (!new_idmap_permitted(ns, cap_setid, &new_map)) 704 if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
704 goto out; 705 goto out;
705 706
706 /* Map the lower ids from the parent user namespace to the 707 /* Map the lower ids from the parent user namespace to the
@@ -787,7 +788,8 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t
787 &ns->projid_map, &ns->parent->projid_map); 788 &ns->projid_map, &ns->parent->projid_map);
788} 789}
789 790
790static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, 791static bool new_idmap_permitted(const struct file *file,
792 struct user_namespace *ns, int cap_setid,
791 struct uid_gid_map *new_map) 793 struct uid_gid_map *new_map)
792{ 794{
793 /* Allow mapping to your own filesystem ids */ 795 /* Allow mapping to your own filesystem ids */
@@ -795,12 +797,12 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
795 u32 id = new_map->extent[0].lower_first; 797 u32 id = new_map->extent[0].lower_first;
796 if (cap_setid == CAP_SETUID) { 798 if (cap_setid == CAP_SETUID) {
797 kuid_t uid = make_kuid(ns->parent, id); 799 kuid_t uid = make_kuid(ns->parent, id);
798 if (uid_eq(uid, current_fsuid())) 800 if (uid_eq(uid, file->f_cred->fsuid))
799 return true; 801 return true;
800 } 802 }
801 else if (cap_setid == CAP_SETGID) { 803 else if (cap_setid == CAP_SETGID) {
802 kgid_t gid = make_kgid(ns->parent, id); 804 kgid_t gid = make_kgid(ns->parent, id);
803 if (gid_eq(gid, current_fsgid())) 805 if (gid_eq(gid, file->f_cred->fsgid))
804 return true; 806 return true;
805 } 807 }
806 } 808 }
@@ -811,8 +813,10 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid,
811 813
812 /* Allow the specified ids if we have the appropriate capability 814 /* Allow the specified ids if we have the appropriate capability
813 * (CAP_SETUID or CAP_SETGID) over the parent user namespace. 815 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
816 * And the opener of the id file also had the appropriate capability.
814 */ 817 */
815 if (ns_capable(ns->parent, cap_setid)) 818 if (ns_capable(ns->parent, cap_setid) &&
819 file_ns_capable(file, ns->parent, cap_setid))
816 return true; 820 return true;
817 821
818 return false; 822 return false;
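
Checking file->f_cred pins the permission test to whoever opened the map
file, closing the trick of getting a more privileged process to write to
an fd that a less privileged one opened. A userspace sketch of the write
itself (the pid and one-line mapping are illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>

	static int map_root_to_me(pid_t pid)
	{
		char path[64];
		int fd;

		snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)pid);
		fd = open(path, O_WRONLY);  /* opener's creds are what count now */
		if (fd < 0)
			return -1;
		/* "0 <uid> 1": map uid 0 in the ns to our uid outside it */
		dprintf(fd, "0 %d 1\n", (int)getuid());
		close(fd);
		return 0;
	}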
diff --git a/lib/Kconfig b/lib/Kconfig
index 3958dc4389f9..fe01d418b09a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -404,4 +404,7 @@ config OID_REGISTRY
404 help 404 help
405 Enable fast lookup object identifier registry. 405 Enable fast lookup object identifier registry.
406 406
407config UCS2_STRING
408 tristate
409
407endmenu 410endmenu
diff --git a/lib/Makefile b/lib/Makefile
index d7946ff75b2e..6e2cc561f761 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN $@
174 cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@ 174 cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
175 175
176clean-files += oid_registry_data.c 176clean-files += oid_registry_data.c
177
178obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bfe02b8fc55b..d23762e6652c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)
105 if (!strcmp(str, "force")) 105 if (!strcmp(str, "force"))
106 swiotlb_force = 1; 106 swiotlb_force = 1;
107 107
108 return 1; 108 return 0;
109} 109}
110__setup("swiotlb=", setup_io_tlb_npages); 110early_param("swiotlb", setup_io_tlb_npages);
111/* make io_tlb_overflow tunable too? */ 111/* make io_tlb_overflow tunable too? */
112 112
113unsigned long swiotlb_nr_tbl(void) 113unsigned long swiotlb_nr_tbl(void)
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)
115 return io_tlb_nslabs; 115 return io_tlb_nslabs;
116} 116}
117EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); 117EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
118
119/* default to 64MB */
120#define IO_TLB_DEFAULT_SIZE (64UL<<20)
121unsigned long swiotlb_size_or_default(void)
122{
123 unsigned long size;
124
125 size = io_tlb_nslabs << IO_TLB_SHIFT;
126
127 return size ? size : (IO_TLB_DEFAULT_SIZE);
128}
129
118/* Note that this doesn't work with highmem page */ 130/* Note that this doesn't work with highmem page */
119static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 131static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
120 volatile void *address) 132 volatile void *address)
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
188void __init 200void __init
189swiotlb_init(int verbose) 201swiotlb_init(int verbose)
190{ 202{
191 /* default to 64MB */ 203 size_t default_size = IO_TLB_DEFAULT_SIZE;
192 size_t default_size = 64UL<<20;
193 unsigned char *vstart; 204 unsigned char *vstart;
194 unsigned long bytes; 205 unsigned long bytes;
195 206
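
early_param makes the option available before early memory reservations
run, and swiotlb_size_or_default() gives that early code one consistent
answer. A sketch of the intended use (the crashkernel low reservation is
the motivating caller):

	/* in early arch setup, before swiotlb_init(): */
	unsigned long low_size = swiotlb_size_or_default();
	/* 64MB unless the user passed swiotlb=<nslabs> on the cmdline */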
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644
index 000000000000..6f500ef2301d
--- /dev/null
+++ b/lib/ucs2_string.c
@@ -0,0 +1,51 @@
1#include <linux/ucs2_string.h>
2#include <linux/module.h>
3
4/* Return the number of unicode characters in data */
5unsigned long
6ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
7{
8 unsigned long length = 0;
9
10 while (*s++ != 0 && length < maxlength)
11 length++;
12 return length;
13}
14EXPORT_SYMBOL(ucs2_strnlen);
15
16unsigned long
17ucs2_strlen(const ucs2_char_t *s)
18{
19 return ucs2_strnlen(s, ~0UL);
20}
21EXPORT_SYMBOL(ucs2_strlen);
22
23/*
 24 * Return the length of this string in bytes
25 * Note: this is NOT the same as the number of unicode characters
26 */
27unsigned long
28ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
29{
30 return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
31}
32EXPORT_SYMBOL(ucs2_strsize);
33
34int
35ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
36{
37 while (1) {
38 if (len == 0)
39 return 0;
40 if (*a < *b)
41 return -1;
42 if (*a > *b)
43 return 1;
44 if (*a == 0) /* implies *b == 0 */
45 return 0;
46 a++;
47 b++;
48 len--;
49 }
50}
51EXPORT_SYMBOL(ucs2_strncmp);
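
The helpers mirror their char counterparts for the 16-bit strings used by
EFI variable names. A hedged usage sketch in the comparison style the
efivars code needs:

	#include <linux/types.h>
	#include <linux/ucs2_string.h>

	/* true when two UCS-2 names of at most 'max' characters match */
	static bool names_equal(const ucs2_char_t *a, const ucs2_char_t *b,
				size_t max)
	{
		return ucs2_strnlen(a, max) == ucs2_strnlen(b, max) &&
		       ucs2_strncmp(a, b, max) == 0;
	}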
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ca9a7c6d7e97..1a12f5b9a0ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2961,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2961 break; 2961 break;
2962 } 2962 }
2963 2963
2964 if (absent || 2964 /*
2965 * We need call hugetlb_fault for both hugepages under migration
2966 * (in which case hugetlb_fault waits for the migration,) and
2967 * hwpoisoned hugepages (in which case we need to prevent the
2968 * caller from accessing to them.) In order to do this, we use
2969 * here is_swap_pte instead of is_hugetlb_entry_migration and
2970 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
2971 * both cases, and because we can't follow correct pages
2972 * directly from any kind of swap entries.
2973 */
2974 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
2965 ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { 2975 ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2966 int ret; 2976 int ret;
2967 2977
diff --git a/mm/memory.c b/mm/memory.c
index 13cbc420fead..ba94dec5b259 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2393,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2393} 2393}
2394EXPORT_SYMBOL(remap_pfn_range); 2394EXPORT_SYMBOL(remap_pfn_range);
2395 2395
2396/**
2397 * vm_iomap_memory - remap memory to userspace
2398 * @vma: user vma to map to
2399 * @start: start of area
2400 * @len: size of area
2401 *
2402 * This is a simplified io_remap_pfn_range() for common driver use. The
2403 * driver just needs to give us the physical memory range to be mapped,
2404 * we'll figure out the rest from the vma information.
2405 *
2406 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2407 * whatever write-combining details or similar.
2408 */
2409int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2410{
2411 unsigned long vm_len, pfn, pages;
2412
2413 /* Check that the physical memory area passed in looks valid */
2414 if (start + len < start)
2415 return -EINVAL;
2416 /*
2417 * You *really* shouldn't map things that aren't page-aligned,
2418 * but we've historically allowed it because IO memory might
2419 * just have smaller alignment.
2420 */
2421 len += start & ~PAGE_MASK;
2422 pfn = start >> PAGE_SHIFT;
2423 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2424 if (pfn + pages < pfn)
2425 return -EINVAL;
2426
2427 /* We start the mapping 'vm_pgoff' pages into the area */
2428 if (vma->vm_pgoff > pages)
2429 return -EINVAL;
2430 pfn += vma->vm_pgoff;
2431 pages -= vma->vm_pgoff;
2432
2433 /* Can we fit all of the mapping? */
2434 vm_len = vma->vm_end - vma->vm_start;
2435 if (vm_len >> PAGE_SHIFT > pages)
2436 return -EINVAL;
2437
2438 /* Ok, let it rip */
2439 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2440}
2441EXPORT_SYMBOL(vm_iomap_memory);
2442
2396static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2443static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2397 unsigned long addr, unsigned long end, 2444 unsigned long addr, unsigned long end,
2398 pte_fn_t fn, void *data) 2445 pte_fn_t fn, void *data)
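
The helper packages the offset, alignment and length validation that
driver mmap handlers routinely got wrong. A hedged driver sketch (my_dev
and its fields are hypothetical):

	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct my_dev *dev = file->private_data;

		/* optionally tweak caching before mapping */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/* bounds, alignment and vm_pgoff are all checked for us */
		return vm_iomap_memory(vma, dev->phys_base, dev->mem_size);
	}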
diff --git a/mm/mmap.c b/mm/mmap.c
index 0db0de1c2fbe..033094ba62dc 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2305,7 +2305,7 @@ static void unmap_region(struct mm_struct *mm,
2305 update_hiwater_rss(mm); 2305 update_hiwater_rss(mm);
2306 unmap_vmas(&tlb, vma, start, end); 2306 unmap_vmas(&tlb, vma, start, end);
2307 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 2307 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2308 next ? next->vm_start : 0); 2308 next ? next->vm_start : USER_PGTABLES_CEILING);
2309 tlb_finish_mmu(&tlb, start, end); 2309 tlb_finish_mmu(&tlb, start, end);
2310} 2310}
2311 2311
@@ -2685,7 +2685,7 @@ void exit_mmap(struct mm_struct *mm)
2685 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 2685 /* Use -1 here to ensure all VMAs in the mm are unmapped */
2686 unmap_vmas(&tlb, vma, 0, -1); 2686 unmap_vmas(&tlb, vma, 0, -1);
2687 2687
2688 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); 2688 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
2689 tlb_finish_mmu(&tlb, 0, -1); 2689 tlb_finish_mmu(&tlb, 0, -1);
2690 2690
2691 /* 2691 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 88c5fed8b9a4..669fba39be1a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3188,9 +3188,9 @@ int kswapd_run(int nid)
3188 if (IS_ERR(pgdat->kswapd)) { 3188 if (IS_ERR(pgdat->kswapd)) {
3189 /* failure at boot is fatal */ 3189 /* failure at boot is fatal */
3190 BUG_ON(system_state == SYSTEM_BOOTING); 3190 BUG_ON(system_state == SYSTEM_BOOTING);
3191 pgdat->kswapd = NULL;
3192 pr_err("Failed to start kswapd on node %d\n", nid); 3191 pr_err("Failed to start kswapd on node %d\n", nid);
3193 ret = PTR_ERR(pgdat->kswapd); 3192 ret = PTR_ERR(pgdat->kswapd);
3193 pgdat->kswapd = NULL;
3194 } 3194 }
3195 return ret; 3195 return ret;
3196} 3196}
diff --git a/net/802/mrp.c b/net/802/mrp.c
index a4cc3229952a..e085bcc754f6 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
870 * all pending messages before the applicant is gone. 870 * all pending messages before the applicant is gone.
871 */ 871 */
872 del_timer_sync(&app->join_timer); 872 del_timer_sync(&app->join_timer);
873
874 spin_lock(&app->lock);
873 mrp_mad_event(app, MRP_EVENT_TX); 875 mrp_mad_event(app, MRP_EVENT_TX);
874 mrp_pdu_queue(app); 876 mrp_pdu_queue(app);
877 spin_unlock(&app->lock);
878
875 mrp_queue_xmit(app); 879 mrp_queue_xmit(app);
876 880
877 dev_mc_del(dev, appl->group_address); 881 dev_mc_del(dev, appl->group_address);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 0488d70c8c35..fa563e497c48 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
169 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); 169 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
170} 170}
171 171
172int batadv_is_my_mac(const uint8_t *addr) 172int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
173{ 173{
174 const struct batadv_hard_iface *hard_iface; 174 const struct batadv_hard_iface *hard_iface;
175 175
@@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr)
178 if (hard_iface->if_status != BATADV_IF_ACTIVE) 178 if (hard_iface->if_status != BATADV_IF_ACTIVE)
179 continue; 179 continue;
180 180
181 if (hard_iface->soft_iface != bat_priv->soft_iface)
182 continue;
183
181 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { 184 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
182 rcu_read_unlock(); 185 rcu_read_unlock();
183 return 1; 186 return 1;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index ced08b936a96..d40910dfc8ea 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -162,7 +162,7 @@ extern struct workqueue_struct *batadv_event_workqueue;
162 162
163int batadv_mesh_init(struct net_device *soft_iface); 163int batadv_mesh_init(struct net_device *soft_iface);
164void batadv_mesh_free(struct net_device *soft_iface); 164void batadv_mesh_free(struct net_device *soft_iface);
165int batadv_is_my_mac(const uint8_t *addr); 165int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
166struct batadv_hard_iface * 166struct batadv_hard_iface *
167batadv_seq_print_text_primary_if_get(struct seq_file *seq); 167batadv_seq_print_text_primary_if_get(struct seq_file *seq);
168int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 168int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 5ee21cebbbb0..319f2906c71a 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
402 goto out; 402 goto out;
403 403
404 /* not for me */ 404 /* not for me */
405 if (!batadv_is_my_mac(ethhdr->h_dest)) 405 if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
406 goto out; 406 goto out;
407 407
408 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; 408 icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
@@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
416 } 416 }
417 417
418 /* packet for me */ 418 /* packet for me */
419 if (batadv_is_my_mac(icmp_packet->dst)) 419 if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
420 return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size); 420 return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
421 421
422 /* TTL exceeded */ 422 /* TTL exceeded */
@@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
548 return router; 548 return router;
549} 549}
550 550
551static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) 551static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
552 struct sk_buff *skb, int hdr_size)
552{ 553{
553 struct ethhdr *ethhdr; 554 struct ethhdr *ethhdr;
554 555
@@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
567 return -1; 568 return -1;
568 569
569 /* not for me */ 570 /* not for me */
570 if (!batadv_is_my_mac(ethhdr->h_dest)) 571 if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
571 return -1; 572 return -1;
572 573
573 return 0; 574 return 0;
@@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
582 char tt_flag; 583 char tt_flag;
583 size_t packet_size; 584 size_t packet_size;
584 585
585 if (batadv_check_unicast_packet(skb, hdr_size) < 0) 586 if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
586 return NET_RX_DROP; 587 return NET_RX_DROP;
587 588
588 /* I could need to modify it */ 589 /* I could need to modify it */
@@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
614 case BATADV_TT_RESPONSE: 615 case BATADV_TT_RESPONSE:
615 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); 616 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
616 617
617 if (batadv_is_my_mac(tt_query->dst)) { 618 if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
618 /* packet needs to be linearized to access the TT 619 /* packet needs to be linearized to access the TT
619 * changes 620 * changes
620 */ 621 */
@@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
657 struct batadv_roam_adv_packet *roam_adv_packet; 658 struct batadv_roam_adv_packet *roam_adv_packet;
658 struct batadv_orig_node *orig_node; 659 struct batadv_orig_node *orig_node;
659 660
660 if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) 661 if (batadv_check_unicast_packet(bat_priv, skb,
662 sizeof(*roam_adv_packet)) < 0)
661 goto out; 663 goto out;
662 664
663 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); 665 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
664 666
665 roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; 667 roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
666 668
667 if (!batadv_is_my_mac(roam_adv_packet->dst)) 669 if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
668 return batadv_route_unicast_packet(skb, recv_if); 670 return batadv_route_unicast_packet(skb, recv_if);
669 671
670 /* check if it is a backbone gateway. we don't accept 672 /* check if it is a backbone gateway. we don't accept
@@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
967 * last time) the packet had an updated information or not 969 * last time) the packet had an updated information or not
968 */ 970 */
969 curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); 971 curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
970 if (!batadv_is_my_mac(unicast_packet->dest)) { 972 if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
 	orig_node = batadv_orig_hash_find(bat_priv,
 					  unicast_packet->dest);
 	/* if it is not possible to find the orig_node representing the
@@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 	if (is4addr)
 		hdr_size = sizeof(*unicast_4addr_packet);
 
-	if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
 	if (!batadv_check_unicast_ttvn(bat_priv, skb))
 		return NET_RX_DROP;
 
 	/* packet for me */
-	if (batadv_is_my_mac(unicast_packet->dest)) {
+	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
 		if (is4addr) {
 			batadv_dat_inc_counter(bat_priv,
 					       unicast_4addr_packet->subtype);
@@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
 	struct sk_buff *new_skb = NULL;
 	int ret;
 
-	if (batadv_check_unicast_packet(skb, hdr_size) < 0)
+	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
 		return NET_RX_DROP;
 
 	if (!batadv_check_unicast_ttvn(bat_priv, skb))
@@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
 	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
 
 	/* packet for me */
-	if (batadv_is_my_mac(unicast_packet->dest)) {
+	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
 		ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
 
 		if (ret == NET_RX_DROP)
@@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
 		goto out;
 
 	/* ignore broadcasts sent by myself */
-	if (batadv_is_my_mac(ethhdr->h_source))
+	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
 		goto out;
 
 	bcast_packet = (struct batadv_bcast_packet *)skb->data;
 
 	/* ignore broadcasts originated by myself */
-	if (batadv_is_my_mac(bcast_packet->orig))
+	if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
 		goto out;
 
 	if (bcast_packet->header.ttl < 2)
@@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb,
 	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 	/* not for me */
-	if (!batadv_is_my_mac(ethhdr->h_dest))
+	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
 		return NET_RX_DROP;
 
 	/* ignore own packets */
-	if (batadv_is_my_mac(vis_packet->vis_orig))
+	if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
 		return NET_RX_DROP;
 
-	if (batadv_is_my_mac(vis_packet->sender_orig))
+	if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
 		return NET_RX_DROP;
 
 	switch (vis_packet->vis_type) {
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 98a66a021a60..7abee19567e9 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1953,7 +1953,7 @@ out:
 bool batadv_send_tt_response(struct batadv_priv *bat_priv,
 			     struct batadv_tt_query_packet *tt_request)
 {
-	if (batadv_is_my_mac(tt_request->dst)) {
+	if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
 		/* don't answer backbone gws! */
 		if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
 			return true;
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c053244b97bd..6a1e646be96d 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
 
 	/* Are we the target for this VIS packet? */
 	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC &&
-	    batadv_is_my_mac(vis_packet->target_orig))
+	    batadv_is_my_mac(bat_priv, vis_packet->target_orig))
 		are_target = 1;
 
 	spin_lock_bh(&bat_priv->vis.hash_lock);
@@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
 		batadv_send_list_add(bat_priv, info);
 
 		/* ... we're not the recipient (and thus need to forward). */
-	} else if (!batadv_is_my_mac(packet->target_orig)) {
+	} else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
 		batadv_send_list_add(bat_priv, info);
 	}
 
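
All of the batman-adv hunks above make the same change: a bat_priv argument is threaded through batadv_is_my_mac(), so the "is this my address?" test is scoped to the mesh interface the packet belongs to rather than to any interface on the host. A minimal userspace sketch of that pattern (struct and names are illustrative stand-ins, not the kernel API):

#include <stdbool.h>
#include <string.h>

struct mesh_priv {
	unsigned char mac[6];		/* address of this soft interface */
};

/* Scoped check: compare only against this interface's own address. */
static bool is_my_mac(const struct mesh_priv *priv, const unsigned char *addr)
{
	return memcmp(priv->mac, addr, 6) == 0;
}
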
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ef1b91431c6b..459dab22b3f6 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p)
 	struct net_device *dev = p->dev;
 	struct net_bridge *br = p->br;
 
-	if (netif_running(dev) && netif_oper_up(dev))
+	if (!(p->flags & BR_ADMIN_COST) &&
+	    netif_running(dev) && netif_oper_up(dev))
 		p->path_cost = port_cost(dev);
 
 	if (!netif_running(br->dev))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3cbf5beb3d4b..d2c043a857b6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -156,6 +156,7 @@ struct net_bridge_port
 #define BR_BPDU_GUARD		0x00000002
 #define BR_ROOT_BLOCK		0x00000004
 #define BR_MULTICAST_FAST_LEAVE	0x00000008
+#define BR_ADMIN_COST		0x00000010
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 	u32 multicast_startup_queries_sent;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 0bdb4ebd362b..d45e760141bb 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
 	    path_cost > BR_MAX_PATH_COST)
 		return -ERANGE;
 
+	p->flags |= BR_ADMIN_COST;
 	p->path_cost = path_cost;
 	br_configuration_update(p->br);
 	br_port_state_selection(p->br);
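
The three bridge hunks implement one idea: a user-configured path cost becomes sticky. br_stp_set_path_cost() raises BR_ADMIN_COST, and br_port_carrier_check() then skips its automatic recalculation. A compilable sketch of the pattern, with stand-in names:

#include <assert.h>

#define ADMIN_COST 0x10			/* stand-in for BR_ADMIN_COST */

struct port {
	unsigned long flags;
	unsigned long path_cost;
};

static void set_path_cost_admin(struct port *p, unsigned long cost)
{
	p->flags |= ADMIN_COST;		/* remember that the user pinned this */
	p->path_cost = cost;
}

static void carrier_check(struct port *p, unsigned long auto_cost)
{
	if (!(p->flags & ADMIN_COST))	/* recalculate only if not pinned */
		p->path_cost = auto_cost;
}

int main(void)
{
	struct port p = { 0, 100 };

	carrier_check(&p, 19);		/* auto value applies: cost = 19 */
	set_path_cost_admin(&p, 4);
	carrier_check(&p, 19);		/* ignored: cost stays 4 */
	assert(p.path_cost == 4);
	return 0;
}
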
diff --git a/net/core/dev.c b/net/core/dev.c
index e7d68ed8aafe..b24ab0e98eb4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2148,6 +2148,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
 	struct net_device *dev = skb->dev;
 	const char *driver = "";
 
+	if (!net_ratelimit())
+		return;
+
 	if (dev && dev->dev.parent)
 		driver = dev_driver_string(dev->dev.parent);
 
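
The dev.c hunk moves the net_ratelimit() test to the top of skb_warn_bad_offload(), so the function bails out before doing any of the string work it would otherwise throw away. The same shape in standalone C (the one-per-second limiter below is only a stand-in for net_ratelimit()):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool ratelimit(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (now == last)
		return false;		/* at most one message per second */
	last = now;
	return true;
}

static void warn_bad_offload(const char *driver)
{
	if (!ratelimit())
		return;			/* skip the expensive formatting */

	fprintf(stderr, "bad offload, driver=%s\n", driver);
}
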
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 3b4f0cd2e63e..4cfe34d4cc96 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 
 	/* skb is pure payload to encrypt */
 
-	err = -ENOMEM;
-
 	esp = x->data;
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);
@@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	}
 
 	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
-	if (!tmp)
+	if (!tmp) {
+		err = -ENOMEM;
 		goto error;
+	}
 
 	seqhi = esp_tmp_seqhi(tmp);
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a6445b843ef4..52c273ea05c3 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg)
 	if (!head->dev)
 		goto out_rcu_unlock;
 
-	/* skb dst is stale, drop it, and perform route lookup again */
-	skb_dst_drop(head);
+	/* skb has no dst, perform route lookup again */
 	iph = ip_hdr(head);
 	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
 				   iph->tos, head->dev);
@@ -523,9 +522,16 @@ found:
 		qp->q.max_size = skb->len + ihl;
 
 	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-	    qp->q.meat == qp->q.len)
-		return ip_frag_reasm(qp, prev, dev);
+	    qp->q.meat == qp->q.len) {
+		unsigned long orefdst = skb->_skb_refdst;
 
+		skb->_skb_refdst = 0UL;
+		err = ip_frag_reasm(qp, prev, dev);
+		skb->_skb_refdst = orefdst;
+		return err;
+	}
+
+	skb_dst_drop(skb);
 	inet_frag_lru_move(&qp->q);
 	return -EINPROGRESS;
 
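
Both fragment hunks (this one and the net/ipv6/reassembly.c one further down) use the same save/clear/restore dance on skb->_skb_refdst: the caller's destination reference is stashed, reassembly runs against a cleared field, and the original is put back before returning. A stand-in model of that dance (pkt.refdst stands in for skb->_skb_refdst, reassemble() for ip_frag_reasm()):

struct pkt {
	unsigned long refdst;
};

static int reassemble(struct pkt *p)
{
	p->refdst = 42UL;		/* callee installs a fresh reference */
	return 0;
}

static int frag_queue(struct pkt *p)
{
	unsigned long orefdst = p->refdst;	/* save */
	int err;

	p->refdst = 0UL;			/* clear for the callee */
	err = reassemble(p);
	p->refdst = orefdst;			/* restore caller's state */
	return err;
}
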
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index c30130062cd6..c49dcd0284a0 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
 	return dev_match;
 }
 
+static bool rpfilter_is_local(const struct sk_buff *skb)
+{
+	const struct rtable *rt = skb_rtable(skb);
+	return rt && (rt->rt_flags & RTCF_LOCAL);
+}
+
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_rpfilter_info *info;
@@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	info = par->matchinfo;
 	invert = info->flags & XT_RPFILTER_INVERT;
 
-	if (par->in->flags & IFF_LOOPBACK)
+	if (rpfilter_is_local(skb))
 		return true ^ invert;
 
 	iph = ip_hdr(skb);
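
rpfilter_is_local() replaces the IFF_LOOPBACK device-flag test with a check on the cached routing result, which also covers locally destined packets arriving on non-loopback devices. Reduced to its essentials (RT_LOCAL is a stand-in for RTCF_LOCAL):

#include <stdbool.h>
#include <stddef.h>

#define RT_LOCAL 0x1			/* stand-in for RTCF_LOCAL */

struct route {
	unsigned int flags;
};

/* Classify by what routing decided, not by which device delivered it. */
static bool is_local(const struct route *rt)
{
	return rt != NULL && (rt->flags & RT_LOCAL);
}
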
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index ef54377fb11c..397e0f69435f 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 	 * hasn't changed since we received the original syn, but I see
 	 * no easy way to do this.
 	 */
-	flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
-			   RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
+			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
 			   inet_sk_flowi_flags(sk),
 			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
 			   ireq->loc_addr, th->source, th->dest);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3bd55bad230a..13b9c08fc158 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -113,6 +113,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2;
 #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
 #define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
+#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 
 #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
@@ -3564,6 +3565,27 @@ static void tcp_send_challenge_ack(struct sock *sk)
 	}
 }
 
+static void tcp_store_ts_recent(struct tcp_sock *tp)
+{
+	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+	tp->rx_opt.ts_recent_stamp = get_seconds();
+}
+
+static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+{
+	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
+		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
+		 * extra check below makes sure this can only happen
+		 * for pure ACK frames. -DaveM
+		 *
+		 * Not only, also it occurs for expired timestamps.
+		 */
+
+		if (tcp_paws_check(&tp->rx_opt, 0))
+			tcp_store_ts_recent(tp);
+	}
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3607,6 +3629,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	prior_fackets = tp->fackets_out;
 	prior_in_flight = tcp_packets_in_flight(tp);
 
+	/* ts_recent update must be made after we are sure that the packet
+	 * is in window.
+	 */
+	if (flag & FLAG_UPDATE_TS_RECENT)
+		tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+
 	if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
 		/* Window is constant, pure forward advance.
 		 * No more checks are required.
@@ -3927,27 +3955,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
 EXPORT_SYMBOL(tcp_parse_md5sig_option);
 #endif
 
-static inline void tcp_store_ts_recent(struct tcp_sock *tp)
-{
-	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-	tp->rx_opt.ts_recent_stamp = get_seconds();
-}
-
-static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
-{
-	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
-		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
-		 * extra check below makes sure this can only happen
-		 * for pure ACK frames. -DaveM
-		 *
-		 * Not only, also it occurs for expired timestamps.
-		 */
-
-		if (tcp_paws_check(&tp->rx_opt, 0))
-			tcp_store_ts_recent(tp);
-	}
-}
-
 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
  *
  * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
@@ -5543,14 +5550,9 @@ slow_path:
 	return 0;
 
 step5:
-	if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+	if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
 		goto discard;
 
-	/* ts_recent update must be made after we are sure that the packet
-	 * is in window.
-	 */
-	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
 	tcp_rcv_rtt_measure_ts(sk, skb);
 
 	/* Process urgent data. */
@@ -5986,7 +5988,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	/* step 5: check the ACK field */
 	if (true) {
-		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
+		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+						  FLAG_UPDATE_TS_RECENT) > 0;
 
 		switch (sk->sk_state) {
 		case TCP_SYN_RECV:
@@ -6137,11 +6140,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		}
 	}
 
-	/* ts_recent update must be made after we are sure that the packet
-	 * is in window.
-	 */
-	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
-
 	/* step 6: check the URG bit */
 	tcp_urg(sk, skb, th);
 
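
The tcp_input.c rework is a single change seen from several angles: the ts_recent update moves from the callers into tcp_ack(), gated by the new FLAG_UPDATE_TS_RECENT, so it can only happen after the ACK has passed the in-window checks. A compilable model of that control flow, with all names illustrative:

#include <stdbool.h>

#define FLAG_SLOWPATH		0x100
#define FLAG_UPDATE_TS_RECENT	0x4000

struct conn {
	unsigned int ts_recent;
};

static bool ack_in_window(const struct conn *c)
{
	(void)c;
	return true;		/* stand-in for the real sequence checks */
}

static int handle_ack(struct conn *c, unsigned int tsval, int flags)
{
	if (!ack_in_window(c))
		return -1;	/* old ACK: ts_recent must not move */

	if (flags & FLAG_UPDATE_TS_RECENT)
		c->ts_recent = tsval;	/* safe: validated in-window first */

	return 0;
}
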
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b44cf81d8178..509912a5ff98 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
-	/* make sure skb->data is aligned on arches that require it */
-	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) {
+	/* make sure skb->data is aligned on arches that require it
+	 * and check if ack-trimming & collapsing extended the headroom
+	 * beyond what csum_start can cover.
+	 */
+	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+		     skb_headroom(skb) >= 0xFFFF)) {
 		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
 						   GFP_ATOMIC);
 		return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
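
The widened condition exists because csum_start is a 16-bit offset from the head of the buffer; once trimming and collapsing have grown the headroom to 0xFFFF bytes or more, the offset can no longer be represented and the skb has to be re-copied before transmission. The test in isolation, as a hedged sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define NET_IP_ALIGN 2

/* Copy when the data pointer is misaligned for this arch, or when the
 * headroom exceeds what a 16-bit csum_start offset can express. */
static bool retransmit_needs_copy(uintptr_t data, size_t headroom)
{
	return (NET_IP_ALIGN && (data & 3)) || headroom >= 0xFFFF;
}
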
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a459c4f5b769..dae802c0af7c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 			       struct net_device *dev);
 
-static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
-
 static struct ipv6_devconf ipv6_devconf __read_mostly = {
 	.forwarding		= 0,
 	.hop_limit		= IPV6_DEFAULT_HOPLIMIT,
@@ -837,7 +835,7 @@ out2:
 	rcu_read_unlock_bh();
 
 	if (likely(err == 0))
-		atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa);
+		inet6addr_notifier_call_chain(NETDEV_UP, ifa);
 	else {
 		kfree(ifa);
 		ifa = ERR_PTR(err);
@@ -927,7 +925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
 	ipv6_ifa_notify(RTM_DELADDR, ifp);
 
-	atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp);
+	inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
 
 	/*
 	 * Purge or update corresponding prefix
@@ -2988,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
 		if (state != INET6_IFADDR_STATE_DEAD) {
 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
-			atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
 		}
 		in6_ifa_put(ifa);
 
@@ -4869,22 +4867,6 @@ static struct pernet_operations addrconf_ops = {
 	.exit = addrconf_exit_net,
 };
 
-/*
- *	Device notifier
- */
-
-int register_inet6addr_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&inet6addr_chain, nb);
-}
-EXPORT_SYMBOL(register_inet6addr_notifier);
-
-int unregister_inet6addr_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
-}
-EXPORT_SYMBOL(unregister_inet6addr_notifier);
-
 static struct rtnl_af_ops inet6_ops = {
 	.family		  = AF_INET6,
 	.fill_link_af	  = inet6_fill_link_af,
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index d051e5f4bf34..72104562c864 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr)
 }
 EXPORT_SYMBOL(__ipv6_addr_type);
 
+static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
+
+int register_inet6addr_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&inet6addr_chain, nb);
+}
+EXPORT_SYMBOL(register_inet6addr_notifier);
+
+int unregister_inet6addr_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
+}
+EXPORT_SYMBOL(unregister_inet6addr_notifier);
+
+int inet6addr_notifier_call_chain(unsigned long val, void *v)
+{
+	return atomic_notifier_call_chain(&inet6addr_chain, val, v);
+}
+EXPORT_SYMBOL(inet6addr_notifier_call_chain);
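
The addrconf hunks move the inet6addr chain into addrconf_core.c and hide it behind a call-chain wrapper, so other code can fire notifications without reaching into addrconf.c or touching the chain head directly. A userspace model of that layout (plain function pointers stand in for the atomic notifier chain):

#include <stddef.h>

typedef int (*notifier_fn)(unsigned long event, void *data);

#define MAX_NOTIFIERS 8

static notifier_fn chain[MAX_NOTIFIERS];	/* private to this file */

static int register_notifier(notifier_fn fn)
{
	for (size_t i = 0; i < MAX_NOTIFIERS; i++) {
		if (!chain[i]) {
			chain[i] = fn;
			return 0;
		}
	}
	return -1;
}

/* Callers use this wrapper; the chain head itself is never exposed. */
static int notifier_call_chain(unsigned long event, void *data)
{
	int ret = 0;

	for (size_t i = 0; i < MAX_NOTIFIERS && ret == 0; i++)
		if (chain[i])
			ret = chain[i](event, data);
	return ret;
}
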
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index 5060d54199ab..e0983f3648a6 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
 	return ret;
 }
 
+static bool rpfilter_is_local(const struct sk_buff *skb)
+{
+	const struct rt6_info *rt = (const void *) skb_dst(skb);
+	return rt && (rt->rt6i_flags & RTF_LOCAL);
+}
+
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_rpfilter_info *info = par->matchinfo;
@@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	struct ipv6hdr *iph;
 	bool invert = info->flags & XT_RPFILTER_INVERT;
 
-	if (par->in->flags & IFF_LOOPBACK)
+	if (rpfilter_is_local(skb))
 		return true ^ invert;
 
 	iph = ipv6_hdr(skb);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 196ab9347ad1..0ba10e53a629 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -330,9 +330,17 @@ found:
 	}
 
 	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-	    fq->q.meat == fq->q.len)
-		return ip6_frag_reasm(fq, prev, dev);
+	    fq->q.meat == fq->q.len) {
+		int res;
+		unsigned long orefdst = skb->_skb_refdst;
+
+		skb->_skb_refdst = 0UL;
+		res = ip6_frag_reasm(fq, prev, dev);
+		skb->_skb_refdst = orefdst;
+		return res;
+	}
 
+	skb_dst_drop(skb);
 	inet_frag_lru_move(&fq->q);
 	return -1;
 
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 29340a9a6fb9..e1b37f5a2691 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -303,7 +303,8 @@ static void iriap_disconnect_indication(void *instance, void *sap,
 {
 	struct iriap_cb *self;
 
-	IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
+	IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__,
+		   irlmp_reason_str(reason), reason);
 
 	self = instance;
 
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 6115a44c0a24..1064621da6f6 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -66,8 +66,15 @@ const char *irlmp_reasons[] = {
 	"LM_LAP_RESET",
 	"LM_INIT_DISCONNECT",
 	"ERROR, NOT USED",
+	"UNKNOWN",
 };
 
+const char *irlmp_reason_str(LM_REASON reason)
+{
+	reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1);
+	return irlmp_reasons[reason];
+}
+
 /*
  *  Function irlmp_init (void)
  *
@@ -747,7 +754,8 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
 {
 	struct lsap_cb *lsap;
 
-	IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
+	IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__,
+		   irlmp_reason_str(reason), reason);
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
 
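
irlmp_reason_str() turns a peer-influenced reason code into a safe table lookup by clamping it to the final "UNKNOWN" slot instead of indexing past the array. The same guard in standalone C:

#include <stdio.h>

static const char *reasons[] = {
	"LM_USER_REQUEST",
	"LM_LAP_DISCONNECT",
	"UNKNOWN",			/* catch-all, must stay last */
};

#define NUM_REASONS (sizeof(reasons) / sizeof(reasons[0]))

static const char *reason_str(unsigned int reason)
{
	if (reason >= NUM_REASONS)
		reason = NUM_REASONS - 1;	/* clamp, don't trust the peer */
	return reasons[reason];
}

int main(void)
{
	printf("%s\n", reason_str(250));	/* prints "UNKNOWN" */
	return 0;
}
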
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 58150f877ec3..9ed49ad0380f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
-u32 ieee80211_idle_off(struct ieee80211_local *local)
+static u32 __ieee80211_idle_off(struct ieee80211_local *local)
 {
 	if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
 		return 0;
@@ -87,7 +87,7 @@ u32 ieee80211_idle_off(struct ieee80211_local *local)
 	return IEEE80211_CONF_CHANGE_IDLE;
 }
 
-static u32 ieee80211_idle_on(struct ieee80211_local *local)
+static u32 __ieee80211_idle_on(struct ieee80211_local *local)
 {
 	if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
 		return 0;
@@ -98,16 +98,18 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
 	return IEEE80211_CONF_CHANGE_IDLE;
 }
 
-void ieee80211_recalc_idle(struct ieee80211_local *local)
+static u32 __ieee80211_recalc_idle(struct ieee80211_local *local,
+				   bool force_active)
 {
 	bool working = false, scanning, active;
 	unsigned int led_trig_start = 0, led_trig_stop = 0;
 	struct ieee80211_roc_work *roc;
-	u32 change;
 
 	lockdep_assert_held(&local->mtx);
 
-	active = !list_empty(&local->chanctx_list) || local->monitors;
+	active = force_active ||
+		 !list_empty(&local->chanctx_list) ||
+		 local->monitors;
 
 	if (!local->ops->remain_on_channel) {
 		list_for_each_entry(roc, &local->roc_list, list) {
@@ -132,9 +134,18 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
 	ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
 
 	if (working || scanning || active)
-		change = ieee80211_idle_off(local);
-	else
-		change = ieee80211_idle_on(local);
+		return __ieee80211_idle_off(local);
+	return __ieee80211_idle_on(local);
+}
+
+u32 ieee80211_idle_off(struct ieee80211_local *local)
+{
+	return __ieee80211_recalc_idle(local, true);
+}
+
+void ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+	u32 change = __ieee80211_recalc_idle(local, false);
 	if (change)
 		ieee80211_hw_config(local, change);
 }
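
The iface.c refactor folds the idle policy into one internal worker, __ieee80211_recalc_idle(), and re-expresses the two public entry points as wrappers that differ only in the force_active argument. The shape of that refactor, as an illustrative sketch:

#include <stdbool.h>

/* One worker owns the policy... */
static unsigned int recalc_idle(bool force_active)
{
	bool active = force_active;	/* ...plus other activity checks */

	return active ? 0x0 : 0x1;	/* stand-in for CONF_CHANGE_IDLE */
}

/* ...and the entry points become one-line wrappers around it. */
static unsigned int idle_off(void)
{
	return recalc_idle(true);
}

static unsigned int recalc(void)
{
	return recalc_idle(false);
}
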
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 82cc30318a86..346ad4cfb013 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3964,8 +3964,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 	/* prep auth_data so we don't go into idle on disassoc */
 	ifmgd->auth_data = auth_data;
 
-	if (ifmgd->associated)
-		ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+	if (ifmgd->associated) {
+		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+
+		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+				       WLAN_REASON_UNSPECIFIED,
+				       false, frame_buf);
+
+		__cfg80211_send_deauth(sdata->dev, frame_buf,
+				       sizeof(frame_buf));
+	}
 
 	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
 
@@ -4025,8 +4033,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
 	mutex_lock(&ifmgd->mtx);
 
-	if (ifmgd->associated)
-		ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
+	if (ifmgd->associated) {
+		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+
+		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
+				       WLAN_REASON_UNSPECIFIED,
+				       false, frame_buf);
+
+		__cfg80211_send_deauth(sdata->dev, frame_buf,
+				       sizeof(frame_buf));
+	}
 
 	if (ifmgd->auth_data && !ifmgd->auth_data->done) {
 		err = -EBUSY;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 0f92dc24cb89..d7df6ac2c6f1 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -339,7 +339,11 @@ bitmap_ipmac_tlist(const struct ip_set *set,
 nla_put_failure:
 	nla_nest_cancel(skb, nested);
 	ipset_nest_end(skb, atd);
-	return -EMSGSIZE;
+	if (unlikely(id == first)) {
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
 }
 
 static int
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index f2627226a087..10a30b4fc7db 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
 	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
+}
+
 static inline int
 hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
 {
@@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
 	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
+}
+
 static inline int
 hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
 {
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 4b677cf6bf7d..d6a59154d710 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
 static inline void
 hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
 {
-	dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
 }
 
 static inline int
@@ -308,7 +317,16 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
 static inline void
 hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
 {
-	dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
 }
 
 static inline int
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 6ba985f1c96f..f2b0a3c30130 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
 static inline void
 hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
 {
-	dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
 }
 
 static inline int
@@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
 static inline void
 hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
 {
-	dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
 static inline int
@@ -504,6 +513,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
 }
 
 static inline void
+hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
+}
+
+static inline void
 hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
 {
 	elem->elem = 0;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index af20c0c5ced2..349deb672a2d 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -104,6 +104,15 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
 	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
+}
+
 static inline int
 hash_netport4_data_match(const struct hash_netport4_elem *elem)
 {
@@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
 	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
 }
 
+static inline void
+hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags)
+{
+	if (dst->nomatch) {
+		*flags = IPSET_FLAG_NOMATCH;
+		dst->nomatch = 0;
+	}
+}
+
 static inline int
 hash_netport6_data_match(const struct hash_netport6_elem *elem)
 {
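
Every *_data_reset_flags() helper added across the ipset hash types above is the same two-step move: when an element is about to be overwritten, its nomatch marker is handed back to the caller through the flags word, so the old state can be restored if the update has to be rolled back. Boiled down to a stand-in sketch:

#include <stdint.h>

#define FLAG_NOMATCH 0x1		/* stand-in for IPSET_FLAG_NOMATCH */

struct elem {
	uint8_t nomatch;
};

static void data_reset_flags(struct elem *dst, uint32_t *flags)
{
	if (dst->nomatch) {
		*flags = FLAG_NOMATCH;	/* give the old state to the caller */
		dst->nomatch = 0;
	}
}
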
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 8371c2bac2e4..09c744aa8982 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 {
 	const struct set_elem *e = list_set_elem(map, i);
 
-	if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
-		/* Last element replaced: e.g. add new,before,last */
-		ip_set_put_byindex(e->id);
+	if (e->id != IPSET_INVALID_ID) {
+		const struct set_elem *x = list_set_elem(map, map->size - 1);
+
+		/* Last element replaced or pushed off */
+		if (x->id != IPSET_INVALID_ID)
+			ip_set_put_byindex(x->id);
+	}
 	if (with_timeout(map->timeout))
 		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
 	else
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 0e7d423324c3..e0c4373b4747 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
 		end += strlen("\r\n\r\n") + clen;
 
 		msglen = origlen = end - dptr;
-		if (msglen > datalen) {
-			nf_ct_helper_log(skb, ct, "incomplete/bad SIP message");
-			return NF_DROP;
-		}
+		if (msglen > datalen)
+			return NF_ACCEPT;
 
 		ret = process_sip_msg(skb, ct, protoff, dataoff,
 				      &dptr, &msglen);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 8d5769c6d16e..ad24be070e53 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);
 struct nf_nat_proto_clean {
 	u8	l3proto;
 	u8	l4proto;
-	bool	hash;
 };
 
-/* Clear NAT section of all conntracks, in case we're loaded again. */
-static int nf_nat_proto_clean(struct nf_conn *i, void *data)
+/* kill conntracks with affected NAT section */
+static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 {
 	const struct nf_nat_proto_clean *clean = data;
 	struct nf_conn_nat *nat = nfct_nat(i);
 
 	if (!nat)
 		return 0;
-	if (!(i->status & IPS_SRC_NAT_DONE))
-		return 0;
+
 	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
 	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
 		return 0;
 
-	if (clean->hash) {
-		spin_lock_bh(&nf_nat_lock);
-		hlist_del_rcu(&nat->bysource);
-		spin_unlock_bh(&nf_nat_lock);
-	} else {
-		memset(nat, 0, sizeof(*nat));
-		i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
-			       IPS_SEQ_ADJUST);
-	}
-	return 0;
+	return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
@@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 	struct net *net;
 
 	rtnl_lock();
-	/* Step 1 - remove from bysource hash */
-	clean.hash = true;
 	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
-	synchronize_rcu();
-
-	/* Step 2 - clean NAT section */
-	clean.hash = false;
-	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
 	rtnl_unlock();
 }
 
@@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto)
 	struct net *net;
 
 	rtnl_lock();
-	/* Step 1 - remove from bysource hash */
-	clean.hash = true;
-	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
-	synchronize_rcu();
 
-	/* Step 2 - clean NAT section */
-	clean.hash = false;
 	for_each_net(net)
-		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
+		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
 	rtnl_unlock();
 }
 
@@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};
 
-	nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
+	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a4b724708a1a..6980c3e6f066 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1593,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
 		return ERR_PTR(-ENOMEM);
 
 	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
-	if (retval < 0) {
-		kfree_skb(skb);
-		return ERR_PTR(retval);
-	}
+	BUG_ON(retval < 0);
+
 	return skb;
 }
 
@@ -1726,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
 		err = -EINVAL;
 
+	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!reply) {
+		err = -ENOMEM;
+		goto exit_unlock;
+	}
+
 	if (!err && a[OVS_VPORT_ATTR_OPTIONS])
 		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
 	if (err)
-		goto exit_unlock;
+		goto exit_free;
+
 	if (a[OVS_VPORT_ATTR_UPCALL_PID])
 		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
 
-	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
-					 OVS_VPORT_CMD_NEW);
-	if (IS_ERR(reply)) {
-		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
-				ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
-		goto exit_unlock;
-	}
+	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
+				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
+	BUG_ON(err < 0);
 
 	genl_notify(reply, genl_info_net(info), info->snd_portid,
 		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
 
+	rtnl_unlock();
+	return 0;
+
+exit_free:
+	kfree_skb(reply);
 exit_unlock:
 	rtnl_unlock();
 	return err;
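
The ovs_vport_cmd_set() rework follows the allocate-before-commit rule: the reply buffer is reserved before any vport state is touched, so an allocation failure can return with the object unmodified, and filling the preallocated reply afterwards is asserted never to fail. The same ordering in miniature (stand-in types throughout):

#include <assert.h>
#include <stdlib.h>

struct vport {
	unsigned int portid;
};

static int fill_reply(char *buf, const struct vport *v)
{
	buf[0] = (char)v->portid;	/* stand-in for the netlink fill */
	return 0;			/* cannot fail on a sized buffer */
}

static int vport_set(struct vport *v, unsigned int new_portid)
{
	char *reply = malloc(128);	/* reserve the reply up front */
	int err;

	if (!reply)
		return -1;		/* bail out before touching state */

	v->portid = new_portid;		/* commit after allocation succeeds */

	err = fill_reply(reply, v);
	assert(err == 0);		/* mirrors the BUG_ON(err < 0) */

	free(reply);
	return 0;
}
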
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index fe0e4215c73d..67a2b783fe70 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -795,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
 
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
+	BUG_ON(table->count == 0);
 	hlist_del_rcu(&flow->hash_node[table->node_ver]);
 	table->count--;
-	BUG_ON(table->count < 0);
 }
 
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 1135d8227f9b..9b97172db84a 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -204,7 +204,6 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
 	if (err < 0)
 		return err;
 
-	err = -EINVAL;
 	if (tb[TCA_FW_CLASSID]) {
 		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
 		tcf_bind_filter(tp, &f->res, base);
@@ -218,6 +217,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
 	}
 #endif /* CONFIG_NET_CLS_IND */
 
+	err = -EINVAL;
 	if (tb[TCA_FW_MASK]) {
 		mask = nla_get_u32(tb[TCA_FW_MASK]);
 		if (mask != head->mask)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index b28cc384a5bc..4de4bc48493b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3016,6 +3016,7 @@ sub process {
 			$dstat !~ /^'X'$/ &&					# character constants
 			$dstat !~ /$exceptions/ &&
 			$dstat !~ /^\.$Ident\s*=/ &&				# .foo =
+			$dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ &&	# stringification #foo
 			$dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ &&	# do {...} while (...); // do {...} while (...)
 			$dstat !~ /^for\s*$Constant$/ &&			# for (...)
 			$dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ &&	# for (...) bar()
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 71ae86ca64ac..eb560fa32321 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3222,18 +3222,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
 			   struct vm_area_struct *area)
 {
-	long size;
-	unsigned long offset;
+	struct snd_pcm_runtime *runtime = substream->runtime;
 
 	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
-	area->vm_flags |= VM_IO;
-	size = area->vm_end - area->vm_start;
-	offset = area->vm_pgoff << PAGE_SHIFT;
-	if (io_remap_pfn_range(area, area->vm_start,
-			       (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
-			       size, area->vm_page_prot))
-		return -EAGAIN;
-	return 0;
+	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
 }
 
 EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 6f3214ed4444..321e066a0753 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1421,6 +1421,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
+	case 0x46:	/* HSW */
 		return 1;
 	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
 	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
@@ -1515,6 +1516,7 @@ void rapl_probe(unsigned int family, unsigned int model)
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
+	case 0x46:	/* HSW */
 		do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
 		break;
 	case 0x2D:
@@ -1754,6 +1756,7 @@ int is_snb(unsigned int family, unsigned int model)
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSW */
 	case 0x45:	/* HSW */
+	case 0x46:	/* HSW */
 		return 1;
 	}
 	return 0;
@@ -2276,7 +2279,7 @@ int main(int argc, char **argv)
 	cmdline(argc, argv);
 
 	if (verbose)
-		fprintf(stderr, "turbostat v3.3 March 15, 2013"
+		fprintf(stderr, "turbostat v3.3 March 15, 2013"
 			" - Len Brown <lenb@kernel.org>\n");
 
 	turbostat_init();