-rw-r--r-- Documentation/cgroups/cgroups.txt | 11
-rw-r--r-- Documentation/isdn/INTERFACE.CAPI | 83
-rw-r--r-- Documentation/kernel-parameters.txt | 1
-rw-r--r-- Documentation/networking/pktgen.txt | 8
-rw-r--r-- Documentation/vm/ksm.txt | 13
-rw-r--r-- Documentation/vm/page-types.c | 304
-rw-r--r-- Documentation/vm/pagemap.txt | 8
-rw-r--r-- MAINTAINERS | 8
-rw-r--r-- arch/m68knommu/kernel/asm-offsets.c | 28
-rw-r--r-- arch/m68knommu/kernel/entry.S | 6
-rw-r--r-- arch/m68knommu/mm/init.c | 2
-rw-r--r-- arch/m68knommu/platform/5206e/config.c | 1
-rw-r--r-- arch/m68knommu/platform/68328/entry.S | 32
-rw-r--r-- arch/m68knommu/platform/68360/entry.S | 16
-rw-r--r-- arch/m68knommu/platform/coldfire/entry.S | 20
-rw-r--r-- arch/microblaze/kernel/entry.S | 2
-rw-r--r-- arch/microblaze/kernel/hw_exception_handler.S | 2
-rw-r--r-- arch/microblaze/kernel/process.c | 2
-rw-r--r-- arch/parisc/Kconfig | 1
-rw-r--r-- arch/parisc/include/asm/fixmap.h | 4
-rw-r--r-- arch/parisc/include/asm/hardirq.h | 20
-rw-r--r-- arch/parisc/include/asm/ptrace.h | 5
-rw-r--r-- arch/parisc/include/asm/syscall.h | 40
-rw-r--r-- arch/parisc/include/asm/thread_info.h | 14
-rw-r--r-- arch/parisc/kernel/asm-offsets.c | 4
-rw-r--r-- arch/parisc/kernel/entry.S | 21
-rw-r--r-- arch/parisc/kernel/irq.c | 5
-rw-r--r-- arch/parisc/kernel/module.c | 2
-rw-r--r-- arch/parisc/kernel/ptrace.c | 42
-rw-r--r-- arch/parisc/kernel/signal.c | 5
-rw-r--r-- arch/parisc/kernel/syscall.S | 22
-rw-r--r-- arch/parisc/kernel/vmlinux.lds.S | 10
-rw-r--r-- arch/parisc/mm/init.c | 11
-rw-r--r-- arch/s390/kvm/kvm-s390.h | 2
-rw-r--r-- arch/sparc/include/asm/hardirq_32.h | 12
-rw-r--r-- arch/sparc/include/asm/irq_32.h | 4
-rw-r--r-- arch/sparc/include/asm/pgtable_64.h | 4
-rw-r--r-- arch/sparc/kernel/ktlb.S | 8
-rw-r--r-- arch/sparc/kernel/perf_event.c | 577
-rw-r--r-- arch/sparc/oprofile/init.c | 1
-rw-r--r-- arch/x86/Kconfig | 4
-rw-r--r-- arch/x86/Kconfig.cpu | 3
-rw-r--r-- arch/x86/ia32/ia32entry.S | 36
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 1
-rw-r--r-- arch/x86/kernel/early_printk.c | 5
-rw-r--r-- arch/x86/kernel/i386_ksyms_32.c | 2
-rw-r--r-- arch/x86/kvm/lapic.c | 2
-rw-r--r-- arch/x86/kvm/mmu.c | 84
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 18
-rw-r--r-- arch/x86/kvm/svm.c | 25
-rw-r--r-- arch/x86/kvm/vmx.c | 2
-rw-r--r-- arch/x86/kvm/x86.c | 2
-rw-r--r-- arch/x86/lib/Makefile | 6
-rw-r--r-- drivers/char/agp/parisc-agp.c | 2
-rw-r--r-- drivers/connector/cn_proc.c | 3
-rw-r--r-- drivers/edac/amd64_edac.c | 104
-rw-r--r-- drivers/edac/amd64_edac.h | 23
-rw-r--r-- drivers/edac/amd64_edac_inj.c | 49
-rw-r--r-- drivers/firmware/iscsi_ibft.c | 2
-rw-r--r-- drivers/firmware/iscsi_ibft_find.c | 4
-rw-r--r-- drivers/hwmon/lis3lv02d_spi.c | 3
-rw-r--r-- drivers/ide/ide-proc.c | 8
-rw-r--r-- drivers/ide/sis5513.c | 10
-rw-r--r-- drivers/isdn/capi/capi.c | 2
-rw-r--r-- drivers/isdn/capi/capidrv.c | 27
-rw-r--r-- drivers/isdn/gigaset/asyncdata.c | 28
-rw-r--r-- drivers/isdn/gigaset/bas-gigaset.c | 87
-rw-r--r-- drivers/isdn/gigaset/common.c | 134
-rw-r--r-- drivers/isdn/gigaset/ev-layer.c | 30
-rw-r--r-- drivers/isdn/gigaset/i4l.c | 23
-rw-r--r-- drivers/isdn/gigaset/interface.c | 9
-rw-r--r-- drivers/isdn/gigaset/isocdata.c | 30
-rw-r--r-- drivers/mmc/core/sdio_cis.c | 7
-rw-r--r-- drivers/net/au1000_eth.c | 4
-rw-r--r-- drivers/net/benet/be_cmds.c | 1
-rw-r--r-- drivers/net/benet/be_cmds.h | 2
-rw-r--r-- drivers/net/benet/be_ethtool.c | 2
-rw-r--r-- drivers/net/benet/be_main.c | 6
-rw-r--r-- drivers/net/e1000e/82571.c | 4
-rw-r--r-- drivers/net/ethoc.c | 81
-rw-r--r-- drivers/net/ixgbe/ixgbe_82599.c | 2
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 4
-rw-r--r-- drivers/net/ixgbe/ixgbe_type.h | 2
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 2
-rw-r--r-- drivers/net/pasemi_mac_ethtool.c | 3
-rw-r--r-- drivers/net/pcmcia/pcnet_cs.c | 10
-rw-r--r-- drivers/net/qlge/qlge.h | 8
-rw-r--r-- drivers/net/qlge/qlge_ethtool.c | 2
-rw-r--r-- drivers/net/qlge/qlge_main.c | 18
-rw-r--r-- drivers/net/qlge/qlge_mpi.c | 12
-rw-r--r-- drivers/net/tg3.c | 41
-rw-r--r-- drivers/net/tg3.h | 1
-rw-r--r-- drivers/net/usb/rndis_host.c | 1
-rw-r--r-- drivers/serial/serial_cs.c | 12
-rw-r--r-- drivers/video/da8xx-fb.c | 1
-rw-r--r-- drivers/video/msm/mddi.c | 2
-rw-r--r-- firmware/Makefile | 7
-rw-r--r-- firmware/WHENCE | 5
-rw-r--r-- firmware/cis/COMpad2.cis.ihex | 11
-rw-r--r-- firmware/cis/COMpad4.cis.ihex | 9
-rw-r--r-- firmware/cis/DP83903.cis.ihex | 14
-rw-r--r-- firmware/cis/NE2K.cis.ihex | 8
-rw-r--r-- firmware/cis/tamarack.cis.ihex | 10
-rw-r--r-- fs/proc/kcore.c | 1
-rw-r--r-- fs/proc/page.c | 5
-rw-r--r-- include/linux/socket.h | 21
-rw-r--r-- kernel/hrtimer.c | 2
-rw-r--r-- kernel/perf_event.c | 34
-rw-r--r-- kernel/trace/ftrace.c | 4
-rw-r--r-- kernel/trace/kmemtrace.c | 2
-rw-r--r-- mm/Kconfig | 4
-rw-r--r-- mm/ksm.c | 10
-rw-r--r-- mm/vmalloc.c | 1
-rw-r--r-- net/core/net-sysfs.c | 4
-rw-r--r-- net/core/pktgen.c | 4
-rw-r--r-- net/ipv4/devinet.c | 16
-rw-r--r-- tools/perf/Documentation/perf-timechart.txt | 3
-rw-r--r-- tools/perf/Makefile | 2
-rw-r--r-- tools/perf/builtin-timechart.c | 10
-rw-r--r-- tools/perf/builtin-top.c | 1
-rw-r--r-- tools/perf/util/svghelper.c | 14
-rw-r--r-- virt/kvm/kvm_main.c | 14
122 files changed, 1834 insertions(+), 739 deletions(-)
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 455d4e6d346d..0b33bfe7dde9 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -227,7 +227,14 @@ as the path relative to the root of the cgroup file system.
227Each cgroup is represented by a directory in the cgroup file system 227Each cgroup is represented by a directory in the cgroup file system
228containing the following files describing that cgroup: 228containing the following files describing that cgroup:
229 229
230 - tasks: list of tasks (by pid) attached to that cgroup 230 - tasks: list of tasks (by pid) attached to that cgroup. This list
231 is not guaranteed to be sorted. Writing a thread id into this file
232 moves the thread into this cgroup.
233 - cgroup.procs: list of tgids in the cgroup. This list is not
234 guaranteed to be sorted or free of duplicate tgids, and userspace
235 should sort/uniquify the list if this property is required.
236 Writing a tgid into this file moves all threads with that tgid into
237 this cgroup.
231 - notify_on_release flag: run the release agent on exit? 238 - notify_on_release flag: run the release agent on exit?
232 - release_agent: the path to use for release notifications (this file 239 - release_agent: the path to use for release notifications (this file
233 exists in the top cgroup only) 240 exists in the top cgroup only)
@@ -374,7 +381,7 @@ Now you want to do something with this cgroup.
374 381
375In this directory you can find several files: 382In this directory you can find several files:
376# ls 383# ls
377notify_on_release tasks 384cgroup.procs notify_on_release tasks
378(plus whatever files added by the attached subsystems) 385(plus whatever files added by the attached subsystems)
379 386
380Now attach your shell to this cgroup: 387Now attach your shell to this cgroup:
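[Editorial sketch, not part of the patch: the difference this hunk documents between writing to tasks and to cgroup.procs, shown from userspace. The mount point and group name /dev/cgroup/my_group are hypothetical.]

/*
 * Illustrative only. Assumes a v1 cgroup hierarchy is already mounted
 * and the (hypothetical) group /dev/cgroup/my_group exists.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void write_id(const char *path, pid_t id)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	/* The kernel parses the decimal id written here. */
	fprintf(f, "%d\n", (int)id);
	fclose(f);
}

int main(void)
{
	pid_t pid = getpid();

	/* "tasks" moves only the single thread whose id is written ... */
	write_id("/dev/cgroup/my_group/tasks", pid);
	/* ... while "cgroup.procs" moves every thread of that tgid. */
	write_id("/dev/cgroup/my_group/cgroup.procs", pid);
	return 0;
}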
diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI
index 686e107923ec..5fe8de5cc727 100644
--- a/Documentation/isdn/INTERFACE.CAPI
+++ b/Documentation/isdn/INTERFACE.CAPI
@@ -60,10 +60,9 @@ open() operation on regular files or character devices.
60 60
61After a successful return from register_appl(), CAPI messages from the 61After a successful return from register_appl(), CAPI messages from the
62application may be passed to the driver for the device via calls to the 62application may be passed to the driver for the device via calls to the
63send_message() callback function. The CAPI message to send is stored in the 63send_message() callback function. Conversely, the driver may call Kernel
64data portion of an skb. Conversely, the driver may call Kernel CAPI's 64CAPI's capi_ctr_handle_message() function to pass a received CAPI message to
65capi_ctr_handle_message() function to pass a received CAPI message to Kernel 65Kernel CAPI for forwarding to an application, specifying its ApplID.
66CAPI for forwarding to an application, specifying its ApplID.
67 66
68Deregistration requests (CAPI operation CAPI_RELEASE) from applications are 67Deregistration requests (CAPI operation CAPI_RELEASE) from applications are
69forwarded as calls to the release_appl() callback function, passing the same 68forwarded as calls to the release_appl() callback function, passing the same
@@ -142,6 +141,7 @@ u16 (*send_message)(struct capi_ctr *ctrlr, struct sk_buff *skb)
142 to accepting or queueing the message. Errors occurring during the 141 to accepting or queueing the message. Errors occurring during the
143 actual processing of the message should be signaled with an 142 actual processing of the message should be signaled with an
144 appropriate reply message. 143 appropriate reply message.
144 May be called in process or interrupt context.
145 Calls to this function are not serialized by Kernel CAPI, ie. it must 145 Calls to this function are not serialized by Kernel CAPI, ie. it must
146 be prepared to be re-entered. 146 be prepared to be re-entered.
147 147
@@ -154,7 +154,8 @@ read_proc_t *ctr_read_proc
154 system entry, /proc/capi/controllers/<n>; will be called with a 154 system entry, /proc/capi/controllers/<n>; will be called with a
155 pointer to the device's capi_ctr structure as the last (data) argument 155 pointer to the device's capi_ctr structure as the last (data) argument
156 156
157Note: Callback functions are never called in interrupt context. 157Note: Callback functions except send_message() are never called in interrupt
158context.
158 159
159- to be filled in before calling capi_ctr_ready(): 160- to be filled in before calling capi_ctr_ready():
160 161
@@ -171,14 +172,40 @@ u8 serial[CAPI_SERIAL_LEN]
171 value to return for CAPI_GET_SERIAL 172 value to return for CAPI_GET_SERIAL
172 173
173 174
1744.3 The _cmsg Structure 1754.3 SKBs
176
177CAPI messages are passed between Kernel CAPI and the driver via send_message()
178and capi_ctr_handle_message(), stored in the data portion of a socket buffer
179(skb). Each skb contains a single CAPI message coded according to the CAPI 2.0
180standard.
181
182For the data transfer messages, DATA_B3_REQ and DATA_B3_IND, the actual
183payload data immediately follows the CAPI message itself within the same skb.
184The Data and Data64 parameters are not used for processing. The Data64
185parameter may be omitted by setting the length field of the CAPI message to 22
186instead of 30.
187
188
1894.4 The _cmsg Structure
175 190
176(declared in <linux/isdn/capiutil.h>) 191(declared in <linux/isdn/capiutil.h>)
177 192
178The _cmsg structure stores the contents of a CAPI 2.0 message in an easily 193The _cmsg structure stores the contents of a CAPI 2.0 message in an easily
179accessible form. It contains members for all possible CAPI 2.0 parameters, of 194accessible form. It contains members for all possible CAPI 2.0 parameters,
180which only those appearing in the message type currently being processed are 195including subparameters of the Additional Info and B Protocol structured
181actually used. Unused members should be set to zero. 196parameters, with the following exceptions:
197
198* second Calling party number (CONNECT_IND)
199
200* Data64 (DATA_B3_REQ and DATA_B3_IND)
201
202* Sending complete (subparameter of Additional Info, CONNECT_REQ and INFO_REQ)
203
204* Global Configuration (subparameter of B Protocol, CONNECT_REQ, CONNECT_RESP
205 and SELECT_B_PROTOCOL_REQ)
206
207Only those parameters appearing in the message type currently being processed
208are actually used. Unused members should be set to zero.
182 209
183Members are named after the CAPI 2.0 standard names of the parameters they 210Members are named after the CAPI 2.0 standard names of the parameters they
184represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data 211represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data
@@ -190,18 +217,19 @@ u16 for CAPI parameters of type 'word'
190 217
191u32 for CAPI parameters of type 'dword' 218u32 for CAPI parameters of type 'dword'
192 219
193_cstruct for CAPI parameters of type 'struct' not containing any 220_cstruct for CAPI parameters of type 'struct'
194 variably-sized (struct) subparameters (eg. 'Called Party Number')
195 The member is a pointer to a buffer containing the parameter in 221 The member is a pointer to a buffer containing the parameter in
196 CAPI encoding (length + content). It may also be NULL, which will 222 CAPI encoding (length + content). It may also be NULL, which will
197 be taken to represent an empty (zero length) parameter. 223 be taken to represent an empty (zero length) parameter.
224 Subparameters are stored in encoded form within the content part.
198 225
199_cmstruct for CAPI parameters of type 'struct' containing 'struct' 226_cmstruct alternative representation for CAPI parameters of type 'struct'
200 subparameters ('Additional Info' and 'B Protocol') 227 (used only for the 'Additional Info' and 'B Protocol' parameters)
201 The representation is a single byte containing one of the values: 228 The representation is a single byte containing one of the values:
202 CAPI_DEFAULT: the parameter is empty 229 CAPI_DEFAULT: The parameter is empty/absent.
203 CAPI_COMPOSE: the values of the subparameters are stored 230 CAPI_COMPOSE: The parameter is present.
204 individually in the corresponding _cmsg structure members 231 Subparameter values are stored individually in the corresponding
232 _cmsg structure members.
205 233
206Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert 234Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert
207messages between their transport encoding described in the CAPI 2.0 standard 235messages between their transport encoding described in the CAPI 2.0 standard
@@ -297,3 +325,26 @@ char *capi_cmd2str(u8 Command, u8 Subcommand)
297 be NULL if the command/subcommand is not one of those defined in the 325 be NULL if the command/subcommand is not one of those defined in the
298 CAPI 2.0 standard. 326 CAPI 2.0 standard.
299 327
328
3297. Debugging
330
331The module kernelcapi has a module parameter showcapimsgs controlling some
332debugging output produced by the module. It can only be set when the module is
333loaded, via a parameter "showcapimsgs=<n>" to the modprobe command, either on
334the command line or in the configuration file.
335
336If the lowest bit of showcapimsgs is set, kernelcapi logs controller and
337application up and down events.
338
339In addition, every registered CAPI controller has an associated traceflag
340parameter controlling how CAPI messages sent from and to tha controller are
341logged. The traceflag parameter is initialized with the value of the
342showcapimsgs parameter when the controller is registered, but can later be
343changed via the MANUFACTURER_REQ command KCAPI_CMD_TRACE.
344
345If the value of traceflag is non-zero, CAPI messages are logged.
346DATA_B3 messages are only logged if the value of traceflag is > 2.
347
348If the lowest bit of traceflag is set, only the command/subcommand and message
349length are logged. Otherwise, kernelcapi logs a readable representation of
350the entire message.
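[Editorial sketch, not from the kernel sources: the 22-versus-30 rule for DATA_B3 messages in section 4.3 follows from the CAPI 2.0 wire layout, an 8-octet header plus the fixed NCCI/Data/Data Length/Data Handle/Flags parameters (22 octets), with the optional 64-bit Data64 parameter adding 8 more. ApplID and NCCI below are placeholder values.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t put_word(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
	return 2;
}

static size_t put_dword(uint8_t *p, uint32_t v)
{
	put_word(p, v & 0xffff);
	put_word(p + 2, v >> 16);
	return 4;
}

int main(void)
{
	uint8_t msg[30];
	size_t len = 0;

	/* 8-octet CAPI message header */
	len += put_word(msg + len, 0);       /* Total Length, patched below */
	len += put_word(msg + len, 1);       /* ApplID (placeholder)        */
	msg[len++] = 0x86;                   /* Command: DATA_B3            */
	msg[len++] = 0x80;                   /* Subcommand: REQ             */
	len += put_word(msg + len, 1);       /* Message Number              */

	/* Fixed DATA_B3_REQ parameters */
	len += put_dword(msg + len, 0x101);  /* NCCI (placeholder)          */
	len += put_dword(msg + len, 0);      /* Data (unused, see 4.3)      */
	len += put_word(msg + len, 0);       /* Data Length                 */
	len += put_word(msg + len, 0);       /* Data Handle                 */
	len += put_word(msg + len, 0);       /* Flags                       */

	put_word(msg, len);                  /* length 22: Data64 omitted   */
	printf("DATA_B3_REQ length without Data64: %zu\n", len);

	memset(msg + len, 0, 8);             /* optional 64-bit Data64      */
	put_word(msg, len + 8);              /* length 30: Data64 present   */
	printf("DATA_B3_REQ length with Data64:    %zu\n", len + 8);
	return 0;
}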
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 6fa7292947e5..9107b387e91f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -671,6 +671,7 @@ and is between 256 and 4096 characters. It is defined in the file
671 earlyprintk= [X86,SH,BLACKFIN] 671 earlyprintk= [X86,SH,BLACKFIN]
672 earlyprintk=vga 672 earlyprintk=vga
673 earlyprintk=serial[,ttySn[,baudrate]] 673 earlyprintk=serial[,ttySn[,baudrate]]
674 earlyprintk=ttySn[,baudrate]
674 earlyprintk=dbgp[debugController#] 675 earlyprintk=dbgp[debugController#]
675 676
676 Append ",keep" to not disable it when the real console 677 Append ",keep" to not disable it when the real console
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index c6cf4a3c16e0..61bb645d50e0 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -90,6 +90,11 @@ Examples:
90 pgset "dstmac 00:00:00:00:00:00" sets MAC destination address 90 pgset "dstmac 00:00:00:00:00:00" sets MAC destination address
91 pgset "srcmac 00:00:00:00:00:00" sets MAC source address 91 pgset "srcmac 00:00:00:00:00:00" sets MAC source address
92 92
93 pgset "queue_map_min 0" Sets the min value of tx queue interval
94 pgset "queue_map_max 7" Sets the max value of tx queue interval, for multiqueue devices
95 To select queue 1 of a given device,
96 use queue_map_min=1 and queue_map_max=1
97
93 pgset "src_mac_count 1" Sets the number of MACs we'll range through. 98 pgset "src_mac_count 1" Sets the number of MACs we'll range through.
94 The 'minimum' MAC is what you set with srcmac. 99 The 'minimum' MAC is what you set with srcmac.
95 100
@@ -101,6 +106,9 @@ Examples:
101 IPDST_RND, UDPSRC_RND, 106 IPDST_RND, UDPSRC_RND,
102 UDPDST_RND, MACSRC_RND, MACDST_RND 107 UDPDST_RND, MACSRC_RND, MACDST_RND
103 MPLS_RND, VID_RND, SVID_RND 108 MPLS_RND, VID_RND, SVID_RND
109 QUEUE_MAP_RND # queue map random
110 QUEUE_MAP_CPU # queue map mirrors smp_processor_id()
111
104 112
105 pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then 113 pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then
106 cycle through the port range. 114 cycle through the port range.
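[Editorial sketch, not part of the patch: the pgset lines above are just command strings written to the per-device file under /proc/net/pktgen/. A minimal C equivalent for the new queue_map commands; the device name eth0 is only an example and must already be assigned to a pktgen thread.]

#include <stdio.h>
#include <stdlib.h>

static void pgset(const char *dev_file, const char *cmd)
{
	FILE *f = fopen(dev_file, "w");

	if (!f) {
		perror(dev_file);
		exit(EXIT_FAILURE);
	}
	fprintf(f, "%s\n", cmd);
	fclose(f);
}

int main(void)
{
	const char *dev = "/proc/net/pktgen/eth0";

	/* Pin all generated traffic to tx queue 1 of a multiqueue NIC. */
	pgset(dev, "queue_map_min 1");
	pgset(dev, "queue_map_max 1");
	return 0;
}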
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index 72a22f65960e..262d8e6793a3 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -52,15 +52,15 @@ The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
52readable by all but writable only by root: 52readable by all but writable only by root:
53 53
54max_kernel_pages - set to maximum number of kernel pages that KSM may use 54max_kernel_pages - set to maximum number of kernel pages that KSM may use
55 e.g. "echo 2000 > /sys/kernel/mm/ksm/max_kernel_pages" 55 e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages"
56 Value 0 imposes no limit on the kernel pages KSM may use; 56 Value 0 imposes no limit on the kernel pages KSM may use;
57 but note that any process using MADV_MERGEABLE can cause 57 but note that any process using MADV_MERGEABLE can cause
58 KSM to allocate these pages, unswappable until it exits. 58 KSM to allocate these pages, unswappable until it exits.
59 Default: 2000 (chosen for demonstration purposes) 59 Default: quarter of memory (chosen to not pin too much)
60 60
61pages_to_scan - how many present pages to scan before ksmd goes to sleep 61pages_to_scan - how many present pages to scan before ksmd goes to sleep
62 e.g. "echo 200 > /sys/kernel/mm/ksm/pages_to_scan" 62 e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan"
63 Default: 200 (chosen for demonstration purposes) 63 Default: 100 (chosen for demonstration purposes)
64 64
65sleep_millisecs - how many milliseconds ksmd should sleep before next scan 65sleep_millisecs - how many milliseconds ksmd should sleep before next scan
66 e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs" 66 e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs"
@@ -70,7 +70,8 @@ run - set 0 to stop ksmd from running but keep merged pages,
70 set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run", 70 set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run",
71 set 2 to stop ksmd and unmerge all pages currently merged, 71 set 2 to stop ksmd and unmerge all pages currently merged,
72 but leave mergeable areas registered for next run 72 but leave mergeable areas registered for next run
73 Default: 1 (for immediate use by apps which register) 73 Default: 0 (must be changed to 1 to activate KSM,
74 except if CONFIG_SYSFS is disabled)
74 75
75The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/: 76The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
76 77
@@ -86,4 +87,4 @@ pages_volatile embraces several different kinds of activity, but a high
86proportion there would also indicate poor use of madvise MADV_MERGEABLE. 87proportion there would also indicate poor use of madvise MADV_MERGEABLE.
87 88
88Izik Eidus, 89Izik Eidus,
89Hugh Dickins, 30 July 2009 90Hugh Dickins, 24 Sept 2009
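[Editorial sketch of the application side described above, not part of the patch: mark an anonymous mapping MADV_MERGEABLE so ksmd may merge its pages once root has done "echo 1 > /sys/kernel/mm/ksm/run". Assumes a kernel built with CONFIG_KSM.]

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* Linux value; older libc headers lack it */
#endif

int main(void)
{
	size_t len = 64 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Identical page contents are what KSM can actually merge. */
	memset(buf, 0x5a, len);

	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/* Keep the mapping alive while watching pages_sharing etc. */
	getchar();
	return 0;
}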
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index fa1a30d9e9d5..3ec4f2a22585 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -2,7 +2,10 @@
2 * page-types: Tool for querying page flags 2 * page-types: Tool for querying page flags
3 * 3 *
4 * Copyright (C) 2009 Intel corporation 4 * Copyright (C) 2009 Intel corporation
5 * Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com> 5 *
6 * Authors: Wu Fengguang <fengguang.wu@intel.com>
7 *
8 * Released under the General Public License (GPL).
6 */ 9 */
7 10
8#define _LARGEFILE64_SOURCE 11#define _LARGEFILE64_SOURCE
@@ -69,7 +72,9 @@
69#define KPF_COMPOUND_TAIL 16 72#define KPF_COMPOUND_TAIL 16
70#define KPF_HUGE 17 73#define KPF_HUGE 17
71#define KPF_UNEVICTABLE 18 74#define KPF_UNEVICTABLE 18
75#define KPF_HWPOISON 19
72#define KPF_NOPAGE 20 76#define KPF_NOPAGE 20
77#define KPF_KSM 21
73 78
74/* [32-] kernel hacking assistances */ 79/* [32-] kernel hacking assistances */
75#define KPF_RESERVED 32 80#define KPF_RESERVED 32
@@ -116,7 +121,9 @@ static char *page_flag_names[] = {
116 [KPF_COMPOUND_TAIL] = "T:compound_tail", 121 [KPF_COMPOUND_TAIL] = "T:compound_tail",
117 [KPF_HUGE] = "G:huge", 122 [KPF_HUGE] = "G:huge",
118 [KPF_UNEVICTABLE] = "u:unevictable", 123 [KPF_UNEVICTABLE] = "u:unevictable",
124 [KPF_HWPOISON] = "X:hwpoison",
119 [KPF_NOPAGE] = "n:nopage", 125 [KPF_NOPAGE] = "n:nopage",
126 [KPF_KSM] = "x:ksm",
120 127
121 [KPF_RESERVED] = "r:reserved", 128 [KPF_RESERVED] = "r:reserved",
122 [KPF_MLOCKED] = "m:mlocked", 129 [KPF_MLOCKED] = "m:mlocked",
@@ -152,9 +159,6 @@ static unsigned long opt_size[MAX_ADDR_RANGES];
152static int nr_vmas; 159static int nr_vmas;
153static unsigned long pg_start[MAX_VMAS]; 160static unsigned long pg_start[MAX_VMAS];
154static unsigned long pg_end[MAX_VMAS]; 161static unsigned long pg_end[MAX_VMAS];
155static unsigned long voffset;
156
157static int pagemap_fd;
158 162
159#define MAX_BIT_FILTERS 64 163#define MAX_BIT_FILTERS 64
160static int nr_bit_filters; 164static int nr_bit_filters;
@@ -163,9 +167,16 @@ static uint64_t opt_bits[MAX_BIT_FILTERS];
163 167
164static int page_size; 168static int page_size;
165 169
166#define PAGES_BATCH (64 << 10) /* 64k pages */ 170static int pagemap_fd;
167static int kpageflags_fd; 171static int kpageflags_fd;
168 172
173static int opt_hwpoison;
174static int opt_unpoison;
175
176static char *hwpoison_debug_fs = "/debug/hwpoison";
177static int hwpoison_inject_fd;
178static int hwpoison_forget_fd;
179
169#define HASH_SHIFT 13 180#define HASH_SHIFT 13
170#define HASH_SIZE (1 << HASH_SHIFT) 181#define HASH_SIZE (1 << HASH_SHIFT)
171#define HASH_MASK (HASH_SIZE - 1) 182#define HASH_MASK (HASH_SIZE - 1)
@@ -207,6 +218,74 @@ static void fatal(const char *x, ...)
207 exit(EXIT_FAILURE); 218 exit(EXIT_FAILURE);
208} 219}
209 220
221int checked_open(const char *pathname, int flags)
222{
223 int fd = open(pathname, flags);
224
225 if (fd < 0) {
226 perror(pathname);
227 exit(EXIT_FAILURE);
228 }
229
230 return fd;
231}
232
233/*
234 * pagemap/kpageflags routines
235 */
236
237static unsigned long do_u64_read(int fd, char *name,
238 uint64_t *buf,
239 unsigned long index,
240 unsigned long count)
241{
242 long bytes;
243
244 if (index > ULONG_MAX / 8)
245 fatal("index overflow: %lu\n", index);
246
247 if (lseek(fd, index * 8, SEEK_SET) < 0) {
248 perror(name);
249 exit(EXIT_FAILURE);
250 }
251
252 bytes = read(fd, buf, count * 8);
253 if (bytes < 0) {
254 perror(name);
255 exit(EXIT_FAILURE);
256 }
257 if (bytes % 8)
258 fatal("partial read: %lu bytes\n", bytes);
259
260 return bytes / 8;
261}
262
263static unsigned long kpageflags_read(uint64_t *buf,
264 unsigned long index,
265 unsigned long pages)
266{
267 return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages);
268}
269
270static unsigned long pagemap_read(uint64_t *buf,
271 unsigned long index,
272 unsigned long pages)
273{
274 return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages);
275}
276
277static unsigned long pagemap_pfn(uint64_t val)
278{
279 unsigned long pfn;
280
281 if (val & PM_PRESENT)
282 pfn = PM_PFRAME(val);
283 else
284 pfn = 0;
285
286 return pfn;
287}
288
210 289
211/* 290/*
212 * page flag names 291 * page flag names
@@ -255,7 +334,8 @@ static char *page_flag_longname(uint64_t flags)
255 * page list and summary 334 * page list and summary
256 */ 335 */
257 336
258static void show_page_range(unsigned long offset, uint64_t flags) 337static void show_page_range(unsigned long voffset,
338 unsigned long offset, uint64_t flags)
259{ 339{
260 static uint64_t flags0; 340 static uint64_t flags0;
261 static unsigned long voff; 341 static unsigned long voff;
@@ -281,7 +361,8 @@ static void show_page_range(unsigned long offset, uint64_t flags)
281 count = 1; 361 count = 1;
282} 362}
283 363
284static void show_page(unsigned long offset, uint64_t flags) 364static void show_page(unsigned long voffset,
365 unsigned long offset, uint64_t flags)
285{ 366{
286 if (opt_pid) 367 if (opt_pid)
287 printf("%lx\t", voffset); 368 printf("%lx\t", voffset);
@@ -362,6 +443,62 @@ static uint64_t well_known_flags(uint64_t flags)
362 return flags; 443 return flags;
363} 444}
364 445
446static uint64_t kpageflags_flags(uint64_t flags)
447{
448 flags = expand_overloaded_flags(flags);
449
450 if (!opt_raw)
451 flags = well_known_flags(flags);
452
453 return flags;
454}
455
456/*
457 * page actions
458 */
459
460static void prepare_hwpoison_fd(void)
461{
462 char buf[100];
463
464 if (opt_hwpoison && !hwpoison_inject_fd) {
465 sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs);
466 hwpoison_inject_fd = checked_open(buf, O_WRONLY);
467 }
468
469 if (opt_unpoison && !hwpoison_forget_fd) {
470 sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
471 hwpoison_forget_fd = checked_open(buf, O_WRONLY);
472 }
473}
474
475static int hwpoison_page(unsigned long offset)
476{
477 char buf[100];
478 int len;
479
480 len = sprintf(buf, "0x%lx\n", offset);
481 len = write(hwpoison_inject_fd, buf, len);
482 if (len < 0) {
483 perror("hwpoison inject");
484 return len;
485 }
486 return 0;
487}
488
489static int unpoison_page(unsigned long offset)
490{
491 char buf[100];
492 int len;
493
494 len = sprintf(buf, "0x%lx\n", offset);
495 len = write(hwpoison_forget_fd, buf, len);
496 if (len < 0) {
497 perror("hwpoison forget");
498 return len;
499 }
500 return 0;
501}
365 502
366/* 503/*
367 * page frame walker 504 * page frame walker
@@ -394,104 +531,83 @@ static int hash_slot(uint64_t flags)
394 exit(EXIT_FAILURE); 531 exit(EXIT_FAILURE);
395} 532}
396 533
397static void add_page(unsigned long offset, uint64_t flags) 534static void add_page(unsigned long voffset,
535 unsigned long offset, uint64_t flags)
398{ 536{
399 flags = expand_overloaded_flags(flags); 537 flags = kpageflags_flags(flags);
400
401 if (!opt_raw)
402 flags = well_known_flags(flags);
403 538
404 if (!bit_mask_ok(flags)) 539 if (!bit_mask_ok(flags))
405 return; 540 return;
406 541
542 if (opt_hwpoison)
543 hwpoison_page(offset);
544 if (opt_unpoison)
545 unpoison_page(offset);
546
407 if (opt_list == 1) 547 if (opt_list == 1)
408 show_page_range(offset, flags); 548 show_page_range(voffset, offset, flags);
409 else if (opt_list == 2) 549 else if (opt_list == 2)
410 show_page(offset, flags); 550 show_page(voffset, offset, flags);
411 551
412 nr_pages[hash_slot(flags)]++; 552 nr_pages[hash_slot(flags)]++;
413 total_pages++; 553 total_pages++;
414} 554}
415 555
416static void walk_pfn(unsigned long index, unsigned long count) 556#define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */
557static void walk_pfn(unsigned long voffset,
558 unsigned long index,
559 unsigned long count)
417{ 560{
561 uint64_t buf[KPAGEFLAGS_BATCH];
418 unsigned long batch; 562 unsigned long batch;
419 unsigned long n; 563 unsigned long pages;
420 unsigned long i; 564 unsigned long i;
421 565
422 if (index > ULONG_MAX / KPF_BYTES)
423 fatal("index overflow: %lu\n", index);
424
425 lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET);
426
427 while (count) { 566 while (count) {
428 uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH]; 567 batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);
429 568 pages = kpageflags_read(buf, index, batch);
430 batch = min_t(unsigned long, count, PAGES_BATCH); 569 if (pages == 0)
431 n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES);
432 if (n == 0)
433 break; 570 break;
434 if (n < 0) {
435 perror(PROC_KPAGEFLAGS);
436 exit(EXIT_FAILURE);
437 }
438 571
439 if (n % KPF_BYTES != 0) 572 for (i = 0; i < pages; i++)
440 fatal("partial read: %lu bytes\n", n); 573 add_page(voffset + i, index + i, buf[i]);
441 n = n / KPF_BYTES;
442 574
443 for (i = 0; i < n; i++) 575 index += pages;
444 add_page(index + i, kpageflags_buf[i]); 576 count -= pages;
445
446 index += batch;
447 count -= batch;
448 } 577 }
449} 578}
450 579
451 580#define PAGEMAP_BATCH (64 << 10)
452#define PAGEMAP_BATCH 4096 581static void walk_vma(unsigned long index, unsigned long count)
453static unsigned long task_pfn(unsigned long pgoff)
454{ 582{
455 static uint64_t buf[PAGEMAP_BATCH]; 583 uint64_t buf[PAGEMAP_BATCH];
456 static unsigned long start; 584 unsigned long batch;
457 static long count; 585 unsigned long pages;
458 uint64_t pfn; 586 unsigned long pfn;
587 unsigned long i;
459 588
460 if (pgoff < start || pgoff >= start + count) { 589 while (count) {
461 if (lseek64(pagemap_fd, 590 batch = min_t(unsigned long, count, PAGEMAP_BATCH);
462 (uint64_t)pgoff * PM_ENTRY_BYTES, 591 pages = pagemap_read(buf, index, batch);
463 SEEK_SET) < 0) { 592 if (pages == 0)
464 perror("pagemap seek"); 593 break;
465 exit(EXIT_FAILURE);
466 }
467 count = read(pagemap_fd, buf, sizeof(buf));
468 if (count == 0)
469 return 0;
470 if (count < 0) {
471 perror("pagemap read");
472 exit(EXIT_FAILURE);
473 }
474 if (count % PM_ENTRY_BYTES) {
475 fatal("pagemap read not aligned.\n");
476 exit(EXIT_FAILURE);
477 }
478 count /= PM_ENTRY_BYTES;
479 start = pgoff;
480 }
481 594
482 pfn = buf[pgoff - start]; 595 for (i = 0; i < pages; i++) {
483 if (pfn & PM_PRESENT) 596 pfn = pagemap_pfn(buf[i]);
484 pfn = PM_PFRAME(pfn); 597 if (pfn)
485 else 598 walk_pfn(index + i, pfn, 1);
486 pfn = 0; 599 }
487 600
488 return pfn; 601 index += pages;
602 count -= pages;
603 }
489} 604}
490 605
491static void walk_task(unsigned long index, unsigned long count) 606static void walk_task(unsigned long index, unsigned long count)
492{ 607{
493 int i = 0;
494 const unsigned long end = index + count; 608 const unsigned long end = index + count;
609 unsigned long start;
610 int i = 0;
495 611
496 while (index < end) { 612 while (index < end) {
497 613
@@ -501,15 +617,11 @@ static void walk_task(unsigned long index, unsigned long count)
501 if (pg_start[i] >= end) 617 if (pg_start[i] >= end)
502 return; 618 return;
503 619
504 voffset = max_t(unsigned long, pg_start[i], index); 620 start = max_t(unsigned long, pg_start[i], index);
505 index = min_t(unsigned long, pg_end[i], end); 621 index = min_t(unsigned long, pg_end[i], end);
506 622
507 assert(voffset < index); 623 assert(start < index);
508 for (; voffset < index; voffset++) { 624 walk_vma(start, index - start);
509 unsigned long pfn = task_pfn(voffset);
510 if (pfn)
511 walk_pfn(pfn, 1);
512 }
513 } 625 }
514} 626}
515 627
@@ -527,18 +639,14 @@ static void walk_addr_ranges(void)
527{ 639{
528 int i; 640 int i;
529 641
530 kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY); 642 kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
531 if (kpageflags_fd < 0) {
532 perror(PROC_KPAGEFLAGS);
533 exit(EXIT_FAILURE);
534 }
535 643
536 if (!nr_addr_ranges) 644 if (!nr_addr_ranges)
537 add_addr_range(0, ULONG_MAX); 645 add_addr_range(0, ULONG_MAX);
538 646
539 for (i = 0; i < nr_addr_ranges; i++) 647 for (i = 0; i < nr_addr_ranges; i++)
540 if (!opt_pid) 648 if (!opt_pid)
541 walk_pfn(opt_offset[i], opt_size[i]); 649 walk_pfn(0, opt_offset[i], opt_size[i]);
542 else 650 else
543 walk_task(opt_offset[i], opt_size[i]); 651 walk_task(opt_offset[i], opt_size[i]);
544 652
@@ -575,6 +683,8 @@ static void usage(void)
575" -l|--list Show page details in ranges\n" 683" -l|--list Show page details in ranges\n"
576" -L|--list-each Show page details one by one\n" 684" -L|--list-each Show page details one by one\n"
577" -N|--no-summary Don't show summay info\n" 685" -N|--no-summary Don't show summay info\n"
686" -X|--hwpoison hwpoison pages\n"
687" -x|--unpoison unpoison pages\n"
578" -h|--help Show this usage message\n" 688" -h|--help Show this usage message\n"
579"addr-spec:\n" 689"addr-spec:\n"
580" N one page at offset N (unit: pages)\n" 690" N one page at offset N (unit: pages)\n"
@@ -624,11 +734,7 @@ static void parse_pid(const char *str)
624 opt_pid = parse_number(str); 734 opt_pid = parse_number(str);
625 735
626 sprintf(buf, "/proc/%d/pagemap", opt_pid); 736 sprintf(buf, "/proc/%d/pagemap", opt_pid);
627 pagemap_fd = open(buf, O_RDONLY); 737 pagemap_fd = checked_open(buf, O_RDONLY);
628 if (pagemap_fd < 0) {
629 perror(buf);
630 exit(EXIT_FAILURE);
631 }
632 738
633 sprintf(buf, "/proc/%d/maps", opt_pid); 739 sprintf(buf, "/proc/%d/maps", opt_pid);
634 file = fopen(buf, "r"); 740 file = fopen(buf, "r");
@@ -788,6 +894,8 @@ static struct option opts[] = {
788 { "list" , 0, NULL, 'l' }, 894 { "list" , 0, NULL, 'l' },
789 { "list-each" , 0, NULL, 'L' }, 895 { "list-each" , 0, NULL, 'L' },
790 { "no-summary", 0, NULL, 'N' }, 896 { "no-summary", 0, NULL, 'N' },
897 { "hwpoison" , 0, NULL, 'X' },
898 { "unpoison" , 0, NULL, 'x' },
791 { "help" , 0, NULL, 'h' }, 899 { "help" , 0, NULL, 'h' },
792 { NULL , 0, NULL, 0 } 900 { NULL , 0, NULL, 0 }
793}; 901};
@@ -799,7 +907,7 @@ int main(int argc, char *argv[])
799 page_size = getpagesize(); 907 page_size = getpagesize();
800 908
801 while ((c = getopt_long(argc, argv, 909 while ((c = getopt_long(argc, argv,
802 "rp:f:a:b:lLNh", opts, NULL)) != -1) { 910 "rp:f:a:b:lLNXxh", opts, NULL)) != -1) {
803 switch (c) { 911 switch (c) {
804 case 'r': 912 case 'r':
805 opt_raw = 1; 913 opt_raw = 1;
@@ -825,6 +933,14 @@ int main(int argc, char *argv[])
825 case 'N': 933 case 'N':
826 opt_no_summary = 1; 934 opt_no_summary = 1;
827 break; 935 break;
936 case 'X':
937 opt_hwpoison = 1;
938 prepare_hwpoison_fd();
939 break;
940 case 'x':
941 opt_unpoison = 1;
942 prepare_hwpoison_fd();
943 break;
828 case 'h': 944 case 'h':
829 usage(); 945 usage();
830 exit(0); 946 exit(0);
@@ -844,7 +960,7 @@ int main(int argc, char *argv[])
844 walk_addr_ranges(); 960 walk_addr_ranges();
845 961
846 if (opt_list == 1) 962 if (opt_list == 1)
847 show_page_range(0, 0); /* drain the buffer */ 963 show_page_range(0, 0, 0); /* drain the buffer */
848 964
849 if (opt_no_summary) 965 if (opt_no_summary)
850 return 0; 966 return 0;
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 600a304a828c..df09b9650a81 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -57,7 +57,9 @@ There are three components to pagemap:
57 16. COMPOUND_TAIL 57 16. COMPOUND_TAIL
58 16. HUGE 58 16. HUGE
59 18. UNEVICTABLE 59 18. UNEVICTABLE
60 19. HWPOISON
60 20. NOPAGE 61 20. NOPAGE
62 21. KSM
61 63
62Short descriptions to the page flags: 64Short descriptions to the page flags:
63 65
@@ -86,9 +88,15 @@ Short descriptions to the page flags:
8617. HUGE 8817. HUGE
87 this is an integral part of a HugeTLB page 89 this is an integral part of a HugeTLB page
88 90
9119. HWPOISON
92 hardware detected memory corruption on this page: don't touch the data!
93
8920. NOPAGE 9420. NOPAGE
90 no page frame exists at the requested address 95 no page frame exists at the requested address
91 96
9721. KSM
98 identical memory pages dynamically shared between one or more processes
99
92 [IO related page flags] 100 [IO related page flags]
93 1. ERROR IO error occurred 101 1. ERROR IO error occurred
94 3. UPTODATE page has up-to-date data 102 3. UPTODATE page has up-to-date data
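[Editorial sketch, not part of the patch: consuming the two interfaces documented above together, resolving one virtual page to its PFN via /proc/self/pagemap and then testing the new KSM bit (21) in /proc/kpageflags. Reading /proc/kpageflags requires root; the bit layout is the one listed above.]

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_PRESENT	(1ULL << 63)
#define PM_PFRAME(x)	((x) & ((1ULL << 55) - 1))
#define KPF_KSM		21

static uint64_t read_u64(const char *path, uint64_t index)
{
	uint64_t val = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || pread(fd, &val, 8, index * 8) != 8)
		perror(path);
	if (fd >= 0)
		close(fd);
	return val;
}

int main(void)
{
	static char page[4096];
	uint64_t vpfn, ent;

	page[0] = 1;	/* touch it so the page is actually present */
	vpfn = (uintptr_t)page / sysconf(_SC_PAGESIZE);
	ent = read_u64("/proc/self/pagemap", vpfn);

	if (ent & PM_PRESENT) {
		uint64_t flags = read_u64("/proc/kpageflags", PM_PFRAME(ent));

		printf("pfn %llu ksm=%d\n",
		       (unsigned long long)PM_PFRAME(ent),
		       !!(flags & (1ULL << KPF_KSM)));
	}
	return 0;
}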
diff --git a/MAINTAINERS b/MAINTAINERS
index 09a2028bab7f..e1da925b38c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3643,6 +3643,13 @@ F: Documentation/blockdev/nbd.txt
3643F: drivers/block/nbd.c 3643F: drivers/block/nbd.c
3644F: include/linux/nbd.h 3644F: include/linux/nbd.h
3645 3645
3646NETWORK DROP MONITOR
3647M: Neil Horman <nhorman@tuxdriver.com>
3648L: netdev@vger.kernel.org
3649S: Maintained
3650W: https://fedorahosted.org/dropwatch/
3651F: net/core/drop_monitor.c
3652
3646NETWORKING [GENERAL] 3653NETWORKING [GENERAL]
3647M: "David S. Miller" <davem@davemloft.net> 3654M: "David S. Miller" <davem@davemloft.net>
3648L: netdev@vger.kernel.org 3655L: netdev@vger.kernel.org
@@ -3973,6 +3980,7 @@ F: drivers/block/paride/
3973PARISC ARCHITECTURE 3980PARISC ARCHITECTURE
3974M: Kyle McMartin <kyle@mcmartin.ca> 3981M: Kyle McMartin <kyle@mcmartin.ca>
3975M: Helge Deller <deller@gmx.de> 3982M: Helge Deller <deller@gmx.de>
3983M: "James E.J. Bottomley" <jejb@parisc-linux.org>
3976L: linux-parisc@vger.kernel.org 3984L: linux-parisc@vger.kernel.org
3977W: http://www.parisc-linux.org/ 3985W: http://www.parisc-linux.org/
3978T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git 3986T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 594ee0e657fe..9a8876f715d8 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -45,25 +45,25 @@ int main(void)
45 DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); 45 DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
46 46
47 /* offsets into the pt_regs */ 47 /* offsets into the pt_regs */
48 DEFINE(PT_D0, offsetof(struct pt_regs, d0)); 48 DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
49 DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); 49 DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
50 DEFINE(PT_D1, offsetof(struct pt_regs, d1)); 50 DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
51 DEFINE(PT_D2, offsetof(struct pt_regs, d2)); 51 DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
52 DEFINE(PT_D3, offsetof(struct pt_regs, d3)); 52 DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
53 DEFINE(PT_D4, offsetof(struct pt_regs, d4)); 53 DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
54 DEFINE(PT_D5, offsetof(struct pt_regs, d5)); 54 DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
55 DEFINE(PT_A0, offsetof(struct pt_regs, a0)); 55 DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
56 DEFINE(PT_A1, offsetof(struct pt_regs, a1)); 56 DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
57 DEFINE(PT_A2, offsetof(struct pt_regs, a2)); 57 DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
58 DEFINE(PT_PC, offsetof(struct pt_regs, pc)); 58 DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
59 DEFINE(PT_SR, offsetof(struct pt_regs, sr)); 59 DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
60 60
61#ifdef CONFIG_COLDFIRE 61#ifdef CONFIG_COLDFIRE
62 /* bitfields are a bit difficult */ 62 /* bitfields are a bit difficult */
63 DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2); 63 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
64#else 64#else
65 /* bitfields are a bit difficult */ 65 /* bitfields are a bit difficult */
66 DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); 66 DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4);
67#endif 67#endif
68 68
69 /* signal defines */ 69 /* signal defines */
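[Editorial note: asm-offsets.c is never linked into the kernel; it is only compiled to assembly, and the DEFINE() marker lines are turned by a Kbuild sed rule into #defines in a generated asm-offsets.h that the entry.S files below include. That is why this pure rename plus the matching renames in the .S files is sufficient. A standalone illustration of the mechanism follows; pt_regs_example and the demo file name are made up, and the DEFINE macro mirrors the kernel's include/linux/kbuild.h.]

#include <stddef.h>

struct pt_regs_example {		/* stand-in for the real pt_regs */
	unsigned long d0;
	unsigned long orig_d0;
	unsigned long sr;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(PT_OFF_D0, offsetof(struct pt_regs_example, d0));
	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs_example, orig_d0));
	DEFINE(PT_OFF_SR, offsetof(struct pt_regs_example, sr));
	return 0;
}

/*
 * $ cc -S asm-offsets-demo.c -o - | grep -- '->'
 * Each matching marker line is rewritten by the build rule into
 * "#define PT_OFF_D0 0" and friends in the generated asm-offsets.h.
 */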
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index f56faa5c9cd9..56043ade3941 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -46,7 +46,7 @@
46ENTRY(buserr) 46ENTRY(buserr)
47 SAVE_ALL 47 SAVE_ALL
48 moveq #-1,%d0 48 moveq #-1,%d0
49 movel %d0,%sp@(PT_ORIG_D0) 49 movel %d0,%sp@(PT_OFF_ORIG_D0)
50 movel %sp,%sp@- /* stack frame pointer argument */ 50 movel %sp,%sp@- /* stack frame pointer argument */
51 jsr buserr_c 51 jsr buserr_c
52 addql #4,%sp 52 addql #4,%sp
@@ -55,7 +55,7 @@ ENTRY(buserr)
55ENTRY(trap) 55ENTRY(trap)
56 SAVE_ALL 56 SAVE_ALL
57 moveq #-1,%d0 57 moveq #-1,%d0
58 movel %d0,%sp@(PT_ORIG_D0) 58 movel %d0,%sp@(PT_OFF_ORIG_D0)
59 movel %sp,%sp@- /* stack frame pointer argument */ 59 movel %sp,%sp@- /* stack frame pointer argument */
60 jsr trap_c 60 jsr trap_c
61 addql #4,%sp 61 addql #4,%sp
@@ -67,7 +67,7 @@ ENTRY(trap)
67ENTRY(dbginterrupt) 67ENTRY(dbginterrupt)
68 SAVE_ALL 68 SAVE_ALL
69 moveq #-1,%d0 69 moveq #-1,%d0
70 movel %d0,%sp@(PT_ORIG_D0) 70 movel %d0,%sp@(PT_OFF_ORIG_D0)
71 movel %sp,%sp@- /* stack frame pointer argument */ 71 movel %sp,%sp@- /* stack frame pointer argument */
72 jsr dbginterrupt_c 72 jsr dbginterrupt_c
73 addql #4,%sp 73 addql #4,%sp
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c
index b1703c67a4f1..f3236d0b522d 100644
--- a/arch/m68knommu/mm/init.c
+++ b/arch/m68knommu/mm/init.c
@@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
162 totalram_pages++; 162 totalram_pages++;
163 pages++; 163 pages++;
164 } 164 }
165 printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); 165 printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024));
166} 166}
167#endif 167#endif
168 168
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 0f41ba82a3b5..942397984c66 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -17,7 +17,6 @@
17#include <asm/mcfsim.h> 17#include <asm/mcfsim.h>
18#include <asm/mcfuart.h> 18#include <asm/mcfuart.h>
19#include <asm/mcfdma.h> 19#include <asm/mcfdma.h>
20#include <asm/mcfuart.h>
21 20
22/***************************************************************************/ 21/***************************************************************************/
23 22
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index b1aef72f3baf..9d80d2c42866 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -39,17 +39,17 @@
39.globl inthandler7 39.globl inthandler7
40 40
41badsys: 41badsys:
42 movel #-ENOSYS,%sp@(PT_D0) 42 movel #-ENOSYS,%sp@(PT_OFF_D0)
43 jra ret_from_exception 43 jra ret_from_exception
44 44
45do_trace: 45do_trace:
46 movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ 46 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
47 subql #4,%sp 47 subql #4,%sp
48 SAVE_SWITCH_STACK 48 SAVE_SWITCH_STACK
49 jbsr syscall_trace 49 jbsr syscall_trace
50 RESTORE_SWITCH_STACK 50 RESTORE_SWITCH_STACK
51 addql #4,%sp 51 addql #4,%sp
52 movel %sp@(PT_ORIG_D0),%d1 52 movel %sp@(PT_OFF_ORIG_D0),%d1
53 movel #-ENOSYS,%d0 53 movel #-ENOSYS,%d0
54 cmpl #NR_syscalls,%d1 54 cmpl #NR_syscalls,%d1
55 jcc 1f 55 jcc 1f
@@ -57,7 +57,7 @@ do_trace:
57 lea sys_call_table, %a0 57 lea sys_call_table, %a0
58 jbsr %a0@(%d1) 58 jbsr %a0@(%d1)
59 59
601: movel %d0,%sp@(PT_D0) /* save the return value */ 601: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
61 subql #4,%sp /* dummy return address */ 61 subql #4,%sp /* dummy return address */
62 SAVE_SWITCH_STACK 62 SAVE_SWITCH_STACK
63 jbsr syscall_trace 63 jbsr syscall_trace
@@ -75,7 +75,7 @@ ENTRY(system_call)
75 jbsr set_esp0 75 jbsr set_esp0
76 addql #4,%sp 76 addql #4,%sp
77 77
78 movel %sp@(PT_ORIG_D0),%d0 78 movel %sp@(PT_OFF_ORIG_D0),%d0
79 79
80 movel %sp,%d1 /* get thread_info pointer */ 80 movel %sp,%d1 /* get thread_info pointer */
81 andl #-THREAD_SIZE,%d1 81 andl #-THREAD_SIZE,%d1
@@ -88,10 +88,10 @@ ENTRY(system_call)
88 lea sys_call_table,%a0 88 lea sys_call_table,%a0
89 movel %a0@(%d0), %a0 89 movel %a0@(%d0), %a0
90 jbsr %a0@ 90 jbsr %a0@
91 movel %d0,%sp@(PT_D0) /* save the return value*/ 91 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
92 92
93ret_from_exception: 93ret_from_exception:
94 btst #5,%sp@(PT_SR) /* check if returning to kernel*/ 94 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
95 jeq Luser_return /* if so, skip resched, signals*/ 95 jeq Luser_return /* if so, skip resched, signals*/
96 96
97Lkernel_return: 97Lkernel_return:
@@ -133,7 +133,7 @@ Lreturn:
133 */ 133 */
134inthandler1: 134inthandler1:
135 SAVE_ALL 135 SAVE_ALL
136 movew %sp@(PT_VECTOR), %d0 136 movew %sp@(PT_OFF_VECTOR), %d0
137 and #0x3ff, %d0 137 and #0x3ff, %d0
138 138
139 movel %sp,%sp@- 139 movel %sp,%sp@-
@@ -144,7 +144,7 @@ inthandler1:
144 144
145inthandler2: 145inthandler2:
146 SAVE_ALL 146 SAVE_ALL
147 movew %sp@(PT_VECTOR), %d0 147 movew %sp@(PT_OFF_VECTOR), %d0
148 and #0x3ff, %d0 148 and #0x3ff, %d0
149 149
150 movel %sp,%sp@- 150 movel %sp,%sp@-
@@ -155,7 +155,7 @@ inthandler2:
155 155
156inthandler3: 156inthandler3:
157 SAVE_ALL 157 SAVE_ALL
158 movew %sp@(PT_VECTOR), %d0 158 movew %sp@(PT_OFF_VECTOR), %d0
159 and #0x3ff, %d0 159 and #0x3ff, %d0
160 160
161 movel %sp,%sp@- 161 movel %sp,%sp@-
@@ -166,7 +166,7 @@ inthandler3:
166 166
167inthandler4: 167inthandler4:
168 SAVE_ALL 168 SAVE_ALL
169 movew %sp@(PT_VECTOR), %d0 169 movew %sp@(PT_OFF_VECTOR), %d0
170 and #0x3ff, %d0 170 and #0x3ff, %d0
171 171
172 movel %sp,%sp@- 172 movel %sp,%sp@-
@@ -177,7 +177,7 @@ inthandler4:
177 177
178inthandler5: 178inthandler5:
179 SAVE_ALL 179 SAVE_ALL
180 movew %sp@(PT_VECTOR), %d0 180 movew %sp@(PT_OFF_VECTOR), %d0
181 and #0x3ff, %d0 181 and #0x3ff, %d0
182 182
183 movel %sp,%sp@- 183 movel %sp,%sp@-
@@ -188,7 +188,7 @@ inthandler5:
188 188
189inthandler6: 189inthandler6:
190 SAVE_ALL 190 SAVE_ALL
191 movew %sp@(PT_VECTOR), %d0 191 movew %sp@(PT_OFF_VECTOR), %d0
192 and #0x3ff, %d0 192 and #0x3ff, %d0
193 193
194 movel %sp,%sp@- 194 movel %sp,%sp@-
@@ -199,7 +199,7 @@ inthandler6:
199 199
200inthandler7: 200inthandler7:
201 SAVE_ALL 201 SAVE_ALL
202 movew %sp@(PT_VECTOR), %d0 202 movew %sp@(PT_OFF_VECTOR), %d0
203 and #0x3ff, %d0 203 and #0x3ff, %d0
204 204
205 movel %sp,%sp@- 205 movel %sp,%sp@-
@@ -210,7 +210,7 @@ inthandler7:
210 210
211inthandler: 211inthandler:
212 SAVE_ALL 212 SAVE_ALL
213 movew %sp@(PT_VECTOR), %d0 213 movew %sp@(PT_OFF_VECTOR), %d0
214 and #0x3ff, %d0 214 and #0x3ff, %d0
215 215
216 movel %sp,%sp@- 216 movel %sp,%sp@-
@@ -224,7 +224,7 @@ ret_from_interrupt:
2242: 2242:
225 RESTORE_ALL 225 RESTORE_ALL
2261: 2261:
227 moveb %sp@(PT_SR), %d0 227 moveb %sp@(PT_OFF_SR), %d0
228 and #7, %d0 228 and #7, %d0
229 jhi 2b 229 jhi 2b
230 230
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 55dfefe38642..6d3460a39cac 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -35,17 +35,17 @@
35.globl inthandler 35.globl inthandler
36 36
37badsys: 37badsys:
38 movel #-ENOSYS,%sp@(PT_D0) 38 movel #-ENOSYS,%sp@(PT_OFF_D0)
39 jra ret_from_exception 39 jra ret_from_exception
40 40
41do_trace: 41do_trace:
42 movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ 42 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
43 subql #4,%sp 43 subql #4,%sp
44 SAVE_SWITCH_STACK 44 SAVE_SWITCH_STACK
45 jbsr syscall_trace 45 jbsr syscall_trace
46 RESTORE_SWITCH_STACK 46 RESTORE_SWITCH_STACK
47 addql #4,%sp 47 addql #4,%sp
48 movel %sp@(PT_ORIG_D0),%d1 48 movel %sp@(PT_OFF_ORIG_D0),%d1
49 movel #-ENOSYS,%d0 49 movel #-ENOSYS,%d0
50 cmpl #NR_syscalls,%d1 50 cmpl #NR_syscalls,%d1
51 jcc 1f 51 jcc 1f
@@ -53,7 +53,7 @@ do_trace:
53 lea sys_call_table, %a0 53 lea sys_call_table, %a0
54 jbsr %a0@(%d1) 54 jbsr %a0@(%d1)
55 55
561: movel %d0,%sp@(PT_D0) /* save the return value */ 561: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
57 subql #4,%sp /* dummy return address */ 57 subql #4,%sp /* dummy return address */
58 SAVE_SWITCH_STACK 58 SAVE_SWITCH_STACK
59 jbsr syscall_trace 59 jbsr syscall_trace
@@ -79,10 +79,10 @@ ENTRY(system_call)
79 lea sys_call_table,%a0 79 lea sys_call_table,%a0
80 movel %a0@(%d0), %a0 80 movel %a0@(%d0), %a0
81 jbsr %a0@ 81 jbsr %a0@
82 movel %d0,%sp@(PT_D0) /* save the return value*/ 82 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
83 83
84ret_from_exception: 84ret_from_exception:
85 btst #5,%sp@(PT_SR) /* check if returning to kernel*/ 85 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
86 jeq Luser_return /* if so, skip resched, signals*/ 86 jeq Luser_return /* if so, skip resched, signals*/
87 87
88Lkernel_return: 88Lkernel_return:
@@ -124,7 +124,7 @@ Lreturn:
124 */ 124 */
125inthandler: 125inthandler:
126 SAVE_ALL 126 SAVE_ALL
127 movew %sp@(PT_VECTOR), %d0 127 movew %sp@(PT_OFF_VECTOR), %d0
128 and.l #0x3ff, %d0 128 and.l #0x3ff, %d0
129 lsr.l #0x02, %d0 129 lsr.l #0x02, %d0
130 130
@@ -139,7 +139,7 @@ ret_from_interrupt:
1392: 1392:
140 RESTORE_ALL 140 RESTORE_ALL
1411: 1411:
142 moveb %sp@(PT_SR), %d0 142 moveb %sp@(PT_OFF_SR), %d0
143 and #7, %d0 143 and #7, %d0
144 jhi 2b 144 jhi 2b
145 /* check if we need to do software interrupts */ 145 /* check if we need to do software interrupts */
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 3b471c0da24a..dd7d591f70ea 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -81,11 +81,11 @@ ENTRY(system_call)
81 81
82 movel %d3,%a0 82 movel %d3,%a0
83 jbsr %a0@ 83 jbsr %a0@
84 movel %d0,%sp@(PT_D0) /* save the return value */ 84 movel %d0,%sp@(PT_OFF_D0) /* save the return value */
85 jra ret_from_exception 85 jra ret_from_exception
861: 861:
87 movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */ 87 movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */
88 movel %d2,PT_D0(%sp) /* on syscall entry */ 88 movel %d2,PT_OFF_D0(%sp) /* on syscall entry */
89 subql #4,%sp 89 subql #4,%sp
90 SAVE_SWITCH_STACK 90 SAVE_SWITCH_STACK
91 jbsr syscall_trace 91 jbsr syscall_trace
@@ -93,7 +93,7 @@ ENTRY(system_call)
93 addql #4,%sp 93 addql #4,%sp
94 movel %d3,%a0 94 movel %d3,%a0
95 jbsr %a0@ 95 jbsr %a0@
96 movel %d0,%sp@(PT_D0) /* save the return value */ 96 movel %d0,%sp@(PT_OFF_D0) /* save the return value */
97 subql #4,%sp /* dummy return address */ 97 subql #4,%sp /* dummy return address */
98 SAVE_SWITCH_STACK 98 SAVE_SWITCH_STACK
99 jbsr syscall_trace 99 jbsr syscall_trace
@@ -104,7 +104,7 @@ ret_from_signal:
104 104
105ret_from_exception: 105ret_from_exception:
106 move #0x2700,%sr /* disable intrs */ 106 move #0x2700,%sr /* disable intrs */
107 btst #5,%sp@(PT_SR) /* check if returning to kernel */ 107 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */
108 jeq Luser_return /* if so, skip resched, signals */ 108 jeq Luser_return /* if so, skip resched, signals */
109 109
110#ifdef CONFIG_PREEMPT 110#ifdef CONFIG_PREEMPT
@@ -142,8 +142,8 @@ Luser_return:
142Lreturn: 142Lreturn:
143 move #0x2700,%sr /* disable intrs */ 143 move #0x2700,%sr /* disable intrs */
144 movel sw_usp,%a0 /* get usp */ 144 movel sw_usp,%a0 /* get usp */
145 movel %sp@(PT_PC),%a0@- /* copy exception program counter */ 145 movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
146 movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ 146 movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
147 moveml %sp@,%d1-%d5/%a0-%a2 147 moveml %sp@,%d1-%d5/%a0-%a2
148 lea %sp@(32),%sp /* space for 8 regs */ 148 lea %sp@(32),%sp /* space for 8 regs */
149 movel %sp@+,%d0 149 movel %sp@+,%d0
@@ -181,9 +181,9 @@ Lsignal_return:
181ENTRY(inthandler) 181ENTRY(inthandler)
182 SAVE_ALL 182 SAVE_ALL
183 moveq #-1,%d0 183 moveq #-1,%d0
184 movel %d0,%sp@(PT_ORIG_D0) 184 movel %d0,%sp@(PT_OFF_ORIG_D0)
185 185
186 movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */ 186 movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
187 andl #0x03fc,%d0 /* mask out vector only */ 187 andl #0x03fc,%d0 /* mask out vector only */
188 188
189 movel %sp,%sp@- /* push regs arg */ 189 movel %sp,%sp@- /* push regs arg */
@@ -203,7 +203,7 @@ ENTRY(inthandler)
203ENTRY(fasthandler) 203ENTRY(fasthandler)
204 SAVE_LOCAL 204 SAVE_LOCAL
205 205
206 movew %sp@(PT_FORMATVEC),%d0 206 movew %sp@(PT_OFF_FORMATVEC),%d0
207 andl #0x03fc,%d0 /* mask out vector only */ 207 andl #0x03fc,%d0 /* mask out vector only */
208 208
209 movel %sp,%sp@- /* push regs arg */ 209 movel %sp,%sp@- /* push regs arg */
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index acc1f05d1e2c..e3ecb36dd554 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -592,6 +592,8 @@ C_ENTRY(full_exception_trap):
592 nop 592 nop
593 mfs r7, rfsr; /* save FSR */ 593 mfs r7, rfsr; /* save FSR */
594 nop 594 nop
595 mts rfsr, r0; /* Clear sticky fsr */
596 nop
595 la r12, r0, full_exception 597 la r12, r0, full_exception
596 set_vms; 598 set_vms;
597 rtbd r12, 0; 599 rtbd r12, 0;
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 6b0288ebccd6..2b86c03aa841 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */
384 addk r8, r17, r0; /* Load exception address */ 384 addk r8, r17, r0; /* Load exception address */
385 bralid r15, full_exception; /* Branch to the handler */ 385 bralid r15, full_exception; /* Branch to the handler */
386 nop; 386 nop;
387 mts r0, rfsr; /* Clear sticky fsr */ 387 mts rfsr, r0; /* Clear sticky fsr */
388 nop 388 nop
389 389
390 /* 390 /*
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 4201c743cc9f..c592d475b3d8 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -235,7 +235,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
235 regs->pc = pc; 235 regs->pc = pc;
236 regs->r1 = usp; 236 regs->r1 = usp;
237 regs->pt_mode = 0; 237 regs->pt_mode = 0;
238#ifdef CONFIG_MMU
238 regs->msr |= MSR_UMS; 239 regs->msr |= MSR_UMS;
240#endif
239} 241}
240 242
241#ifdef CONFIG_MMU 243#ifdef CONFIG_MMU
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index f388dc68f605..524d9352f17e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
18 select BUG 18 select BUG
19 select HAVE_PERF_EVENTS 19 select HAVE_PERF_EVENTS
20 select GENERIC_ATOMIC64 if !64BIT 20 select GENERIC_ATOMIC64 if !64BIT
21 select HAVE_ARCH_TRACEHOOK
21 help 22 help
22 The PA-RISC microprocessor is designed by Hewlett-Packard and used 23 The PA-RISC microprocessor is designed by Hewlett-Packard and used
23 in many of their workstations & servers (HP9000 700 and 800 series, 24 in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index de3fe3a18229..6fec4d4a1a18 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -21,9 +21,9 @@
21#define KERNEL_MAP_END (TMPALIAS_MAP_START) 21#define KERNEL_MAP_END (TMPALIAS_MAP_START)
22 22
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24extern void *vmalloc_start; 24extern void *parisc_vmalloc_start;
25#define PCXL_DMA_MAP_SIZE (8*1024*1024) 25#define PCXL_DMA_MAP_SIZE (8*1024*1024)
26#define VMALLOC_START ((unsigned long)vmalloc_start) 26#define VMALLOC_START ((unsigned long)parisc_vmalloc_start)
27#define VMALLOC_END (KERNEL_MAP_END) 27#define VMALLOC_END (KERNEL_MAP_END)
28#endif /*__ASSEMBLY__*/ 28#endif /*__ASSEMBLY__*/
29 29
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index ce93133d5112..0d68184a76cb 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,29 +1,11 @@
1/* hardirq.h: PA-RISC hard IRQ support. 1/* hardirq.h: PA-RISC hard IRQ support.
2 * 2 *
3 * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
4 *
5 * The locking is really quite interesting. There's a cpu-local
6 * count of how many interrupts are being handled, and a global
7 * lock. An interrupt can only be serviced if the global lock
8 * is free. You can't be sure no more interrupts are being
9 * serviced until you've acquired the lock and then checked
10 * all the per-cpu interrupt counts are all zero. It's a specialised
11 * br_lock, and that's exactly how Sparc does it. We don't because
12 * it's more locking for us. This way is lock-free in the interrupt path.
13 */ 4 */
14 5
15#ifndef _PARISC_HARDIRQ_H 6#ifndef _PARISC_HARDIRQ_H
16#define _PARISC_HARDIRQ_H 7#define _PARISC_HARDIRQ_H
17 8
18#include <linux/threads.h> 9#include <asm-generic/hardirq.h>
19#include <linux/irq.h>
20
21typedef struct {
22 unsigned long __softirq_pending; /* set_bit is used on this */
23} ____cacheline_aligned irq_cpustat_t;
24
25#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
26
27void ack_bad_irq(unsigned int irq);
28 10
29#endif /* _PARISC_HARDIRQ_H */ 11#endif /* _PARISC_HARDIRQ_H */
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 302f68dc889c..aead40b16dd8 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task);
59#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) 59#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
60#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) 60#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
61#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) 61#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
62#define user_stack_pointer(regs) ((regs)->gr[30])
62unsigned long profile_pc(struct pt_regs *); 63unsigned long profile_pc(struct pt_regs *);
63extern void show_regs(struct pt_regs *); 64extern void show_regs(struct pt_regs *);
64#endif 65
66
67#endif /* __KERNEL__ */
65 68
66#endif 69#endif
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
new file mode 100644
index 000000000000..8bdfd2c8c39f
--- /dev/null
+++ b/arch/parisc/include/asm/syscall.h
@@ -0,0 +1,40 @@
1/* syscall.h */
2
3#ifndef _ASM_PARISC_SYSCALL_H_
4#define _ASM_PARISC_SYSCALL_H_
5
6#include <linux/err.h>
7#include <asm/ptrace.h>
8
9static inline long syscall_get_nr(struct task_struct *tsk,
10 struct pt_regs *regs)
11{
12 return regs->gr[20];
13}
14
15static inline void syscall_get_arguments(struct task_struct *tsk,
16 struct pt_regs *regs, unsigned int i,
17 unsigned int n, unsigned long *args)
18{
19 BUG_ON(i);
20
21 switch (n) {
22 case 6:
23 args[5] = regs->gr[21];
24 case 5:
25 args[4] = regs->gr[22];
26 case 4:
27 args[3] = regs->gr[23];
28 case 3:
29 args[2] = regs->gr[24];
30 case 2:
31 args[1] = regs->gr[25];
32 case 1:
33 args[0] = regs->gr[26];
34 break;
35 default:
36 BUG();
37 }
38}
39
40#endif /*_ASM_PARISC_SYSCALL_H_*/
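A note on the new syscall_get_arguments() above: it fills args[0..n-1] from the PA-RISC argument registers gr[26] down to gr[21] by deliberately falling through the switch cases from the highest requested argument to the lowest, and BUG()s if a non-zero starting index is passed. A minimal user-space sketch of the same fall-through copy, with a plain array standing in for struct pt_regs (the register layout here is only illustrative):

    #include <assert.h>
    #include <stdio.h>

    /* Illustrative stand-in for the syscall argument registers:
     * gr[26] holds arg0, gr[25] arg1, ... gr[21] arg5. */
    static unsigned long gr[32];

    static void get_arguments(unsigned int n, unsigned long *args)
    {
        /* Deliberate fall-through: starting at the highest requested
         * argument, each case copies one register and drops into the
         * next lower one, so exactly n slots get filled. */
        switch (n) {
        case 6: args[5] = gr[21];
        case 5: args[4] = gr[22];
        case 4: args[3] = gr[23];
        case 3: args[2] = gr[24];
        case 2: args[1] = gr[25];
        case 1: args[0] = gr[26];
            break;
        default:
            assert(0);
        }
    }

    int main(void)
    {
        unsigned long args[6];
        int i;

        for (i = 0; i < 6; i++)
            gr[26 - i] = 100 + i;    /* argument i */

        get_arguments(4, args);
        for (i = 0; i < 4; i++)
            printf("arg%d = %lu\n", i, args[i]);
        return 0;
    }

Copying from the highest argument downwards keeps the copy to a single computed jump rather than a loop.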
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index ac775a76bff7..7ecc1039cfed 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -32,6 +32,11 @@ struct thread_info {
32#define init_thread_info (init_thread_union.thread_info) 32#define init_thread_info (init_thread_union.thread_info)
33#define init_stack (init_thread_union.stack) 33#define init_stack (init_thread_union.stack)
34 34
35/* how to get the thread information struct from C */
36#define current_thread_info() ((struct thread_info *)mfctl(30))
37
38#endif /* !__ASSEMBLY */
39
35/* thread information allocation */ 40/* thread information allocation */
36 41
37#define THREAD_SIZE_ORDER 2 42#define THREAD_SIZE_ORDER 2
@@ -40,11 +45,6 @@ struct thread_info {
40#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 45#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
41#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) 46#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
42 47
43/* how to get the thread information struct from C */
44#define current_thread_info() ((struct thread_info *)mfctl(30))
45
46#endif /* !__ASSEMBLY */
47
48#define PREEMPT_ACTIVE_BIT 28 48#define PREEMPT_ACTIVE_BIT 28
49#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) 49#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
50 50
@@ -60,6 +60,8 @@ struct thread_info {
60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ 60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
61#define TIF_FREEZE 7 /* is freezing for suspend */ 61#define TIF_FREEZE 7 /* is freezing for suspend */
62#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 62#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
63#define TIF_SINGLESTEP 9 /* single stepping? */
64#define TIF_BLOCKSTEP 10 /* branch stepping? */
63 65
64#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 66#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
65#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 67#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -69,6 +71,8 @@ struct thread_info {
69#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 71#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
70#define _TIF_FREEZE (1 << TIF_FREEZE) 72#define _TIF_FREEZE (1 << TIF_FREEZE)
71#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 73#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
74#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
75#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
72 76
73#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ 77#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
74 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) 78 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 699cf8ef2118..fcd3c707bf12 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -270,8 +270,8 @@ int main(void)
270 DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); 270 DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
271 DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); 271 DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
272 BLANK(); 272 BLANK();
273 DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); 273 DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
274 DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); 274 DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
275 BLANK(); 275 BLANK();
276 DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); 276 DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
277 DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); 277 DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
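The renamed constants above exist because PA-RISC bit instructions (extru, bb) number bits from the most-significant end, while the TIF_* flags use the usual least-significant-bit-first numbering, so asm-offsets.c exports 31 - TIF_x for the assembly in entry.S. A small self-contained check of that correspondence (TIF values copied from the thread_info.h hunk above):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TIF_SINGLESTEP  9     /* LSB-first bit numbers, as in thread_info.h */
    #define TIF_BLOCKSTEP   10

    /* Extract one bit using PA-style numbering: bit 0 is the MSB of a
     * 32-bit word, so PA bit p is LSB-first bit 31 - p. */
    static unsigned int extract_pa_bit(uint32_t word, unsigned int pa_bit)
    {
        return (word >> (31 - pa_bit)) & 1;
    }

    int main(void)
    {
        uint32_t flags = 1u << TIF_BLOCKSTEP;       /* set with LSB-first numbering */
        unsigned int pa_bit = 31 - TIF_BLOCKSTEP;   /* what asm-offsets.c exports */

        assert(extract_pa_bit(flags, pa_bit) == 1);
        assert(extract_pa_bit(flags, 31 - TIF_SINGLESTEP) == 0);
        printf("TIF_BLOCKSTEP (bit %d) is PA bit %u\n", TIF_BLOCKSTEP, pa_bit);
        return 0;
    }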
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 8c4712b74dc1..3a44f7f704fa 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -2047,12 +2047,13 @@ syscall_do_signal:
2047 b,n syscall_check_sig 2047 b,n syscall_check_sig
2048 2048
2049syscall_restore: 2049syscall_restore:
2050 /* Are we being ptraced? */
2051 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2050 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2052 2051
2053 ldw TASK_PTRACE(%r1), %r19 2052 /* Are we being ptraced? */
2054 bb,< %r19,31,syscall_restore_rfi 2053 ldw TASK_FLAGS(%r1),%r19
2055 nop 2054 ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
2055 and,COND(=) %r19,%r2,%r0
2056 b,n syscall_restore_rfi
2056 2057
2057 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ 2058 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2058 rest_fp %r19 2059 rest_fp %r19
@@ -2113,16 +2114,16 @@ syscall_restore_rfi:
2113 ldi 0x0b,%r20 /* Create new PSW */ 2114 ldi 0x0b,%r20 /* Create new PSW */
2114 depi -1,13,1,%r20 /* C, Q, D, and I bits */ 2115 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2115 2116
2116 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are 2117 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
2117 * set in include/linux/ptrace.h and converted to PA bitmap 2118 * set in thread_info.h and converted to PA bitmap
2118 * numbers in asm-offsets.c */ 2119 * numbers in asm-offsets.c */
2119 2120
2120 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ 2121 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
2121 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 2122 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
2122 depi -1,27,1,%r20 /* R bit */ 2123 depi -1,27,1,%r20 /* R bit */
2123 2124
2124 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ 2125 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
2125 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 2126 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
2126 depi -1,7,1,%r20 /* T bit */ 2127 depi -1,7,1,%r20 /* T bit */
2127 2128
2128 STREG %r20,TASK_PT_PSW(%r1) 2129 STREG %r20,TASK_PT_PSW(%r1)
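In the syscall_restore hunk the old test of the task's ptrace word is replaced by a test of the thread-info flags: and,COND(=) nullifies the following branch when the AND result is zero, so syscall_restore_rfi is entered only when TIF_SINGLESTEP or TIF_BLOCKSTEP is set. A C rendering of that condition only, not of the surrounding register handling:

    #include <stdio.h>

    #define TIF_SINGLESTEP   9
    #define TIF_BLOCKSTEP    10
    #define _TIF_SINGLESTEP  (1u << TIF_SINGLESTEP)
    #define _TIF_BLOCKSTEP   (1u << TIF_BLOCKSTEP)

    /* Equivalent of: ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2;
     * and,COND(=) %r19,%r2,%r0; b,n syscall_restore_rfi */
    static int needs_restore_rfi(unsigned int task_flags)
    {
        return (task_flags & (_TIF_SINGLESTEP | _TIF_BLOCKSTEP)) != 0;
    }

    int main(void)
    {
        printf("%d\n", needs_restore_rfi(0));               /* 0: fast path */
        printf("%d\n", needs_restore_rfi(_TIF_BLOCKSTEP));  /* 1: take the RFI path */
        return 0;
    }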
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 330f536a9324..2e7610cb33d5 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -423,8 +423,3 @@ void __init init_IRQ(void)
423 set_eiem(cpu_eiem); /* EIEM : enable all external intr */ 423 set_eiem(cpu_eiem); /* EIEM : enable all external intr */
424 424
425} 425}
426
427void ack_bad_irq(unsigned int irq)
428{
429 printk(KERN_WARNING "unexpected IRQ %d\n", irq);
430}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 61ee0eec4e69..212074653df7 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr,
893 * ourselves */ 893 * ourselves */
894 for (i = 1; i < hdr->e_shnum; i++) { 894 for (i = 1; i < hdr->e_shnum; i++) {
895 if(sechdrs[i].sh_type == SHT_SYMTAB 895 if(sechdrs[i].sh_type == SHT_SYMTAB
896 && (sechdrs[i].sh_type & SHF_ALLOC)) { 896 && (sechdrs[i].sh_flags & SHF_ALLOC)) {
897 int strindex = sechdrs[i].sh_link; 897 int strindex = sechdrs[i].sh_link;
898 /* FIXME: AWFUL HACK 898 /* FIXME: AWFUL HACK
899 * The cast is to drop the const from 899 * The cast is to drop the const from
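The module.c change is a one-word bug fix: SHF_ALLOC is a section flag and must be tested against sh_flags, not sh_type. Because SHT_SYMTAB and SHF_ALLOC both happen to have the numeric value 2, the old expression silently degenerated into a plain sh_type check. A minimal illustration of the corrected test, using the standard <elf.h> section header layout (the fake table below is made up for the example):

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    /* Find the index of a SYMTAB section that is actually mapped at run
     * time: SHF_ALLOC lives in sh_flags, not in sh_type. */
    static int find_alloc_symtab(const Elf64_Shdr *sechdrs, unsigned int shnum)
    {
        unsigned int i;

        for (i = 1; i < shnum; i++)
            if (sechdrs[i].sh_type == SHT_SYMTAB &&
                (sechdrs[i].sh_flags & SHF_ALLOC))
                return i;
        return -1;
    }

    int main(void)
    {
        Elf64_Shdr shdrs[3];

        memset(shdrs, 0, sizeof(shdrs));
        shdrs[1].sh_type = SHT_SYMTAB;           /* present but not allocated */
        shdrs[2].sh_type = SHT_SYMTAB;
        shdrs[2].sh_flags = SHF_ALLOC;           /* allocated */

        printf("allocated symtab at index %d\n",
               find_alloc_symtab(shdrs, 3));     /* prints 2 */
        return 0;
    }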
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 927db3668b6f..c4f49e45129d 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -13,6 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/ptrace.h> 15#include <linux/ptrace.h>
16#include <linux/tracehook.h>
16#include <linux/user.h> 17#include <linux/user.h>
17#include <linux/personality.h> 18#include <linux/personality.h>
18#include <linux/security.h> 19#include <linux/security.h>
@@ -35,7 +36,8 @@
35 */ 36 */
36void ptrace_disable(struct task_struct *task) 37void ptrace_disable(struct task_struct *task)
37{ 38{
38 task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); 39 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
40 clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
39 41
40 /* make sure the trap bits are not set */ 42 /* make sure the trap bits are not set */
41 pa_psw(task)->r = 0; 43 pa_psw(task)->r = 0;
@@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task)
55 57
56void user_enable_single_step(struct task_struct *task) 58void user_enable_single_step(struct task_struct *task)
57{ 59{
58 task->ptrace &= ~PT_BLOCKSTEP; 60 clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
59 task->ptrace |= PT_SINGLESTEP; 61 set_tsk_thread_flag(task, TIF_SINGLESTEP);
60 62
61 if (pa_psw(task)->n) { 63 if (pa_psw(task)->n) {
62 struct siginfo si; 64 struct siginfo si;
@@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task)
98 100
99void user_enable_block_step(struct task_struct *task) 101void user_enable_block_step(struct task_struct *task)
100{ 102{
101 task->ptrace &= ~PT_SINGLESTEP; 103 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
102 task->ptrace |= PT_BLOCKSTEP; 104 set_tsk_thread_flag(task, TIF_BLOCKSTEP);
103 105
104 /* Enable taken branch trap. */ 106 /* Enable taken branch trap. */
105 pa_psw(task)->r = 0; 107 pa_psw(task)->r = 0;
@@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
263} 265}
264#endif 266#endif
265 267
268long do_syscall_trace_enter(struct pt_regs *regs)
269{
270 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
271 tracehook_report_syscall_entry(regs))
272 return -1L;
273
274 return regs->gr[20];
275}
266 276
267void syscall_trace(void) 277void do_syscall_trace_exit(struct pt_regs *regs)
268{ 278{
269 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 279 int stepping = test_thread_flag(TIF_SINGLESTEP) ||
270 return; 280 test_thread_flag(TIF_BLOCKSTEP);
271 if (!(current->ptrace & PT_PTRACED)) 281
272 return; 282 if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
273 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 283 tracehook_report_syscall_exit(regs, stepping);
274 ? 0x80 : 0));
275 /*
276 * this isn't the same as continuing with a signal, but it will do
277 * for normal use. strace only continues with a signal if the
278 * stopping signal is not SIGTRAP. -brl
279 */
280 if (current->exit_code) {
281 send_sig(current->exit_code, current, 1);
282 current->exit_code = 0;
283 }
284} 284}
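The ptrace.c rework replaces the single syscall_trace() callout with tracehook-based entry and exit hooks; the entry hook can veto a traced syscall by returning -1L, which the dispatcher in syscall.S then rejects as an out-of-range syscall number. A rough user-space sketch of that control flow with the tracehook call stubbed out (the stub and its behaviour are assumptions; only the shape follows the hunk):

    #include <stdio.h>

    /* Stand-in for tracehook_report_syscall_entry(): return non-zero if
     * the tracer wants the syscall aborted.  Purely illustrative. */
    static int report_syscall_entry_stub(unsigned long *syscall_nr)
    {
        (void)syscall_nr;
        return 0;                /* let the syscall proceed */
    }

    /* Mirrors do_syscall_trace_enter(): hand back either the syscall
     * number (gr[20] on parisc) or -1L, which the assembly copies into
     * %r20 and the range check then fails with ENOSYS. */
    static long syscall_trace_enter(int traced, unsigned long gr20)
    {
        if (traced && report_syscall_entry_stub(&gr20))
            return -1L;
        return (long)gr20;
    }

    int main(void)
    {
        printf("untraced: %ld\n", syscall_trace_enter(0, 42));  /* 42 */
        printf("traced:   %ld\n", syscall_trace_enter(1, 42));  /* 42 with this stub */
        return 0;
    }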
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 8eb3c63c407a..e8467e4aa8d1 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -21,6 +21,7 @@
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/wait.h> 22#include <linux/wait.h>
23#include <linux/ptrace.h> 23#include <linux/ptrace.h>
24#include <linux/tracehook.h>
24#include <linux/unistd.h> 25#include <linux/unistd.h>
25#include <linux/stddef.h> 26#include <linux/stddef.h>
26#include <linux/compat.h> 27#include <linux/compat.h>
@@ -34,7 +35,6 @@
34#include <asm/asm-offsets.h> 35#include <asm/asm-offsets.h>
35 36
36#ifdef CONFIG_COMPAT 37#ifdef CONFIG_COMPAT
37#include <linux/compat.h>
38#include "signal32.h" 38#include "signal32.h"
39#endif 39#endif
40 40
@@ -468,6 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
468 sigaddset(&current->blocked,sig); 468 sigaddset(&current->blocked,sig);
469 recalc_sigpending(); 469 recalc_sigpending();
470 spin_unlock_irq(&current->sighand->siglock); 470 spin_unlock_irq(&current->sighand->siglock);
471
472 tracehook_signal_handler(sig, info, ka, regs, 0);
473
471 return 1; 474 return 1;
472} 475}
473 476
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 59fc1a43ec3e..f5f96021caa0 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -288,18 +288,23 @@ tracesys:
288 STREG %r18,PT_GR18(%r2) 288 STREG %r18,PT_GR18(%r2)
289 /* Finished saving things for the debugger */ 289 /* Finished saving things for the debugger */
290 290
291 ldil L%syscall_trace,%r1 291 copy %r2,%r26
292 ldil L%do_syscall_trace_enter,%r1
292 ldil L%tracesys_next,%r2 293 ldil L%tracesys_next,%r2
293 be R%syscall_trace(%sr7,%r1) 294 be R%do_syscall_trace_enter(%sr7,%r1)
294 ldo R%tracesys_next(%r2),%r2 295 ldo R%tracesys_next(%r2),%r2
295 296
296tracesys_next: 297tracesys_next:
298 /* do_syscall_trace_enter either returned the syscallno, or -1L,
299 * so we skip restoring the PT_GR20 below, since we pulled it from
300 * task->thread.regs.gr[20] above.
301 */
302 copy %ret0,%r20
297 ldil L%sys_call_table,%r1 303 ldil L%sys_call_table,%r1
298 ldo R%sys_call_table(%r1), %r19 304 ldo R%sys_call_table(%r1), %r19
299 305
300 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 306 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
301 LDREG TI_TASK(%r1), %r1 307 LDREG TI_TASK(%r1), %r1
302 LDREG TASK_PT_GR20(%r1), %r20
303 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ 308 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
304 LDREG TASK_PT_GR25(%r1), %r25 309 LDREG TASK_PT_GR25(%r1), %r25
305 LDREG TASK_PT_GR24(%r1), %r24 310 LDREG TASK_PT_GR24(%r1), %r24
@@ -336,7 +341,8 @@ tracesys_exit:
336#ifdef CONFIG_64BIT 341#ifdef CONFIG_64BIT
337 ldo -16(%r30),%r29 /* Reference param save area */ 342 ldo -16(%r30),%r29 /* Reference param save area */
338#endif 343#endif
339 bl syscall_trace, %r2 344 ldo TASK_REGS(%r1),%r26
345 bl do_syscall_trace_exit,%r2
340 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 346 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
341 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 347 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
342 LDREG TI_TASK(%r1), %r1 348 LDREG TI_TASK(%r1), %r1
@@ -353,12 +359,12 @@ tracesys_exit:
353 359
354tracesys_sigexit: 360tracesys_sigexit:
355 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 361 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
356 LDREG 0(%r1), %r1 362 LDREG TI_TASK(%r1), %r1
357#ifdef CONFIG_64BIT 363#ifdef CONFIG_64BIT
358 ldo -16(%r30),%r29 /* Reference param save area */ 364 ldo -16(%r30),%r29 /* Reference param save area */
359#endif 365#endif
360 bl syscall_trace, %r2 366 bl do_syscall_trace_exit,%r2
361 nop 367 ldo TASK_REGS(%r1),%r26
362 368
363 ldil L%syscall_exit_rfi,%r1 369 ldil L%syscall_exit_rfi,%r1
364 be,n R%syscall_exit_rfi(%sr7,%r1) 370 be,n R%syscall_exit_rfi(%sr7,%r1)
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 775be2791bc2..fda4baa059b5 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -28,6 +28,7 @@
28#include <asm/cache.h> 28#include <asm/cache.h>
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/asm-offsets.h> 30#include <asm/asm-offsets.h>
31#include <asm/thread_info.h>
31 32
32/* ld script to make hppa Linux kernel */ 33/* ld script to make hppa Linux kernel */
33#ifndef CONFIG_64BIT 34#ifndef CONFIG_64BIT
@@ -134,6 +135,15 @@ SECTIONS
134 __init_begin = .; 135 __init_begin = .;
135 INIT_TEXT_SECTION(16384) 136 INIT_TEXT_SECTION(16384)
136 INIT_DATA_SECTION(16) 137 INIT_DATA_SECTION(16)
138 /* we have to discard exit text and such at runtime, not link time */
139 .exit.text :
140 {
141 EXIT_TEXT
142 }
143 .exit.data :
144 {
145 EXIT_DATA
146 }
137 147
138 PERCPU(PAGE_SIZE) 148 PERCPU(PAGE_SIZE)
139 . = ALIGN(PAGE_SIZE); 149 . = ALIGN(PAGE_SIZE);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index d5aca31fddbb..13b6e3e59b99 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -434,8 +434,8 @@ void mark_rodata_ro(void)
434#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ 434#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
435 & ~(VM_MAP_OFFSET-1))) 435 & ~(VM_MAP_OFFSET-1)))
436 436
437void *vmalloc_start __read_mostly; 437void *parisc_vmalloc_start __read_mostly;
438EXPORT_SYMBOL(vmalloc_start); 438EXPORT_SYMBOL(parisc_vmalloc_start);
439 439
440#ifdef CONFIG_PA11 440#ifdef CONFIG_PA11
441unsigned long pcxl_dma_start __read_mostly; 441unsigned long pcxl_dma_start __read_mostly;
@@ -496,13 +496,14 @@ void __init mem_init(void)
496#ifdef CONFIG_PA11 496#ifdef CONFIG_PA11
497 if (hppa_dma_ops == &pcxl_dma_ops) { 497 if (hppa_dma_ops == &pcxl_dma_ops) {
498 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); 498 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
499 vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); 499 parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
500 + PCXL_DMA_MAP_SIZE);
500 } else { 501 } else {
501 pcxl_dma_start = 0; 502 pcxl_dma_start = 0;
502 vmalloc_start = SET_MAP_OFFSET(MAP_START); 503 parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
503 } 504 }
504#else 505#else
505 vmalloc_start = SET_MAP_OFFSET(MAP_START); 506 parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
506#endif 507#endif
507 508
508 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", 509 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ec5eee7c25d8..06cce8285ba0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,7 +58,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); 58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); 59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
60 60
61static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) 61static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
62{ 62{
63 return vcpu->arch.sie_block->gmslm 63 return vcpu->arch.sie_block->gmslm
64 - vcpu->arch.sie_block->gmsor 64 - vcpu->arch.sie_block->gmsor
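The s390 change widens the return type because the guest memory size derived from gmslm - gmsor no longer fits in a signed 32-bit int once the guest has 2 GiB or more of memory. A quick demonstration of the truncation (assuming an LP64 build, as on s390x):

    #include <stdio.h>

    int main(void)
    {
        unsigned long gmsor = 0;                              /* guest memory origin */
        unsigned long gmslm = 3UL * 1024 * 1024 * 1024 - 1;   /* 3 GiB guest */

        long ok  = (long)(gmslm - gmsor + 1);
        int  bad = (int)(gmslm - gmsor + 1);   /* wraps above 2 GiB */

        printf("long: %ld  int: %d\n", ok, bad);
        return 0;
    }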
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h
index 4f63ed8df551..162007643cdc 100644
--- a/arch/sparc/include/asm/hardirq_32.h
+++ b/arch/sparc/include/asm/hardirq_32.h
@@ -7,17 +7,7 @@
7#ifndef __SPARC_HARDIRQ_H 7#ifndef __SPARC_HARDIRQ_H
8#define __SPARC_HARDIRQ_H 8#define __SPARC_HARDIRQ_H
9 9
10#include <linux/threads.h>
11#include <linux/spinlock.h>
12#include <linux/cache.h>
13
14/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21#define HARDIRQ_BITS 8 10#define HARDIRQ_BITS 8
11#include <asm-generic/hardirq.h>
22 12
23#endif /* __SPARC_HARDIRQ_H */ 13#endif /* __SPARC_HARDIRQ_H */
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h
index ea43057d4763..cbf4801deaaf 100644
--- a/arch/sparc/include/asm/irq_32.h
+++ b/arch/sparc/include/asm/irq_32.h
@@ -6,10 +6,10 @@
6#ifndef _SPARC_IRQ_H 6#ifndef _SPARC_IRQ_H
7#define _SPARC_IRQ_H 7#define _SPARC_IRQ_H
8 8
9#include <linux/interrupt.h>
10
11#define NR_IRQS 16 9#define NR_IRQS 16
12 10
11#include <linux/interrupt.h>
12
13#define irq_canonicalize(irq) (irq) 13#define irq_canonicalize(irq) (irq)
14 14
15extern void __init init_IRQ(void); 15extern void __init init_IRQ(void);
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 0ff92fa22064..f3cb790fa2ae 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -41,8 +41,8 @@
41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) 41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) 42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
43#define VMALLOC_START _AC(0x0000000100000000,UL) 43#define VMALLOC_START _AC(0x0000000100000000,UL)
44#define VMALLOC_END _AC(0x0000000200000000,UL) 44#define VMALLOC_END _AC(0x0000010000000000,UL)
45#define VMEMMAP_BASE _AC(0x0000000200000000,UL) 45#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
46 46
47#define vmemmap ((struct page *)VMEMMAP_BASE) 47#define vmemmap ((struct page *)VMEMMAP_BASE)
48 48
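Moving VMALLOC_END and VMEMMAP_BASE from 0x0000000200000000 up to 0x0000010000000000 grows the sparc64 vmalloc region from 4 GiB to 1020 GiB. The arithmetic, using the constants from the hunk:

    #include <stdio.h>

    int main(void)
    {
        unsigned long start   = 0x0000000100000000UL;   /* VMALLOC_START */
        unsigned long old_end = 0x0000000200000000UL;   /* old VMALLOC_END */
        unsigned long new_end = 0x0000010000000000UL;   /* new VMALLOC_END */

        printf("old vmalloc area: %lu GiB\n", (old_end - start) >> 30);  /* 4 */
        printf("new vmalloc area: %lu GiB\n", (new_end - start) >> 30);  /* 1020 */
        return 0;
    }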
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 3ea6e8cde8c5..1d361477d7d6 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear:
280 280
281#ifdef CONFIG_SPARSEMEM_VMEMMAP 281#ifdef CONFIG_SPARSEMEM_VMEMMAP
282 /* Do not use the TSB for vmemmap. */ 282 /* Do not use the TSB for vmemmap. */
283 mov (VMEMMAP_BASE >> 24), %g5 283 mov (VMEMMAP_BASE >> 40), %g5
284 sllx %g5, 24, %g5 284 sllx %g5, 40, %g5
285 cmp %g4,%g5 285 cmp %g4,%g5
286 bgeu,pn %xcc, kvmap_vmemmap 286 bgeu,pn %xcc, kvmap_vmemmap
287 nop 287 nop
@@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss:
293 sethi %hi(MODULES_VADDR), %g5 293 sethi %hi(MODULES_VADDR), %g5
294 cmp %g4, %g5 294 cmp %g4, %g5
295 blu,pn %xcc, kvmap_dtlb_longpath 295 blu,pn %xcc, kvmap_dtlb_longpath
296 mov (VMALLOC_END >> 24), %g5 296 mov (VMALLOC_END >> 40), %g5
297 sllx %g5, 24, %g5 297 sllx %g5, 40, %g5
298 cmp %g4, %g5 298 cmp %g4, %g5
299 bgeu,pn %xcc, kvmap_dtlb_longpath 299 bgeu,pn %xcc, kvmap_dtlb_longpath
300 nop 300 nop
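The ktlb.S hunks track the new constants: the mov/sllx pair materialises VMEMMAP_BASE and VMALLOC_END by loading the value pre-shifted right and shifting it back, and with the bases now aligned to 2^40 the shift count has to be 40 so that the loaded constant still fits the 13-bit signed immediate of the synthetic mov. A quick check of both properties (the immediate-range figure is my assumption about the SPARC encoding):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long vmemmap_base = 0x0000010000000000UL;   /* new value */
        int shift = 40;                                      /* was 24 before the change */
        long imm = (long)(vmemmap_base >> shift);

        /* The shifted constant must survive the round trip ... */
        assert(((unsigned long)imm << shift) == vmemmap_base);
        /* ... and stay within a 13-bit signed immediate. */
        assert(imm >= -4096 && imm <= 4095);

        printf("mov %ld, %%g5; sllx %%g5, %d, %%g5\n", imm, shift);
        return 0;
    }

With the old shift of 24 the new base would need an immediate of 65536, which no longer fits; shifted by 40 it is just 1.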
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2d6a1b10c81d..04db92743896 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -56,7 +56,8 @@ struct cpu_hw_events {
56 struct perf_event *events[MAX_HWEVENTS]; 56 struct perf_event *events[MAX_HWEVENTS];
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
59 int enabled; 59 u64 pcr;
60 int enabled;
60}; 61};
61DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 62DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
62 63
@@ -68,8 +69,30 @@ struct perf_event_map {
68#define PIC_LOWER 0x02 69#define PIC_LOWER 0x02
69}; 70};
70 71
72static unsigned long perf_event_encode(const struct perf_event_map *pmap)
73{
74 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
75}
76
77static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
78{
79 *msk = val & 0xff;
80 *enc = val >> 16;
81}
82
83#define C(x) PERF_COUNT_HW_CACHE_##x
84
85#define CACHE_OP_UNSUPPORTED 0xfffe
86#define CACHE_OP_NONSENSE 0xffff
87
88typedef struct perf_event_map cache_map_t
89 [PERF_COUNT_HW_CACHE_MAX]
90 [PERF_COUNT_HW_CACHE_OP_MAX]
91 [PERF_COUNT_HW_CACHE_RESULT_MAX];
92
71struct sparc_pmu { 93struct sparc_pmu {
72 const struct perf_event_map *(*event_map)(int); 94 const struct perf_event_map *(*event_map)(int);
95 const cache_map_t *cache_map;
73 int max_events; 96 int max_events;
74 int upper_shift; 97 int upper_shift;
75 int lower_shift; 98 int lower_shift;
@@ -80,21 +103,109 @@ struct sparc_pmu {
80 int lower_nop; 103 int lower_nop;
81}; 104};
82 105
83static const struct perf_event_map ultra3i_perfmon_event_map[] = { 106static const struct perf_event_map ultra3_perfmon_event_map[] = {
84 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, 107 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
85 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, 108 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
86 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, 109 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, 110 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
88}; 111};
89 112
90static const struct perf_event_map *ultra3i_event_map(int event_id) 113static const struct perf_event_map *ultra3_event_map(int event_id)
91{ 114{
92 return &ultra3i_perfmon_event_map[event_id]; 115 return &ultra3_perfmon_event_map[event_id];
93} 116}
94 117
95static const struct sparc_pmu ultra3i_pmu = { 118static const cache_map_t ultra3_cache_map = {
96 .event_map = ultra3i_event_map, 119[C(L1D)] = {
97 .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), 120 [C(OP_READ)] = {
121 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
122 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
123 },
124 [C(OP_WRITE)] = {
125 [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
126 [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
127 },
128 [C(OP_PREFETCH)] = {
129 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
130 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
131 },
132},
133[C(L1I)] = {
134 [C(OP_READ)] = {
135 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
136 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
137 },
138 [ C(OP_WRITE) ] = {
139 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
140 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
141 },
142 [ C(OP_PREFETCH) ] = {
143 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
144 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
145 },
146},
147[C(LL)] = {
148 [C(OP_READ)] = {
149 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
150 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
151 },
152 [C(OP_WRITE)] = {
153 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
154 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
155 },
156 [C(OP_PREFETCH)] = {
157 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
158 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
159 },
160},
161[C(DTLB)] = {
162 [C(OP_READ)] = {
163 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
164 [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
165 },
166 [ C(OP_WRITE) ] = {
167 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
168 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
169 },
170 [ C(OP_PREFETCH) ] = {
171 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
172 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
173 },
174},
175[C(ITLB)] = {
176 [C(OP_READ)] = {
177 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
178 [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
179 },
180 [ C(OP_WRITE) ] = {
181 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
182 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
183 },
184 [ C(OP_PREFETCH) ] = {
185 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
186 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
187 },
188},
189[C(BPU)] = {
190 [C(OP_READ)] = {
191 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
192 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
193 },
194 [ C(OP_WRITE) ] = {
195 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
196 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
197 },
198 [ C(OP_PREFETCH) ] = {
199 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
200 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
201 },
202},
203};
204
205static const struct sparc_pmu ultra3_pmu = {
206 .event_map = ultra3_event_map,
207 .cache_map = &ultra3_cache_map,
208 .max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
98 .upper_shift = 11, 209 .upper_shift = 11,
99 .lower_shift = 4, 210 .lower_shift = 4,
100 .event_mask = 0x3f, 211 .event_mask = 0x3f,
@@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = {
102 .lower_nop = 0x14, 213 .lower_nop = 0x14,
103}; 214};
104 215
216/* Niagara1 is very limited. The upper PIC is hard-locked to count
217 * only instructions, so it is free running which creates all kinds of
218 * problems. Some hardware designs make one wonder if the creator
219 * even looked at how this stuff gets used by software.
220 */
221static const struct perf_event_map niagara1_perfmon_event_map[] = {
222 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
223 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
224 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
225 [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
226};
227
228static const struct perf_event_map *niagara1_event_map(int event_id)
229{
230 return &niagara1_perfmon_event_map[event_id];
231}
232
233static const cache_map_t niagara1_cache_map = {
234[C(L1D)] = {
235 [C(OP_READ)] = {
236 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
237 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
238 },
239 [C(OP_WRITE)] = {
240 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
241 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
242 },
243 [C(OP_PREFETCH)] = {
244 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
245 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
246 },
247},
248[C(L1I)] = {
249 [C(OP_READ)] = {
250 [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
251 [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
252 },
253 [ C(OP_WRITE) ] = {
254 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
255 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
256 },
257 [ C(OP_PREFETCH) ] = {
258 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
259 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
260 },
261},
262[C(LL)] = {
263 [C(OP_READ)] = {
264 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
265 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
266 },
267 [C(OP_WRITE)] = {
268 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
269 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
270 },
271 [C(OP_PREFETCH)] = {
272 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
273 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
274 },
275},
276[C(DTLB)] = {
277 [C(OP_READ)] = {
278 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
279 [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
280 },
281 [ C(OP_WRITE) ] = {
282 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
283 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
284 },
285 [ C(OP_PREFETCH) ] = {
286 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
287 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
288 },
289},
290[C(ITLB)] = {
291 [C(OP_READ)] = {
292 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
293 [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
294 },
295 [ C(OP_WRITE) ] = {
296 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
297 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
298 },
299 [ C(OP_PREFETCH) ] = {
300 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
301 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
302 },
303},
304[C(BPU)] = {
305 [C(OP_READ)] = {
306 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
307 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
308 },
309 [ C(OP_WRITE) ] = {
310 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
311 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
312 },
313 [ C(OP_PREFETCH) ] = {
314 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
315 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
316 },
317},
318};
319
320static const struct sparc_pmu niagara1_pmu = {
321 .event_map = niagara1_event_map,
322 .cache_map = &niagara1_cache_map,
323 .max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
324 .upper_shift = 0,
325 .lower_shift = 4,
326 .event_mask = 0x7,
327 .upper_nop = 0x0,
328 .lower_nop = 0x0,
329};
330
105static const struct perf_event_map niagara2_perfmon_event_map[] = { 331static const struct perf_event_map niagara2_perfmon_event_map[] = {
106 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, 332 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
107 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, 333 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
@@ -116,8 +342,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id)
116 return &niagara2_perfmon_event_map[event_id]; 342 return &niagara2_perfmon_event_map[event_id];
117} 343}
118 344
345static const cache_map_t niagara2_cache_map = {
346[C(L1D)] = {
347 [C(OP_READ)] = {
348 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
349 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
350 },
351 [C(OP_WRITE)] = {
352 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
353 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
354 },
355 [C(OP_PREFETCH)] = {
356 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
357 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
358 },
359},
360[C(L1I)] = {
361 [C(OP_READ)] = {
362 [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
363 [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
364 },
365 [ C(OP_WRITE) ] = {
366 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
367 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
368 },
369 [ C(OP_PREFETCH) ] = {
370 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
371 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
372 },
373},
374[C(LL)] = {
375 [C(OP_READ)] = {
376 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
377 [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
378 },
379 [C(OP_WRITE)] = {
380 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
381 [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
382 },
383 [C(OP_PREFETCH)] = {
384 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
385 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
386 },
387},
388[C(DTLB)] = {
389 [C(OP_READ)] = {
390 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
391 [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
392 },
393 [ C(OP_WRITE) ] = {
394 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
395 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
396 },
397 [ C(OP_PREFETCH) ] = {
398 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
399 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
400 },
401},
402[C(ITLB)] = {
403 [C(OP_READ)] = {
404 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
405 [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
406 },
407 [ C(OP_WRITE) ] = {
408 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
409 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
410 },
411 [ C(OP_PREFETCH) ] = {
412 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
413 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
414 },
415},
416[C(BPU)] = {
417 [C(OP_READ)] = {
418 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
419 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
420 },
421 [ C(OP_WRITE) ] = {
422 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
423 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
424 },
425 [ C(OP_PREFETCH) ] = {
426 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
427 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
428 },
429},
430};
431
119static const struct sparc_pmu niagara2_pmu = { 432static const struct sparc_pmu niagara2_pmu = {
120 .event_map = niagara2_event_map, 433 .event_map = niagara2_event_map,
434 .cache_map = &niagara2_cache_map,
121 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), 435 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
122 .upper_shift = 19, 436 .upper_shift = 19,
123 .lower_shift = 6, 437 .lower_shift = 6,
@@ -151,23 +465,30 @@ static u64 nop_for_index(int idx)
151 sparc_pmu->lower_nop, idx); 465 sparc_pmu->lower_nop, idx);
152} 466}
153 467
154static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, 468static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
155 int idx)
156{ 469{
157 u64 val, mask = mask_for_index(idx); 470 u64 val, mask = mask_for_index(idx);
158 471
159 val = pcr_ops->read(); 472 val = cpuc->pcr;
160 pcr_ops->write((val & ~mask) | hwc->config); 473 val &= ~mask;
474 val |= hwc->config;
475 cpuc->pcr = val;
476
477 pcr_ops->write(cpuc->pcr);
161} 478}
162 479
163static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, 480static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
164 int idx)
165{ 481{
166 u64 mask = mask_for_index(idx); 482 u64 mask = mask_for_index(idx);
167 u64 nop = nop_for_index(idx); 483 u64 nop = nop_for_index(idx);
168 u64 val = pcr_ops->read(); 484 u64 val;
169 485
170 pcr_ops->write((val & ~mask) | nop); 486 val = cpuc->pcr;
487 val &= ~mask;
488 val |= nop;
489 cpuc->pcr = val;
490
491 pcr_ops->write(cpuc->pcr);
171} 492}
172 493
173void hw_perf_enable(void) 494void hw_perf_enable(void)
@@ -182,7 +503,7 @@ void hw_perf_enable(void)
182 cpuc->enabled = 1; 503 cpuc->enabled = 1;
183 barrier(); 504 barrier();
184 505
185 val = pcr_ops->read(); 506 val = cpuc->pcr;
186 507
187 for (i = 0; i < MAX_HWEVENTS; i++) { 508 for (i = 0; i < MAX_HWEVENTS; i++) {
188 struct perf_event *cp = cpuc->events[i]; 509 struct perf_event *cp = cpuc->events[i];
@@ -194,7 +515,9 @@ void hw_perf_enable(void)
194 val |= hwc->config_base; 515 val |= hwc->config_base;
195 } 516 }
196 517
197 pcr_ops->write(val); 518 cpuc->pcr = val;
519
520 pcr_ops->write(cpuc->pcr);
198} 521}
199 522
200void hw_perf_disable(void) 523void hw_perf_disable(void)
@@ -207,10 +530,12 @@ void hw_perf_disable(void)
207 530
208 cpuc->enabled = 0; 531 cpuc->enabled = 0;
209 532
210 val = pcr_ops->read(); 533 val = cpuc->pcr;
211 val &= ~(PCR_UTRACE | PCR_STRACE | 534 val &= ~(PCR_UTRACE | PCR_STRACE |
212 sparc_pmu->hv_bit | sparc_pmu->irq_bit); 535 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
213 pcr_ops->write(val); 536 cpuc->pcr = val;
537
538 pcr_ops->write(cpuc->pcr);
214} 539}
215 540
216static u32 read_pmc(int idx) 541static u32 read_pmc(int idx)
@@ -242,7 +567,7 @@ static void write_pmc(int idx, u64 val)
242} 567}
243 568
244static int sparc_perf_event_set_period(struct perf_event *event, 569static int sparc_perf_event_set_period(struct perf_event *event,
245 struct hw_perf_event *hwc, int idx) 570 struct hw_perf_event *hwc, int idx)
246{ 571{
247 s64 left = atomic64_read(&hwc->period_left); 572 s64 left = atomic64_read(&hwc->period_left);
248 s64 period = hwc->sample_period; 573 s64 period = hwc->sample_period;
@@ -282,19 +607,19 @@ static int sparc_pmu_enable(struct perf_event *event)
282 if (test_and_set_bit(idx, cpuc->used_mask)) 607 if (test_and_set_bit(idx, cpuc->used_mask))
283 return -EAGAIN; 608 return -EAGAIN;
284 609
285 sparc_pmu_disable_event(hwc, idx); 610 sparc_pmu_disable_event(cpuc, hwc, idx);
286 611
287 cpuc->events[idx] = event; 612 cpuc->events[idx] = event;
288 set_bit(idx, cpuc->active_mask); 613 set_bit(idx, cpuc->active_mask);
289 614
290 sparc_perf_event_set_period(event, hwc, idx); 615 sparc_perf_event_set_period(event, hwc, idx);
291 sparc_pmu_enable_event(hwc, idx); 616 sparc_pmu_enable_event(cpuc, hwc, idx);
292 perf_event_update_userpage(event); 617 perf_event_update_userpage(event);
293 return 0; 618 return 0;
294} 619}
295 620
296static u64 sparc_perf_event_update(struct perf_event *event, 621static u64 sparc_perf_event_update(struct perf_event *event,
297 struct hw_perf_event *hwc, int idx) 622 struct hw_perf_event *hwc, int idx)
298{ 623{
299 int shift = 64 - 32; 624 int shift = 64 - 32;
300 u64 prev_raw_count, new_raw_count; 625 u64 prev_raw_count, new_raw_count;
@@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event)
324 int idx = hwc->idx; 649 int idx = hwc->idx;
325 650
326 clear_bit(idx, cpuc->active_mask); 651 clear_bit(idx, cpuc->active_mask);
327 sparc_pmu_disable_event(hwc, idx); 652 sparc_pmu_disable_event(cpuc, hwc, idx);
328 653
329 barrier(); 654 barrier();
330 655
@@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event)
338static void sparc_pmu_read(struct perf_event *event) 663static void sparc_pmu_read(struct perf_event *event)
339{ 664{
340 struct hw_perf_event *hwc = &event->hw; 665 struct hw_perf_event *hwc = &event->hw;
666
341 sparc_perf_event_update(event, hwc, hwc->idx); 667 sparc_perf_event_update(event, hwc, hwc->idx);
342} 668}
343 669
344static void sparc_pmu_unthrottle(struct perf_event *event) 670static void sparc_pmu_unthrottle(struct perf_event *event)
345{ 671{
672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
346 struct hw_perf_event *hwc = &event->hw; 673 struct hw_perf_event *hwc = &event->hw;
347 sparc_pmu_enable_event(hwc, hwc->idx); 674
675 sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
348} 676}
349 677
350static atomic_t active_events = ATOMIC_INIT(0); 678static atomic_t active_events = ATOMIC_INIT(0);
351static DEFINE_MUTEX(pmc_grab_mutex); 679static DEFINE_MUTEX(pmc_grab_mutex);
352 680
681static void perf_stop_nmi_watchdog(void *unused)
682{
683 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
684
685 stop_nmi_watchdog(NULL);
686 cpuc->pcr = pcr_ops->read();
687}
688
353void perf_event_grab_pmc(void) 689void perf_event_grab_pmc(void)
354{ 690{
355 if (atomic_inc_not_zero(&active_events)) 691 if (atomic_inc_not_zero(&active_events))
@@ -358,7 +694,7 @@ void perf_event_grab_pmc(void)
358 mutex_lock(&pmc_grab_mutex); 694 mutex_lock(&pmc_grab_mutex);
359 if (atomic_read(&active_events) == 0) { 695 if (atomic_read(&active_events) == 0) {
360 if (atomic_read(&nmi_active) > 0) { 696 if (atomic_read(&nmi_active) > 0) {
361 on_each_cpu(stop_nmi_watchdog, NULL, 1); 697 on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
362 BUG_ON(atomic_read(&nmi_active) != 0); 698 BUG_ON(atomic_read(&nmi_active) != 0);
363 } 699 }
364 atomic_inc(&active_events); 700 atomic_inc(&active_events);
@@ -375,30 +711,160 @@ void perf_event_release_pmc(void)
375 } 711 }
376} 712}
377 713
714static const struct perf_event_map *sparc_map_cache_event(u64 config)
715{
716 unsigned int cache_type, cache_op, cache_result;
717 const struct perf_event_map *pmap;
718
719 if (!sparc_pmu->cache_map)
720 return ERR_PTR(-ENOENT);
721
722 cache_type = (config >> 0) & 0xff;
723 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
724 return ERR_PTR(-EINVAL);
725
726 cache_op = (config >> 8) & 0xff;
727 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
728 return ERR_PTR(-EINVAL);
729
730 cache_result = (config >> 16) & 0xff;
731 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
732 return ERR_PTR(-EINVAL);
733
734 pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
735
736 if (pmap->encoding == CACHE_OP_UNSUPPORTED)
737 return ERR_PTR(-ENOENT);
738
739 if (pmap->encoding == CACHE_OP_NONSENSE)
740 return ERR_PTR(-EINVAL);
741
742 return pmap;
743}
744
378static void hw_perf_event_destroy(struct perf_event *event) 745static void hw_perf_event_destroy(struct perf_event *event)
379{ 746{
380 perf_event_release_pmc(); 747 perf_event_release_pmc();
381} 748}
382 749
750/* Make sure all events can be scheduled into the hardware at
751 * the same time. This is simplified by the fact that we only
752 * need to support 2 simultaneous HW events.
753 */
754static int sparc_check_constraints(unsigned long *events, int n_ev)
755{
756 if (n_ev <= perf_max_events) {
757 u8 msk1, msk2;
758 u16 dummy;
759
760 if (n_ev == 1)
761 return 0;
762 BUG_ON(n_ev != 2);
763 perf_event_decode(events[0], &dummy, &msk1);
764 perf_event_decode(events[1], &dummy, &msk2);
765
766 /* If both events can go on any counter, OK. */
767 if (msk1 == (PIC_UPPER | PIC_LOWER) &&
768 msk2 == (PIC_UPPER | PIC_LOWER))
769 return 0;
770
771 /* If one event is limited to a specific counter,
772 * and the other can go on both, OK.
773 */
774 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
775 msk2 == (PIC_UPPER | PIC_LOWER))
776 return 0;
777 if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
778 msk1 == (PIC_UPPER | PIC_LOWER))
779 return 0;
780
781 /* If the events are fixed to different counters, OK. */
782 if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
783 (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
784 return 0;
785
786 /* Otherwise, there is a conflict. */
787 }
788
789 return -1;
790}
791
792static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
793{
794 int eu = 0, ek = 0, eh = 0;
795 struct perf_event *event;
796 int i, n, first;
797
798 n = n_prev + n_new;
799 if (n <= 1)
800 return 0;
801
802 first = 1;
803 for (i = 0; i < n; i++) {
804 event = evts[i];
805 if (first) {
806 eu = event->attr.exclude_user;
807 ek = event->attr.exclude_kernel;
808 eh = event->attr.exclude_hv;
809 first = 0;
810 } else if (event->attr.exclude_user != eu ||
811 event->attr.exclude_kernel != ek ||
812 event->attr.exclude_hv != eh) {
813 return -EAGAIN;
814 }
815 }
816
817 return 0;
818}
819
820static int collect_events(struct perf_event *group, int max_count,
821 struct perf_event *evts[], unsigned long *events)
822{
823 struct perf_event *event;
824 int n = 0;
825
826 if (!is_software_event(group)) {
827 if (n >= max_count)
828 return -1;
829 evts[n] = group;
830 events[n++] = group->hw.event_base;
831 }
832 list_for_each_entry(event, &group->sibling_list, group_entry) {
833 if (!is_software_event(event) &&
834 event->state != PERF_EVENT_STATE_OFF) {
835 if (n >= max_count)
836 return -1;
837 evts[n] = event;
838 events[n++] = event->hw.event_base;
839 }
840 }
841 return n;
842}
843
383static int __hw_perf_event_init(struct perf_event *event) 844static int __hw_perf_event_init(struct perf_event *event)
384{ 845{
385 struct perf_event_attr *attr = &event->attr; 846 struct perf_event_attr *attr = &event->attr;
847 struct perf_event *evts[MAX_HWEVENTS];
386 struct hw_perf_event *hwc = &event->hw; 848 struct hw_perf_event *hwc = &event->hw;
849 unsigned long events[MAX_HWEVENTS];
387 const struct perf_event_map *pmap; 850 const struct perf_event_map *pmap;
388 u64 enc; 851 u64 enc;
852 int n;
389 853
390 if (atomic_read(&nmi_active) < 0) 854 if (atomic_read(&nmi_active) < 0)
391 return -ENODEV; 855 return -ENODEV;
392 856
393 if (attr->type != PERF_TYPE_HARDWARE) 857 if (attr->type == PERF_TYPE_HARDWARE) {
858 if (attr->config >= sparc_pmu->max_events)
859 return -EINVAL;
860 pmap = sparc_pmu->event_map(attr->config);
861 } else if (attr->type == PERF_TYPE_HW_CACHE) {
862 pmap = sparc_map_cache_event(attr->config);
863 if (IS_ERR(pmap))
864 return PTR_ERR(pmap);
865 } else
394 return -EOPNOTSUPP; 866 return -EOPNOTSUPP;
395 867
396 if (attr->config >= sparc_pmu->max_events)
397 return -EINVAL;
398
399 perf_event_grab_pmc();
400 event->destroy = hw_perf_event_destroy;
401
402 /* We save the enable bits in the config_base. So to 868 /* We save the enable bits in the config_base. So to
403 * turn off sampling just write 'config', and to enable 869 * turn off sampling just write 'config', and to enable
404 * things write 'config | config_base'. 870 * things write 'config | config_base'.
@@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event)
411 if (!attr->exclude_hv) 877 if (!attr->exclude_hv)
412 hwc->config_base |= sparc_pmu->hv_bit; 878 hwc->config_base |= sparc_pmu->hv_bit;
413 879
880 hwc->event_base = perf_event_encode(pmap);
881
882 enc = pmap->encoding;
883
884 n = 0;
885 if (event->group_leader != event) {
886 n = collect_events(event->group_leader,
887 perf_max_events - 1,
888 evts, events);
889 if (n < 0)
890 return -EINVAL;
891 }
892 events[n] = hwc->event_base;
893 evts[n] = event;
894
895 if (check_excludes(evts, n, 1))
896 return -EINVAL;
897
898 if (sparc_check_constraints(events, n + 1))
899 return -EINVAL;
900
901 /* Try to do all error checking before this point, as unwinding
902 * state after grabbing the PMC is difficult.
903 */
904 perf_event_grab_pmc();
905 event->destroy = hw_perf_event_destroy;
906
414 if (!hwc->sample_period) { 907 if (!hwc->sample_period) {
415 hwc->sample_period = MAX_PERIOD; 908 hwc->sample_period = MAX_PERIOD;
416 hwc->last_period = hwc->sample_period; 909 hwc->last_period = hwc->sample_period;
417 atomic64_set(&hwc->period_left, hwc->sample_period); 910 atomic64_set(&hwc->period_left, hwc->sample_period);
418 } 911 }
419 912
420 pmap = sparc_pmu->event_map(attr->config);
421
422 enc = pmap->encoding;
423 if (pmap->pic_mask & PIC_UPPER) { 913 if (pmap->pic_mask & PIC_UPPER) {
424 hwc->idx = PIC_UPPER_INDEX; 914 hwc->idx = PIC_UPPER_INDEX;
425 enc <<= sparc_pmu->upper_shift; 915 enc <<= sparc_pmu->upper_shift;
@@ -472,7 +962,7 @@ void perf_event_print_debug(void)
472} 962}
473 963
474static int __kprobes perf_event_nmi_handler(struct notifier_block *self, 964static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
475 unsigned long cmd, void *__args) 965 unsigned long cmd, void *__args)
476{ 966{
477 struct die_args *args = __args; 967 struct die_args *args = __args;
478 struct perf_sample_data data; 968 struct perf_sample_data data;
@@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
513 continue; 1003 continue;
514 1004
515 if (perf_event_overflow(event, 1, &data, regs)) 1005 if (perf_event_overflow(event, 1, &data, regs))
516 sparc_pmu_disable_event(hwc, idx); 1006 sparc_pmu_disable_event(cpuc, hwc, idx);
517 } 1007 }
518 1008
519 return NOTIFY_STOP; 1009 return NOTIFY_STOP;
@@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
525 1015
526static bool __init supported_pmu(void) 1016static bool __init supported_pmu(void)
527{ 1017{
528 if (!strcmp(sparc_pmu_type, "ultra3i")) { 1018 if (!strcmp(sparc_pmu_type, "ultra3") ||
529 sparc_pmu = &ultra3i_pmu; 1019 !strcmp(sparc_pmu_type, "ultra3+") ||
1020 !strcmp(sparc_pmu_type, "ultra3i") ||
1021 !strcmp(sparc_pmu_type, "ultra4+")) {
1022 sparc_pmu = &ultra3_pmu;
1023 return true;
1024 }
1025 if (!strcmp(sparc_pmu_type, "niagara")) {
1026 sparc_pmu = &niagara1_pmu;
530 return true; 1027 return true;
531 } 1028 }
532 if (!strcmp(sparc_pmu_type, "niagara2")) { 1029 if (!strcmp(sparc_pmu_type, "niagara2")) {
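perf_event_encode()/perf_event_decode() in the hunk above pack an event's 16-bit encoding and 8-bit counter mask into a single unsigned long so that collect_events() can hand a flat array to sparc_check_constraints(). A standalone round-trip of that packing plus a simplified two-counter constraint check (simplified: it only models the PIC_UPPER/PIC_LOWER masks, not the full kernel logic):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PIC_UPPER  0x01
    #define PIC_LOWER  0x02

    /* Pack a (16-bit encoding, 8-bit counter mask) pair into one word,
     * mirroring perf_event_encode()/perf_event_decode(). */
    static unsigned long event_encode(uint16_t enc, uint8_t msk)
    {
        return ((unsigned long)enc << 16) | msk;
    }

    static void event_decode(unsigned long val, uint16_t *enc, uint8_t *msk)
    {
        *msk = val & 0xff;
        *enc = val >> 16;
    }

    /* Two events fit on the two counters unless both are pinned to the
     * same one. */
    static int pair_fits(uint8_t msk1, uint8_t msk2)
    {
        if (msk1 == (PIC_UPPER | PIC_LOWER) || msk2 == (PIC_UPPER | PIC_LOWER))
            return 1;
        return msk1 != msk2;
    }

    int main(void)
    {
        uint16_t enc;
        uint8_t msk;

        event_decode(event_encode(0x0302, PIC_UPPER | PIC_LOWER), &enc, &msk);
        assert(enc == 0x0302 && msk == (PIC_UPPER | PIC_LOWER));

        printf("upper+upper fits: %d\n", pair_fits(PIC_UPPER, PIC_UPPER));  /* 0 */
        printf("upper+lower fits: %d\n", pair_fits(PIC_UPPER, PIC_LOWER));  /* 1 */
        return 0;
    }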
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index f97cb8b6ee5f..f9024bccff16 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -11,6 +11,7 @@
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/param.h> /* for HZ */
14 15
15#ifdef CONFIG_SPARC64 16#ifdef CONFIG_SPARC64
16#include <linux/notifier.h> 17#include <linux/notifier.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8da93745c087..c876bace8fdc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -86,10 +86,6 @@ config STACKTRACE_SUPPORT
86config HAVE_LATENCYTOP_SUPPORT 86config HAVE_LATENCYTOP_SUPPORT
87 def_bool y 87 def_bool y
88 88
89config FAST_CMPXCHG_LOCAL
90 bool
91 default y
92
93config MMU 89config MMU
94 def_bool y 90 def_bool y
95 91
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 527519b8a9f9..f2824fb8c79c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -400,7 +400,7 @@ config X86_TSC
400 400
401config X86_CMPXCHG64 401config X86_CMPXCHG64
402 def_bool y 402 def_bool y
403 depends on X86_PAE || X86_64 403 depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
404 404
405# this should be set for all -march=.. options where the compiler 405# this should be set for all -march=.. options where the compiler
406# generates cmov. 406# generates cmov.
@@ -412,6 +412,7 @@ config X86_MINIMUM_CPU_FAMILY
412 int 412 int
413 default "64" if X86_64 413 default "64" if X86_64
414 default "6" if X86_32 && X86_P6_NOP 414 default "6" if X86_32 && X86_P6_NOP
415 default "5" if X86_32 && X86_CMPXCHG64
415 default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) 416 default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK)
416 default "3" 417 default "3"
417 418
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 74619c4f9fda..1733f9f65e82 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -21,8 +21,8 @@
21#define __AUDIT_ARCH_LE 0x40000000 21#define __AUDIT_ARCH_LE 0x40000000
22 22
23#ifndef CONFIG_AUDITSYSCALL 23#ifndef CONFIG_AUDITSYSCALL
24#define sysexit_audit int_ret_from_sys_call 24#define sysexit_audit ia32_ret_from_sys_call
25#define sysretl_audit int_ret_from_sys_call 25#define sysretl_audit ia32_ret_from_sys_call
26#endif 26#endif
27 27
28#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) 28#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
@@ -39,12 +39,12 @@
39 .endm 39 .endm
40 40
41 /* clobbers %eax */ 41 /* clobbers %eax */
42 .macro CLEAR_RREGS _r9=rax 42 .macro CLEAR_RREGS offset=0, _r9=rax
43 xorl %eax,%eax 43 xorl %eax,%eax
44 movq %rax,R11(%rsp) 44 movq %rax,\offset+R11(%rsp)
45 movq %rax,R10(%rsp) 45 movq %rax,\offset+R10(%rsp)
46 movq %\_r9,R9(%rsp) 46 movq %\_r9,\offset+R9(%rsp)
47 movq %rax,R8(%rsp) 47 movq %rax,\offset+R8(%rsp)
48 .endm 48 .endm
49 49
50 /* 50 /*
@@ -172,6 +172,10 @@ sysexit_from_sys_call:
172 movl RIP-R11(%rsp),%edx /* User %eip */ 172 movl RIP-R11(%rsp),%edx /* User %eip */
173 CFI_REGISTER rip,rdx 173 CFI_REGISTER rip,rdx
174 RESTORE_ARGS 1,24,1,1,1,1 174 RESTORE_ARGS 1,24,1,1,1,1
175 xorq %r8,%r8
176 xorq %r9,%r9
177 xorq %r10,%r10
178 xorq %r11,%r11
175 popfq 179 popfq
176 CFI_ADJUST_CFA_OFFSET -8 180 CFI_ADJUST_CFA_OFFSET -8
177 /*CFI_RESTORE rflags*/ 181 /*CFI_RESTORE rflags*/
@@ -202,7 +206,7 @@ sysexit_from_sys_call:
202 206
203 .macro auditsys_exit exit,ebpsave=RBP 207 .macro auditsys_exit exit,ebpsave=RBP
204 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) 208 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
205 jnz int_ret_from_sys_call 209 jnz ia32_ret_from_sys_call
206 TRACE_IRQS_ON 210 TRACE_IRQS_ON
207 sti 211 sti
208 movl %eax,%esi /* second arg, syscall return value */ 212 movl %eax,%esi /* second arg, syscall return value */
@@ -218,8 +222,9 @@ sysexit_from_sys_call:
218 cli 222 cli
219 TRACE_IRQS_OFF 223 TRACE_IRQS_OFF
220 testl %edi,TI_flags(%r10) 224 testl %edi,TI_flags(%r10)
221 jnz int_with_check 225 jz \exit
222 jmp \exit 226 CLEAR_RREGS -ARGOFFSET
227 jmp int_with_check
223 .endm 228 .endm
224 229
225sysenter_auditsys: 230sysenter_auditsys:
@@ -329,6 +334,9 @@ sysretl_from_sys_call:
329 CFI_REGISTER rip,rcx 334 CFI_REGISTER rip,rcx
330 movl EFLAGS-ARGOFFSET(%rsp),%r11d 335 movl EFLAGS-ARGOFFSET(%rsp),%r11d
331 /*CFI_REGISTER rflags,r11*/ 336 /*CFI_REGISTER rflags,r11*/
337 xorq %r10,%r10
338 xorq %r9,%r9
339 xorq %r8,%r8
332 TRACE_IRQS_ON 340 TRACE_IRQS_ON
333 movl RSP-ARGOFFSET(%rsp),%esp 341 movl RSP-ARGOFFSET(%rsp),%esp
334 CFI_RESTORE rsp 342 CFI_RESTORE rsp
@@ -353,7 +361,7 @@ cstar_tracesys:
353#endif 361#endif
354 xchgl %r9d,%ebp 362 xchgl %r9d,%ebp
355 SAVE_REST 363 SAVE_REST
356 CLEAR_RREGS r9 364 CLEAR_RREGS 0, r9
357 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ 365 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
358 movq %rsp,%rdi /* &pt_regs -> arg1 */ 366 movq %rsp,%rdi /* &pt_regs -> arg1 */
359 call syscall_trace_enter 367 call syscall_trace_enter
@@ -425,6 +433,8 @@ ia32_do_call:
425 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative 433 call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
426ia32_sysret: 434ia32_sysret:
427 movq %rax,RAX-ARGOFFSET(%rsp) 435 movq %rax,RAX-ARGOFFSET(%rsp)
436ia32_ret_from_sys_call:
437 CLEAR_RREGS -ARGOFFSET
428 jmp int_ret_from_sys_call 438 jmp int_ret_from_sys_call
429 439
430ia32_tracesys: 440ia32_tracesys:
@@ -442,8 +452,8 @@ END(ia32_syscall)
442 452
443ia32_badsys: 453ia32_badsys:
444 movq $0,ORIG_RAX-ARGOFFSET(%rsp) 454 movq $0,ORIG_RAX-ARGOFFSET(%rsp)
445 movq $-ENOSYS,RAX-ARGOFFSET(%rsp) 455 movq $-ENOSYS,%rax
446 jmp int_ret_from_sys_call 456 jmp ia32_sysret
447 457
448quiet_ni_syscall: 458quiet_ni_syscall:
449 movq $-ENOSYS,%rax 459 movq $-ENOSYS,%rax
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3be000435fad..d83892226f73 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
796#define KVM_ARCH_WANT_MMU_NOTIFIER 796#define KVM_ARCH_WANT_MMU_NOTIFIER
797int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 797int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
798int kvm_age_hva(struct kvm *kvm, unsigned long hva); 798int kvm_age_hva(struct kvm *kvm, unsigned long hva);
799void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
799int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); 800int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
800int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); 801int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
801int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); 802int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 41fd965c80c6..b9c830c12b4a 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -206,8 +206,11 @@ static int __init setup_early_printk(char *buf)
206 206
207 while (*buf != '\0') { 207 while (*buf != '\0') {
208 if (!strncmp(buf, "serial", 6)) { 208 if (!strncmp(buf, "serial", 6)) {
209 early_serial_init(buf + 6); 209 buf += 6;
210 early_serial_init(buf);
210 early_console_register(&early_serial_console, keep); 211 early_console_register(&early_serial_console, keep);
212 if (!strncmp(buf, ",ttyS", 5))
213 buf += 5;
211 } 214 }
212 if (!strncmp(buf, "ttyS", 4)) { 215 if (!strncmp(buf, "ttyS", 4)) {
213 early_serial_init(buf + 4); 216 early_serial_init(buf + 4);
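Editor's note: the early_printk change above advances the option pointer past the "serial" keyword and also consumes an immediately following ",ttyS", so the separate "ttyS" branch further down is not re-entered for the same argument (e.g. earlyprintk=serial,ttySn). A simplified, standalone model of that scan (early_serial_init/early_console_register are kernel functions; the token-skipping loop here is condensed):

    #include <stdio.h>
    #include <string.h>

    /* Simplified model of the early_printk option scan: after "serial"
     * the pointer is advanced past the keyword, and a following ",ttyS"
     * is also consumed so the later "ttyS" branch does not initialize
     * the same port a second time. */
    static void parse_early_printk(const char *buf)
    {
        while (*buf != '\0') {
            if (!strncmp(buf, "serial", 6)) {
                buf += 6;
                printf("serial init with options '%s'\n", buf);
                if (!strncmp(buf, ",ttyS", 5))
                    buf += 5;
            }
            if (!strncmp(buf, "ttyS", 4))
                printf("ttyS init with options '%s'\n", buf + 4);
            /* skip to the next comma-separated token */
            while (*buf && *buf != ',')
                buf++;
            if (*buf == ',')
                buf++;
        }
    }

    int main(void)
    {
        parse_early_printk("serial,ttyS0,115200");
        return 0;
    }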
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 1736c5a725aa..9c3bd4a2050e 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -15,8 +15,10 @@ EXPORT_SYMBOL(mcount);
15 * the export, but dont use it from C code, it is used 15 * the export, but dont use it from C code, it is used
16 * by assembly code and is not using C calling convention! 16 * by assembly code and is not using C calling convention!
17 */ 17 */
18#ifndef CONFIG_X86_CMPXCHG64
18extern void cmpxchg8b_emu(void); 19extern void cmpxchg8b_emu(void);
19EXPORT_SYMBOL(cmpxchg8b_emu); 20EXPORT_SYMBOL(cmpxchg8b_emu);
21#endif
20 22
21/* Networking helper routines. */ 23/* Networking helper routines. */
22EXPORT_SYMBOL(csum_partial_copy_generic); 24EXPORT_SYMBOL(csum_partial_copy_generic);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1ae5ceba7eb2..7024224f0fc8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -664,7 +664,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
664{ 664{
665 ktime_t now = apic->lapic_timer.timer.base->get_time(); 665 ktime_t now = apic->lapic_timer.timer.base->get_time();
666 666
667 apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) * 667 apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) *
668 APIC_BUS_CYCLE_NS * apic->divide_count; 668 APIC_BUS_CYCLE_NS * apic->divide_count;
669 atomic_set(&apic->lapic_timer.pending, 0); 669 atomic_set(&apic->lapic_timer.pending, 0);
670 670
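Editor's note: the lapic.c one-liner widens the first operand to u64 so the timer-period multiplication is carried out in 64 bits; without the cast, the product of the 32-bit operands wraps before the assignment. A standalone demonstration of the difference (values chosen for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tmict = 0x80000000u;  /* large initial count */
        uint32_t cycle_ns = 1;
        uint32_t divide_count = 128;

        /* 32-bit multiplication wraps, then the truncated result is widened */
        uint64_t wrong = tmict * cycle_ns * divide_count;
        /* widening the first operand keeps the whole product in 64 bits */
        uint64_t right = (uint64_t)tmict * cycle_ns * divide_count;

        printf("without cast: %llu\n", (unsigned long long)wrong);
        printf("with cast   : %llu\n", (unsigned long long)right);
        return 0;
    }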
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index eca41ae9f453..685a4ffac8e6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644);
156#define CREATE_TRACE_POINTS 156#define CREATE_TRACE_POINTS
157#include "mmutrace.h" 157#include "mmutrace.h"
158 158
159#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
160
159#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 161#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
160 162
161struct kvm_rmap_desc { 163struct kvm_rmap_desc {
@@ -634,9 +636,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
634 if (*spte & shadow_accessed_mask) 636 if (*spte & shadow_accessed_mask)
635 kvm_set_pfn_accessed(pfn); 637 kvm_set_pfn_accessed(pfn);
636 if (is_writeble_pte(*spte)) 638 if (is_writeble_pte(*spte))
637 kvm_release_pfn_dirty(pfn); 639 kvm_set_pfn_dirty(pfn);
638 else
639 kvm_release_pfn_clean(pfn);
640 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); 640 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
641 if (!*rmapp) { 641 if (!*rmapp) {
642 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 642 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
748 return write_protected; 748 return write_protected;
749} 749}
750 750
751static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) 751static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
752{ 752{
753 u64 *spte; 753 u64 *spte;
754 int need_tlb_flush = 0; 754 int need_tlb_flush = 0;
@@ -763,8 +763,45 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
763 return need_tlb_flush; 763 return need_tlb_flush;
764} 764}
765 765
766static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, 766static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
767 int (*handler)(struct kvm *kvm, unsigned long *rmapp)) 767{
768 int need_flush = 0;
769 u64 *spte, new_spte;
770 pte_t *ptep = (pte_t *)data;
771 pfn_t new_pfn;
772
773 WARN_ON(pte_huge(*ptep));
774 new_pfn = pte_pfn(*ptep);
775 spte = rmap_next(kvm, rmapp, NULL);
776 while (spte) {
777 BUG_ON(!is_shadow_present_pte(*spte));
778 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
779 need_flush = 1;
780 if (pte_write(*ptep)) {
781 rmap_remove(kvm, spte);
782 __set_spte(spte, shadow_trap_nonpresent_pte);
783 spte = rmap_next(kvm, rmapp, NULL);
784 } else {
785 new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
786 new_spte |= (u64)new_pfn << PAGE_SHIFT;
787
788 new_spte &= ~PT_WRITABLE_MASK;
789 new_spte &= ~SPTE_HOST_WRITEABLE;
790 if (is_writeble_pte(*spte))
791 kvm_set_pfn_dirty(spte_to_pfn(*spte));
792 __set_spte(spte, new_spte);
793 spte = rmap_next(kvm, rmapp, spte);
794 }
795 }
796 if (need_flush)
797 kvm_flush_remote_tlbs(kvm);
798
799 return 0;
800}
801
802static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
803 int (*handler)(struct kvm *kvm, unsigned long *rmapp,
804 u64 data))
768{ 805{
769 int i, j; 806 int i, j;
770 int retval = 0; 807 int retval = 0;
@@ -786,13 +823,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
786 if (hva >= start && hva < end) { 823 if (hva >= start && hva < end) {
787 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; 824 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
788 825
789 retval |= handler(kvm, &memslot->rmap[gfn_offset]); 826 retval |= handler(kvm, &memslot->rmap[gfn_offset],
827 data);
790 828
791 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { 829 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
792 int idx = gfn_offset; 830 int idx = gfn_offset;
793 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); 831 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
794 retval |= handler(kvm, 832 retval |= handler(kvm,
795 &memslot->lpage_info[j][idx].rmap_pde); 833 &memslot->lpage_info[j][idx].rmap_pde,
834 data);
796 } 835 }
797 } 836 }
798 } 837 }
@@ -802,10 +841,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
802 841
803int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 842int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
804{ 843{
805 return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); 844 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
806} 845}
807 846
808static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) 847void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
848{
849 kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
850}
851
852static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
809{ 853{
810 u64 *spte; 854 u64 *spte;
811 int young = 0; 855 int young = 0;
@@ -841,13 +885,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
841 gfn = unalias_gfn(vcpu->kvm, gfn); 885 gfn = unalias_gfn(vcpu->kvm, gfn);
842 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); 886 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
843 887
844 kvm_unmap_rmapp(vcpu->kvm, rmapp); 888 kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
845 kvm_flush_remote_tlbs(vcpu->kvm); 889 kvm_flush_remote_tlbs(vcpu->kvm);
846} 890}
847 891
848int kvm_age_hva(struct kvm *kvm, unsigned long hva) 892int kvm_age_hva(struct kvm *kvm, unsigned long hva)
849{ 893{
850 return kvm_handle_hva(kvm, hva, kvm_age_rmapp); 894 return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
851} 895}
852 896
853#ifdef MMU_DEBUG 897#ifdef MMU_DEBUG
@@ -1756,7 +1800,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1756 unsigned pte_access, int user_fault, 1800 unsigned pte_access, int user_fault,
1757 int write_fault, int dirty, int level, 1801 int write_fault, int dirty, int level,
1758 gfn_t gfn, pfn_t pfn, bool speculative, 1802 gfn_t gfn, pfn_t pfn, bool speculative,
1759 bool can_unsync) 1803 bool can_unsync, bool reset_host_protection)
1760{ 1804{
1761 u64 spte; 1805 u64 spte;
1762 int ret = 0; 1806 int ret = 0;
@@ -1783,6 +1827,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1783 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 1827 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1784 kvm_is_mmio_pfn(pfn)); 1828 kvm_is_mmio_pfn(pfn));
1785 1829
1830 if (reset_host_protection)
1831 spte |= SPTE_HOST_WRITEABLE;
1832
1786 spte |= (u64)pfn << PAGE_SHIFT; 1833 spte |= (u64)pfn << PAGE_SHIFT;
1787 1834
1788 if ((pte_access & ACC_WRITE_MASK) 1835 if ((pte_access & ACC_WRITE_MASK)
@@ -1828,7 +1875,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1828 unsigned pt_access, unsigned pte_access, 1875 unsigned pt_access, unsigned pte_access,
1829 int user_fault, int write_fault, int dirty, 1876 int user_fault, int write_fault, int dirty,
1830 int *ptwrite, int level, gfn_t gfn, 1877 int *ptwrite, int level, gfn_t gfn,
1831 pfn_t pfn, bool speculative) 1878 pfn_t pfn, bool speculative,
1879 bool reset_host_protection)
1832{ 1880{
1833 int was_rmapped = 0; 1881 int was_rmapped = 0;
1834 int was_writeble = is_writeble_pte(*sptep); 1882 int was_writeble = is_writeble_pte(*sptep);
@@ -1860,7 +1908,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1860 } 1908 }
1861 1909
1862 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, 1910 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1863 dirty, level, gfn, pfn, speculative, true)) { 1911 dirty, level, gfn, pfn, speculative, true,
1912 reset_host_protection)) {
1864 if (write_fault) 1913 if (write_fault)
1865 *ptwrite = 1; 1914 *ptwrite = 1;
1866 kvm_x86_ops->tlb_flush(vcpu); 1915 kvm_x86_ops->tlb_flush(vcpu);
@@ -1877,8 +1926,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1877 page_header_update_slot(vcpu->kvm, sptep, gfn); 1926 page_header_update_slot(vcpu->kvm, sptep, gfn);
1878 if (!was_rmapped) { 1927 if (!was_rmapped) {
1879 rmap_count = rmap_add(vcpu, sptep, gfn); 1928 rmap_count = rmap_add(vcpu, sptep, gfn);
1880 if (!is_rmap_spte(*sptep)) 1929 kvm_release_pfn_clean(pfn);
1881 kvm_release_pfn_clean(pfn);
1882 if (rmap_count > RMAP_RECYCLE_THRESHOLD) 1930 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1883 rmap_recycle(vcpu, sptep, gfn); 1931 rmap_recycle(vcpu, sptep, gfn);
1884 } else { 1932 } else {
@@ -1909,7 +1957,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1909 if (iterator.level == level) { 1957 if (iterator.level == level) {
1910 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, 1958 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1911 0, write, 1, &pt_write, 1959 0, write, 1, &pt_write,
1912 level, gfn, pfn, false); 1960 level, gfn, pfn, false, true);
1913 ++vcpu->stat.pf_fixed; 1961 ++vcpu->stat.pf_fixed;
1914 break; 1962 break;
1915 } 1963 }
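Editor's note: the mmu.c rework threads an opaque u64 through kvm_handle_hva() to the per-rmap handlers, which is what lets the new kvm_set_pte_rmapp() receive the host pte while kvm_unmap_rmapp() and kvm_age_rmapp() simply ignore the extra argument. A minimal standalone sketch of that callback-plus-data pattern (names and types are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    typedef int (*rmap_handler_t)(unsigned long *rmapp, uint64_t data);

    /* Generic walker: forwards an opaque u64 to every handler so callbacks
     * that need extra context can get it without a second walker. */
    static int walk_rmaps(unsigned long *rmaps, int n, uint64_t data,
                          rmap_handler_t handler)
    {
        int i, ret = 0;

        for (i = 0; i < n; i++)
            ret |= handler(&rmaps[i], data);
        return ret;
    }

    /* Ignores the extra argument, like kvm_unmap_rmapp(). */
    static int unmap_one(unsigned long *rmapp, uint64_t data)
    {
        (void)data;
        *rmapp = 0;
        return 1;
    }

    /* Interprets the argument as a pointer, like kvm_set_pte_rmapp(). */
    static int set_pte_one(unsigned long *rmapp, uint64_t data)
    {
        const unsigned long *new_pte = (const unsigned long *)(uintptr_t)data;

        *rmapp = *new_pte;
        return 0;
    }

    int main(void)
    {
        unsigned long rmaps[4] = { 1, 2, 3, 4 };
        unsigned long pte = 0x42;

        walk_rmaps(rmaps, 4, 0, unmap_one);
        walk_rmaps(rmaps, 4, (uint64_t)(uintptr_t)&pte, set_pte_one);
        printf("rmaps[0] = %#lx\n", rmaps[0]);
        return 0;
    }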
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d2fec9c12d22..72558f8ff3f5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -273,9 +273,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
273 if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) 273 if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
274 return; 274 return;
275 kvm_get_pfn(pfn); 275 kvm_get_pfn(pfn);
276 /*
 277 * we call mmu_set_spte() with reset_host_protection = true because

278 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
279 */
276 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, 280 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
277 gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL, 281 gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
278 gpte_to_gfn(gpte), pfn, true); 282 gpte_to_gfn(gpte), pfn, true, true);
279} 283}
280 284
281/* 285/*
@@ -308,7 +312,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
308 user_fault, write_fault, 312 user_fault, write_fault,
309 gw->ptes[gw->level-1] & PT_DIRTY_MASK, 313 gw->ptes[gw->level-1] & PT_DIRTY_MASK,
310 ptwrite, level, 314 ptwrite, level,
311 gw->gfn, pfn, false); 315 gw->gfn, pfn, false, true);
312 break; 316 break;
313 } 317 }
314 318
@@ -558,6 +562,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
558static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 562static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
559{ 563{
560 int i, offset, nr_present; 564 int i, offset, nr_present;
565 bool reset_host_protection;
561 566
562 offset = nr_present = 0; 567 offset = nr_present = 0;
563 568
@@ -595,9 +600,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
595 600
596 nr_present++; 601 nr_present++;
597 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); 602 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
603 if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
604 pte_access &= ~ACC_WRITE_MASK;
605 reset_host_protection = 0;
606 } else {
607 reset_host_protection = 1;
608 }
598 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, 609 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
599 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, 610 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
600 spte_to_pfn(sp->spt[i]), true, false); 611 spte_to_pfn(sp->spt[i]), true, false,
612 reset_host_protection);
601 } 613 }
602 614
603 return !nr_present; 615 return !nr_present;
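Editor's note: in the sync_page() hunk, write permission is re-granted only when the spte still carries the SPTE_HOST_WRITEABLE marker set at map time; otherwise the access mask is stripped and reset_host_protection stays 0 so set_spte() will not set the marker again. A small sketch of just that decision (bit positions and mask values here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define ACC_WRITE_MASK      (1u << 1)
    #define SPTE_HOST_WRITEABLE (1ull << 52) /* illustrative available PTE bit */

    /* Keep write access only for sptes whose host-writable marker survived. */
    static unsigned int sync_access(uint64_t spte, unsigned int pte_access,
                                    int *reset_host_protection)
    {
        if (!(spte & SPTE_HOST_WRITEABLE)) {
            pte_access &= ~ACC_WRITE_MASK;
            *reset_host_protection = 0;
        } else {
            *reset_host_protection = 1;
        }
        return pte_access;
    }

    int main(void)
    {
        int rhp;
        unsigned int acc = sync_access(SPTE_HOST_WRITEABLE, ACC_WRITE_MASK, &rhp);

        printf("access=%#x reset_host_protection=%d\n", acc, rhp);
        return 0;
    }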
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 944cc9c04b3c..c17404add91f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -767,6 +767,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
767 rdtscll(tsc_this); 767 rdtscll(tsc_this);
768 delta = vcpu->arch.host_tsc - tsc_this; 768 delta = vcpu->arch.host_tsc - tsc_this;
769 svm->vmcb->control.tsc_offset += delta; 769 svm->vmcb->control.tsc_offset += delta;
770 if (is_nested(svm))
771 svm->nested.hsave->control.tsc_offset += delta;
770 vcpu->cpu = cpu; 772 vcpu->cpu = cpu;
771 kvm_migrate_timers(vcpu); 773 kvm_migrate_timers(vcpu);
772 svm->asid_generation = 0; 774 svm->asid_generation = 0;
@@ -2057,10 +2059,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2057 2059
2058 switch (ecx) { 2060 switch (ecx) {
2059 case MSR_IA32_TSC: { 2061 case MSR_IA32_TSC: {
2060 u64 tsc; 2062 u64 tsc_offset;
2063
2064 if (is_nested(svm))
2065 tsc_offset = svm->nested.hsave->control.tsc_offset;
2066 else
2067 tsc_offset = svm->vmcb->control.tsc_offset;
2061 2068
2062 rdtscll(tsc); 2069 *data = tsc_offset + native_read_tsc();
2063 *data = svm->vmcb->control.tsc_offset + tsc;
2064 break; 2070 break;
2065 } 2071 }
2066 case MSR_K6_STAR: 2072 case MSR_K6_STAR:
@@ -2146,10 +2152,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2146 2152
2147 switch (ecx) { 2153 switch (ecx) {
2148 case MSR_IA32_TSC: { 2154 case MSR_IA32_TSC: {
2149 u64 tsc; 2155 u64 tsc_offset = data - native_read_tsc();
2156 u64 g_tsc_offset = 0;
2157
2158 if (is_nested(svm)) {
2159 g_tsc_offset = svm->vmcb->control.tsc_offset -
2160 svm->nested.hsave->control.tsc_offset;
2161 svm->nested.hsave->control.tsc_offset = tsc_offset;
2162 }
2163
2164 svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
2150 2165
2151 rdtscll(tsc);
2152 svm->vmcb->control.tsc_offset = data - tsc;
2153 break; 2166 break;
2154 } 2167 }
2155 case MSR_K6_STAR: 2168 case MSR_K6_STAR:
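Editor's note: the svm.c MSR handlers now account for a nested (L2) guest being active: reads return the offset of whichever VMCB currently controls the guest, and writes rebase L1's offset while preserving L2's delta relative to it. A standalone sketch of the write path, with plain structs standing in for the VMCB/hsave control areas:

    #include <stdint.h>
    #include <stdio.h>

    struct vmcb_ctl { int64_t tsc_offset; };

    /* Rebase L1's TSC offset; if L2 is running, keep its offset relative. */
    static void set_guest_tsc(struct vmcb_ctl *vmcb, struct vmcb_ctl *hsave,
                              int is_nested, uint64_t wanted_tsc, uint64_t host_tsc)
    {
        int64_t tsc_offset = (int64_t)(wanted_tsc - host_tsc);
        int64_t g_tsc_offset = 0;

        if (is_nested) {
            g_tsc_offset = vmcb->tsc_offset - hsave->tsc_offset;
            hsave->tsc_offset = tsc_offset;
        }
        vmcb->tsc_offset = tsc_offset + g_tsc_offset;
    }

    int main(void)
    {
        struct vmcb_ctl vmcb = { .tsc_offset = -500 };  /* L2 currently active */
        struct vmcb_ctl hsave = { .tsc_offset = -300 }; /* L1's saved offset */

        set_guest_tsc(&vmcb, &hsave, 1, 1000000, 2000000);
        printf("L1 offset %lld, L2 offset %lld (delta kept: %lld)\n",
               (long long)hsave.tsc_offset, (long long)vmcb.tsc_offset,
               (long long)(vmcb.tsc_offset - hsave.tsc_offset));
        return 0;
    }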
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f3812014bd0b..ed53b42caba1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -709,7 +709,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
709 if (vcpu->cpu != cpu) { 709 if (vcpu->cpu != cpu) {
710 vcpu_clear(vmx); 710 vcpu_clear(vmx);
711 kvm_migrate_timers(vcpu); 711 kvm_migrate_timers(vcpu);
712 vpid_sync_vcpu_all(vmx); 712 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
713 local_irq_disable(); 713 local_irq_disable();
714 list_add(&vmx->local_vcpus_link, 714 list_add(&vmx->local_vcpus_link,
715 &per_cpu(vcpus_on_cpu, cpu)); 715 &per_cpu(vcpus_on_cpu, cpu));
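Editor's note: the vmx.c change stops flushing the VPID synchronously while the vcpu is being moved to another CPU and instead records KVM_REQ_TLB_FLUSH in the vcpu's request word, to be honoured on the next guest entry. A toy model of that defer-to-entry pattern (single-threaded, a plain bitmask instead of the kernel's atomic request bits):

    #include <stdio.h>

    #define REQ_TLB_FLUSH (1u << 0)

    static unsigned int requests;

    static void vcpu_migrate(void)
    {
        requests |= REQ_TLB_FLUSH;   /* cheap: just record the need */
    }

    static void vcpu_enter_guest(void)
    {
        if (requests & REQ_TLB_FLUSH) {
            requests &= ~REQ_TLB_FLUSH;
            printf("flushing TLB before entry\n");
        }
        printf("entering guest\n");
    }

    int main(void)
    {
        vcpu_migrate();
        vcpu_enter_guest();
        return 0;
    }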
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index be451ee44249..9b9695322f56 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1591,6 +1591,8 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1591 1591
1592 if (cpuid->nent < 1) 1592 if (cpuid->nent < 1)
1593 goto out; 1593 goto out;
1594 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1595 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1594 r = -ENOMEM; 1596 r = -ENOMEM;
1595 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); 1597 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1596 if (!cpuid_entries) 1598 if (!cpuid_entries)
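Editor's note: kvm_dev_ioctl_get_supported_cpuid() now clamps the user-supplied entry count before sizing the vmalloc, so a huge nent can no longer request an arbitrarily large allocation. The same clamp-before-allocate shape in standalone C (the KVM_MAX_CPUID_ENTRIES value below is illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define KVM_MAX_CPUID_ENTRIES 80   /* illustrative limit */

    struct cpuid_entry { unsigned int function, eax, ebx, ecx, edx; };

    /* Clamp a user-controlled element count before using it for allocation. */
    static struct cpuid_entry *alloc_entries(unsigned int nent)
    {
        if (nent < 1)
            return NULL;
        if (nent > KVM_MAX_CPUID_ENTRIES)
            nent = KVM_MAX_CPUID_ENTRIES;
        return calloc(nent, sizeof(struct cpuid_entry));
    }

    int main(void)
    {
        struct cpuid_entry *e = alloc_entries(1u << 30);  /* absurd request */

        printf("allocation %s\n", e ? "clamped and satisfied" : "failed");
        free(e);
        return 0;
    }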
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 3e549b8ec8c9..85f5db95c60f 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -15,8 +15,10 @@ ifeq ($(CONFIG_X86_32),y)
15 obj-y += atomic64_32.o 15 obj-y += atomic64_32.o
16 lib-y += checksum_32.o 16 lib-y += checksum_32.o
17 lib-y += strstr_32.o 17 lib-y += strstr_32.o
18 lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o 18 lib-y += semaphore_32.o string_32.o
19 19ifneq ($(CONFIG_X86_CMPXCHG64),y)
20 lib-y += cmpxchg8b_emu.o
21endif
20 lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o 22 lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
21else 23else
22 obj-y += io_64.o iomap_copy_64.o 24 obj-y += io_64.o iomap_copy_64.o
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 60ab75104da9..1c129211302d 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
217 .configure = parisc_agp_configure, 217 .configure = parisc_agp_configure,
218 .fetch_size = parisc_agp_fetch_size, 218 .fetch_size = parisc_agp_fetch_size,
219 .tlb_flush = parisc_agp_tlbflush, 219 .tlb_flush = parisc_agp_tlbflush,
220 .mask_memory = parisc_agp_page_mask_memory, 220 .mask_memory = parisc_agp_mask_memory,
221 .masks = parisc_agp_masks, 221 .masks = parisc_agp_masks,
222 .agp_enable = parisc_agp_enable, 222 .agp_enable = parisc_agp_enable,
223 .cache_flush = global_cache_flush, 223 .cache_flush = global_cache_flush,
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index abf4a2529f80..60697909ebdb 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -227,7 +227,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
227 * cn_proc_mcast_ctl 227 * cn_proc_mcast_ctl
228 * @data: message sent from userspace via the connector 228 * @data: message sent from userspace via the connector
229 */ 229 */
230static void cn_proc_mcast_ctl(struct cn_msg *msg) 230static void cn_proc_mcast_ctl(struct cn_msg *msg,
231 struct netlink_skb_parms *nsp)
231{ 232{
232 enum proc_cn_mcast_op *mc_op = NULL; 233 enum proc_cn_mcast_op *mc_op = NULL;
233 int err = 0; 234 int err = 0;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4e551e63b6dc..4f4ac82382f7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644);
15 15
16/* Lookup table for all possible MC control instances */ 16/* Lookup table for all possible MC control instances */
17struct amd64_pvt; 17struct amd64_pvt;
18static struct mem_ctl_info *mci_lookup[MAX_NUMNODES]; 18static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
19static struct amd64_pvt *pvt_lookup[MAX_NUMNODES]; 19static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
20 20
21/* 21/*
22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only 22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
@@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
189/* Map from a CSROW entry to the mask entry that operates on it */ 189/* Map from a CSROW entry to the mask entry that operates on it */
190static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) 190static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
191{ 191{
192 return csrow >> (pvt->num_dcsm >> 3); 192 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
193 return csrow;
194 else
195 return csrow >> 1;
193} 196}
194 197
195/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ 198/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
@@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
279 intlv_en = pvt->dram_IntlvEn[0]; 282 intlv_en = pvt->dram_IntlvEn[0];
280 283
281 if (intlv_en == 0) { 284 if (intlv_en == 0) {
282 for (node_id = 0; ; ) { 285 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
283 if (amd64_base_limit_match(pvt, sys_addr, node_id)) 286 if (amd64_base_limit_match(pvt, sys_addr, node_id))
284 break; 287 goto found;
285
286 if (++node_id >= DRAM_REG_COUNT)
287 goto err_no_match;
288 } 288 }
289 goto found; 289 goto err_no_match;
290 } 290 }
291 291
292 if (unlikely((intlv_en != (0x01 << 8)) && 292 if (unlikely((intlv_en != 0x01) &&
293 (intlv_en != (0x03 << 8)) && 293 (intlv_en != 0x03) &&
294 (intlv_en != (0x07 << 8)))) { 294 (intlv_en != 0x07))) {
295 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " 295 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
296 "IntlvEn field of DRAM Base Register for node 0: " 296 "IntlvEn field of DRAM Base Register for node 0: "
297 "This probably indicates a BIOS bug.\n", intlv_en); 297 "this probably indicates a BIOS bug.\n", intlv_en);
298 return NULL; 298 return NULL;
299 } 299 }
300 300
301 bits = (((u32) sys_addr) >> 12) & intlv_en; 301 bits = (((u32) sys_addr) >> 12) & intlv_en;
302 302
303 for (node_id = 0; ; ) { 303 for (node_id = 0; ; ) {
304 if ((pvt->dram_limit[node_id] & intlv_en) == bits) 304 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
305 break; /* intlv_sel field matches */ 305 break; /* intlv_sel field matches */
306 306
307 if (++node_id >= DRAM_REG_COUNT) 307 if (++node_id >= DRAM_REG_COUNT)
@@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
311 /* sanity test for sys_addr */ 311 /* sanity test for sys_addr */
312 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { 312 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
313 amd64_printk(KERN_WARNING, 313 amd64_printk(KERN_WARNING,
314 "%s(): sys_addr 0x%lx falls outside base/limit " 314 "%s(): sys_addr 0x%llx falls outside base/limit "
315 "address range for node %d with node interleaving " 315 "address range for node %d with node interleaving "
316 "enabled.\n", __func__, (unsigned long)sys_addr, 316 "enabled.\n",
317 node_id); 317 __func__, sys_addr, node_id);
318 return NULL; 318 return NULL;
319 } 319 }
320 320
@@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
377 * base/mask register pair, test the condition shown near the start of 377 * base/mask register pair, test the condition shown near the start of
378 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). 378 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
379 */ 379 */
380 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { 380 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
381 381
382 /* This DRAM chip select is disabled on this node */ 382 /* This DRAM chip select is disabled on this node */
383 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) 383 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
@@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
734 u64 base, mask; 734 u64 base, mask;
735 735
736 pvt = mci->pvt_info; 736 pvt = mci->pvt_info;
737 BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT)); 737 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
738 738
739 base = base_from_dct_base(pvt, csrow); 739 base = base_from_dct_base(pvt, csrow);
740 mask = mask_from_dct_mask(pvt, csrow); 740 mask = mask_from_dct_mask(pvt, csrow);
@@ -962,35 +962,27 @@ err_reg:
962 */ 962 */
963static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) 963static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
964{ 964{
965 if (pvt->ext_model >= OPTERON_CPU_REV_F) { 965
966 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
967 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
968 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
969 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
970 pvt->dcs_shift = REV_E_DCS_SHIFT;
971 pvt->cs_count = 8;
972 pvt->num_dcsm = 8;
973 } else {
966 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; 974 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
967 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; 975 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
968 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; 976 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
969 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; 977 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
970 978
971 switch (boot_cpu_data.x86) { 979 if (boot_cpu_data.x86 == 0x11) {
972 case 0xf: 980 pvt->cs_count = 4;
973 pvt->num_dcsm = REV_F_DCSM_COUNT; 981 pvt->num_dcsm = 2;
974 break; 982 } else {
975 983 pvt->cs_count = 8;
976 case 0x10: 984 pvt->num_dcsm = 4;
977 pvt->num_dcsm = F10_DCSM_COUNT;
978 break;
979
980 case 0x11:
981 pvt->num_dcsm = F11_DCSM_COUNT;
982 break;
983
984 default:
985 amd64_printk(KERN_ERR, "Unsupported family!\n");
986 break;
987 } 985 }
988 } else {
989 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
990 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
991 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
992 pvt->dcs_shift = REV_E_DCS_SHIFT;
993 pvt->num_dcsm = REV_E_DCSM_COUNT;
994 } 986 }
995} 987}
996 988
@@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
1003 995
1004 amd64_set_dct_base_and_mask(pvt); 996 amd64_set_dct_base_and_mask(pvt);
1005 997
1006 for (cs = 0; cs < CHIPSELECT_COUNT; cs++) { 998 for (cs = 0; cs < pvt->cs_count; cs++) {
1007 reg = K8_DCSB0 + (cs * 4); 999 reg = K8_DCSB0 + (cs * 4);
1008 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, 1000 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
1009 &pvt->dcsb0[cs]); 1001 &pvt->dcsb0[cs]);
@@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1130 debugf0("Reading K8_DRAM_BASE_LOW failed\n"); 1122 debugf0("Reading K8_DRAM_BASE_LOW failed\n");
1131 1123
1132 /* Extract parts into separate data entries */ 1124 /* Extract parts into separate data entries */
1133 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; 1125 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24;
1134 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; 1126 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1135 pvt->dram_rw_en[dram] = (low & 0x3); 1127 pvt->dram_rw_en[dram] = (low & 0x3);
1136 1128
@@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1143 * Extract parts into separate data entries. Limit is the HIGHEST memory 1135 * Extract parts into separate data entries. Limit is the HIGHEST memory
1144 * location of the region, so lower 24 bits need to be all ones 1136 * location of the region, so lower 24 bits need to be all ones
1145 */ 1137 */
1146 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; 1138 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF;
1147 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; 1139 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1148 pvt->dram_DstNode[dram] = (low & 0x7); 1140 pvt->dram_DstNode[dram] = (low & 0x7);
1149} 1141}
@@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1193 * different from the node that detected the error. 1185 * different from the node that detected the error.
1194 */ 1186 */
1195 src_mci = find_mc_by_sys_addr(mci, SystemAddress); 1187 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1196 if (src_mci) { 1188 if (!src_mci) {
1197 amd64_mc_printk(mci, KERN_ERR, 1189 amd64_mc_printk(mci, KERN_ERR,
1198 "failed to map error address 0x%lx to a node\n", 1190 "failed to map error address 0x%lx to a node\n",
1199 (unsigned long)SystemAddress); 1191 (unsigned long)SystemAddress);
@@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1376 1368
1377 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; 1369 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1378 1370
1379 pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) | 1371 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1380 ((u64) low_base & 0xFFFF0000))) << 8; 1372 (((u64)low_base & 0xFFFF0000) << 24);
1381 1373
1382 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); 1374 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1383 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); 1375 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
@@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1398 * Extract address values and form a LIMIT address. Limit is the HIGHEST 1390 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1399 * memory location of the region, so low 24 bits need to be all ones. 1391 * memory location of the region, so low 24 bits need to be all ones.
1400 */ 1392 */
1401 low_limit |= 0x0000FFFF; 1393 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1402 pvt->dram_limit[dram] = 1394 (((u64) low_limit & 0xFFFF0000) << 24) |
1403 ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF); 1395 0x00FFFFFF;
1404} 1396}
1405 1397
1406static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 1398static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1566 1558
1567 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); 1559 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1568 1560
1569 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { 1561 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1570 1562
1571 cs_base = amd64_get_dct_base(pvt, cs, csrow); 1563 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1572 if (!(cs_base & K8_DCSB_CS_ENABLE)) 1564 if (!(cs_base & K8_DCSB_CS_ENABLE))
@@ -2497,7 +2489,7 @@ err_reg:
2497 * NOTE: CPU Revision Dependent code 2489 * NOTE: CPU Revision Dependent code
2498 * 2490 *
2499 * Input: 2491 * Input:
2500 * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1) 2492 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2501 * k8 private pointer to --> 2493 * k8 private pointer to -->
2502 * DRAM Bank Address mapping register 2494 * DRAM Bank Address mapping register
2503 * node_id 2495 * node_id
@@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
2577 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" 2569 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2578 ); 2570 );
2579 2571
2580 for (i = 0; i < CHIPSELECT_COUNT; i++) { 2572 for (i = 0; i < pvt->cs_count; i++) {
2581 csrow = &mci->csrows[i]; 2573 csrow = &mci->csrows[i];
2582 2574
2583 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { 2575 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
@@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
2988 goto err_exit; 2980 goto err_exit;
2989 2981
2990 ret = -ENOMEM; 2982 ret = -ENOMEM;
2991 mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id); 2983 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
2992 if (!mci) 2984 if (!mci)
2993 goto err_exit; 2985 goto err_exit;
2994 2986
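Editor's note: among the amd64_edac.c fixes, the src_mci test in k8_map_sysaddr_to_csrow() was inverted: the "failed to map error address" path must be taken when find_mc_by_sys_addr() returns NULL, not when it succeeds. A minimal sketch of the corrected control flow (lookup_node() and the address ranges below are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    struct mem_ctl_info { int node; };

    /* Stand-in for find_mc_by_sys_addr(): NULL when no node owns the address. */
    static struct mem_ctl_info *lookup_node(uint64_t sys_addr)
    {
        static struct mem_ctl_info mci0 = { .node = 0 };

        return sys_addr < 0x100000000ULL ? &mci0 : NULL;
    }

    /* The error path (and early return) must run when the lookup FAILS. */
    static void map_sysaddr(uint64_t sys_addr)
    {
        struct mem_ctl_info *src_mci = lookup_node(sys_addr);

        if (!src_mci) {
            printf("failed to map error address 0x%llx to a node\n",
                   (unsigned long long)sys_addr);
            return;
        }
        printf("address 0x%llx belongs to node %d\n",
               (unsigned long long)sys_addr, src_mci->node);
    }

    int main(void)
    {
        map_sysaddr(0x1000);
        map_sysaddr(0x200000000ULL);
        return 0;
    }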
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8ea07e2715dc..c6f359a85207 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -132,6 +132,8 @@
132#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ 132#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
133#define EDAC_MOD_STR "amd64_edac" 133#define EDAC_MOD_STR "amd64_edac"
134 134
135#define EDAC_MAX_NUMNODES 8
136
135/* Extended Model from CPUID, for CPU Revision numbers */ 137/* Extended Model from CPUID, for CPU Revision numbers */
136#define OPTERON_CPU_LE_REV_C 0 138#define OPTERON_CPU_LE_REV_C 0
137#define OPTERON_CPU_REV_D 1 139#define OPTERON_CPU_REV_D 1
@@ -142,7 +144,7 @@
142#define OPTERON_CPU_REV_FA 5 144#define OPTERON_CPU_REV_FA 5
143 145
144/* Hardware limit on ChipSelect rows per MC and processors per system */ 146/* Hardware limit on ChipSelect rows per MC and processors per system */
145#define CHIPSELECT_COUNT 8 147#define MAX_CS_COUNT 8
146#define DRAM_REG_COUNT 8 148#define DRAM_REG_COUNT 8
147 149
148 150
@@ -193,7 +195,6 @@
193 */ 195 */
194#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) 196#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
195#define REV_E_DCS_SHIFT 4 197#define REV_E_DCS_SHIFT 4
196#define REV_E_DCSM_COUNT 8
197 198
198#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) 199#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
199#define REV_F_F1Xh_DCS_SHIFT 8 200#define REV_F_F1Xh_DCS_SHIFT 8
@@ -204,9 +205,6 @@
204 */ 205 */
205#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) 206#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
206#define REV_F_DCS_SHIFT 8 207#define REV_F_DCS_SHIFT 8
207#define REV_F_DCSM_COUNT 4
208#define F10_DCSM_COUNT 4
209#define F11_DCSM_COUNT 2
210 208
211/* DRAM CS Mask Registers */ 209/* DRAM CS Mask Registers */
212#define K8_DCSM0 0x60 210#define K8_DCSM0 0x60
@@ -374,13 +372,11 @@ enum {
374 372
375#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ 373#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
376 (BIT(((word) & 0xF) + 20) | \ 374 (BIT(((word) & 0xF) + 20) | \
377 BIT(17) | \ 375 BIT(17) | bits)
378 ((bits) & 0xF))
379 376
380#define SET_NB_DRAM_INJECTION_READ(word, bits) \ 377#define SET_NB_DRAM_INJECTION_READ(word, bits) \
381 (BIT(((word) & 0xF) + 20) | \ 378 (BIT(((word) & 0xF) + 20) | \
382 BIT(16) | \ 379 BIT(16) | bits)
383 ((bits) & 0xF))
384 380
385#define K8_NBCAP 0xE8 381#define K8_NBCAP 0xE8
386#define K8_NBCAP_CORES (BIT(12)|BIT(13)) 382#define K8_NBCAP_CORES (BIT(12)|BIT(13))
@@ -445,12 +441,12 @@ struct amd64_pvt {
445 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ 441 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
446 442
447 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ 443 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
448 u32 dcsb0[CHIPSELECT_COUNT]; 444 u32 dcsb0[MAX_CS_COUNT];
449 u32 dcsb1[CHIPSELECT_COUNT]; 445 u32 dcsb1[MAX_CS_COUNT];
450 446
451 /* DRAM CS Mask Registers F2x[1,0][6C:60] */ 447 /* DRAM CS Mask Registers F2x[1,0][6C:60] */
452 u32 dcsm0[CHIPSELECT_COUNT]; 448 u32 dcsm0[MAX_CS_COUNT];
453 u32 dcsm1[CHIPSELECT_COUNT]; 449 u32 dcsm1[MAX_CS_COUNT];
454 450
455 /* 451 /*
456 * Decoded parts of DRAM BASE and LIMIT Registers 452 * Decoded parts of DRAM BASE and LIMIT Registers
@@ -470,6 +466,7 @@ struct amd64_pvt {
470 */ 466 */
471 u32 dcsb_base; /* DCSB base bits */ 467 u32 dcsb_base; /* DCSB base bits */
472 u32 dcsm_mask; /* DCSM mask bits */ 468 u32 dcsm_mask; /* DCSM mask bits */
469 u32 cs_count; /* num chip selects (== num DCSB registers) */
473 u32 num_dcsm; /* Number of DCSM registers */ 470 u32 num_dcsm; /* Number of DCSM registers */
474 u32 dcs_mask_notused; /* DCSM notused mask bits */ 471 u32 dcs_mask_notused; /* DCSM notused mask bits */
475 u32 dcs_shift; /* DCSB and DCSM shift value */ 472 u32 dcs_shift; /* DCSB and DCSM shift value */
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index d3675b76b3a7..29f1f7a612d9 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,5 +1,11 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2 2
3static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
4{
5 struct amd64_pvt *pvt = mci->pvt_info;
6 return sprintf(buf, "0x%x\n", pvt->injection.section);
7}
8
3/* 9/*
4 * store error injection section value which refers to one of 4 16-byte sections 10 * store error injection section value which refers to one of 4 16-byte sections
5 * within a 64-byte cacheline 11 * within a 64-byte cacheline
@@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
15 21
16 ret = strict_strtoul(data, 10, &value); 22 ret = strict_strtoul(data, 10, &value);
17 if (ret != -EINVAL) { 23 if (ret != -EINVAL) {
24
25 if (value > 3) {
26 amd64_printk(KERN_WARNING,
27 "%s: invalid section 0x%lx\n",
28 __func__, value);
29 return -EINVAL;
30 }
31
18 pvt->injection.section = (u32) value; 32 pvt->injection.section = (u32) value;
19 return count; 33 return count;
20 } 34 }
21 return ret; 35 return ret;
22} 36}
23 37
38static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
39{
40 struct amd64_pvt *pvt = mci->pvt_info;
41 return sprintf(buf, "0x%x\n", pvt->injection.word);
42}
43
24/* 44/*
25 * store error injection word value which refers to one of 9 16-bit word of the 45 * store error injection word value which refers to one of 9 16-bit word of the
26 * 16-byte (128-bit + ECC bits) section 46 * 16-byte (128-bit + ECC bits) section
@@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
37 ret = strict_strtoul(data, 10, &value); 57 ret = strict_strtoul(data, 10, &value);
38 if (ret != -EINVAL) { 58 if (ret != -EINVAL) {
39 59
40 value = (value <= 8) ? value : 0; 60 if (value > 8) {
41 pvt->injection.word = (u32) value; 61 amd64_printk(KERN_WARNING,
62 "%s: invalid word 0x%lx\n",
63 __func__, value);
64 return -EINVAL;
65 }
42 66
67 pvt->injection.word = (u32) value;
43 return count; 68 return count;
44 } 69 }
45 return ret; 70 return ret;
46} 71}
47 72
73static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
74{
75 struct amd64_pvt *pvt = mci->pvt_info;
76 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
77}
78
48/* 79/*
49 * store 16 bit error injection vector which enables injecting errors to the 80 * store 16 bit error injection vector which enables injecting errors to the
50 * corresponding bit within the error injection word above. When used during a 81 * corresponding bit within the error injection word above. When used during a
@@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
60 ret = strict_strtoul(data, 16, &value); 91 ret = strict_strtoul(data, 16, &value);
61 if (ret != -EINVAL) { 92 if (ret != -EINVAL) {
62 93
63 pvt->injection.bit_map = (u32) value & 0xFFFF; 94 if (value & 0xFFFF0000) {
95 amd64_printk(KERN_WARNING,
96 "%s: invalid EccVector: 0x%lx\n",
97 __func__, value);
98 return -EINVAL;
99 }
64 100
101 pvt->injection.bit_map = (u32) value;
65 return count; 102 return count;
66 } 103 }
67 return ret; 104 return ret;
@@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
147 .name = "inject_section", 184 .name = "inject_section",
148 .mode = (S_IRUGO | S_IWUSR) 185 .mode = (S_IRUGO | S_IWUSR)
149 }, 186 },
150 .show = NULL, 187 .show = amd64_inject_section_show,
151 .store = amd64_inject_section_store, 188 .store = amd64_inject_section_store,
152 }, 189 },
153 { 190 {
@@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
155 .name = "inject_word", 192 .name = "inject_word",
156 .mode = (S_IRUGO | S_IWUSR) 193 .mode = (S_IRUGO | S_IWUSR)
157 }, 194 },
158 .show = NULL, 195 .show = amd64_inject_word_show,
159 .store = amd64_inject_word_store, 196 .store = amd64_inject_word_store,
160 }, 197 },
161 { 198 {
@@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
163 .name = "inject_ecc_vector", 200 .name = "inject_ecc_vector",
164 .mode = (S_IRUGO | S_IWUSR) 201 .mode = (S_IRUGO | S_IWUSR)
165 }, 202 },
166 .show = NULL, 203 .show = amd64_inject_ecc_vector_show,
167 .store = amd64_inject_ecc_vector_store, 204 .store = amd64_inject_ecc_vector_store,
168 }, 205 },
169 { 206 {
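Editor's note: the injection sysfs handlers gain both show methods and proper range checks, so out-of-range input is rejected with -EINVAL instead of being truncated or silently remapped. A userspace sketch of that validate-then-store shape (strtoul stands in for strict_strtoul; the limit matches inject_word):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse, range-check, and only then commit the value. */
    static int store_inject_word(const char *data, unsigned int *word)
    {
        char *end;
        unsigned long value = strtoul(data, &end, 10);

        if (end == data)
            return -EINVAL;          /* not a number at all */
        if (value > 8) {
            fprintf(stderr, "invalid word 0x%lx\n", value);
            return -EINVAL;          /* out of range: reject, don't clamp */
        }
        *word = (unsigned int)value;
        return 0;
    }

    int main(void)
    {
        unsigned int word = 0;

        printf("store \"5\" -> %d (word=%u)\n", store_inject_word("5", &word), word);
        printf("store \"42\" -> %d\n", store_inject_word("42", &word));
        return 0;
    }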
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 420a96e7f2db..051d1ebbd287 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -939,7 +939,7 @@ static int __init ibft_init(void)
939 939
940 if (ibft_addr) { 940 if (ibft_addr) {
941 printk(KERN_INFO "iBFT detected at 0x%llx.\n", 941 printk(KERN_INFO "iBFT detected at 0x%llx.\n",
942 (u64)virt_to_phys((void *)ibft_addr)); 942 (u64)isa_virt_to_bus(ibft_addr));
943 943
944 rc = ibft_check_device(); 944 rc = ibft_check_device();
945 if (rc) 945 if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index d53fbbfefa3e..dfb15c06c88f 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -65,10 +65,10 @@ void __init reserve_ibft_region(void)
65 * so skip that area */ 65 * so skip that area */
66 if (pos == VGA_MEM) 66 if (pos == VGA_MEM)
67 pos += VGA_SIZE; 67 pos += VGA_SIZE;
68 virt = phys_to_virt(pos); 68 virt = isa_bus_to_virt(pos);
69 if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { 69 if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) {
70 unsigned long *addr = 70 unsigned long *addr =
71 (unsigned long *)phys_to_virt(pos + 4); 71 (unsigned long *)isa_bus_to_virt(pos + 4);
72 len = *addr; 72 len = *addr;
73 /* if the length of the table extends past 1M, 73 /* if the length of the table extends past 1M,
74 * the table cannot be valid. */ 74 * the table cannot be valid. */
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index ecd739534f6a..82b16808a274 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -83,7 +83,8 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
83 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 83 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
84 lis3lv02d_joystick_disable(); 84 lis3lv02d_joystick_disable();
85 lis3lv02d_poweroff(lis3); 85 lis3lv02d_poweroff(lis3);
86 return 0; 86
87 return lis3lv02d_remove_fs(&lis3_dev);
87} 88}
88 89
89#ifdef CONFIG_PM 90#ifdef CONFIG_PM
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 28d09a5d8450..017c09540c2f 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = {
273 273
274static void proc_ide_settings_warn(void) 274static void proc_ide_settings_warn(void)
275{ 275{
276 static int warned; 276 printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
277
278 if (warned)
279 return;
280
281 printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
282 "obsolete, and will be removed soon!\n"); 277 "obsolete, and will be removed soon!\n");
283 warned = 1;
284} 278}
285 279
286static int ide_settings_proc_show(struct seq_file *m, void *v) 280static int ide_settings_proc_show(struct seq_file *m, void *v)
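Editor's note: the ide-proc change replaces an open-coded static "warned" flag with printk_once(), which implements the same print-only-once idiom. Roughly what that macro boils down to, as a standalone sketch:

    #include <stdio.h>

    /* A static flag lets the warning fire only on the first call; this is
     * the pattern the removed hand-rolled code and printk_once() both use. */
    #define warn_once(msg)                          \
        do {                                        \
            static int __warned;                    \
            if (!__warned) {                        \
                __warned = 1;                       \
                fprintf(stderr, "%s", msg);         \
            }                                       \
        } while (0)

    static void settings_access(void)
    {
        warn_once("Warning: settings interface is obsolete\n");
    }

    int main(void)
    {
        settings_access();
        settings_access();   /* second call prints nothing */
        return 0;
    }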
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index afca22beaadf..3b88eba04c9c 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer 3 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
4 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> 4 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
5 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 5 * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * May be copied or modified under the terms of the GNU General Public License 7 * May be copied or modified under the terms of the GNU General Public License
8 * 8 *
@@ -281,11 +281,13 @@ static void config_drive_art_rwp(ide_drive_t *drive)
281 281
282 pci_read_config_byte(dev, 0x4b, &reg4bh); 282 pci_read_config_byte(dev, 0x4b, &reg4bh);
283 283
284 rw_prefetch = reg4bh & ~(0x11 << drive->dn);
285
284 if (drive->media == ide_disk) 286 if (drive->media == ide_disk)
285 rw_prefetch = 0x11 << drive->dn; 287 rw_prefetch |= 0x11 << drive->dn;
286 288
287 if ((reg4bh & (0x11 << drive->dn)) != rw_prefetch) 289 if (reg4bh != rw_prefetch)
288 pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); 290 pci_write_config_byte(dev, 0x4b, rw_prefetch);
289} 291}
290 292
291static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) 293static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
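Editor's note: the sis5513 fix turns config_drive_art_rwp() into a true read-modify-write: the prefetch bits for the drive are cleared first and set again only for disks, so they can also be switched off for non-disk media (the old code OR-ed into the register and could never clear them). The core of that logic as a standalone function:

    #include <stdint.h>
    #include <stdio.h>

    /* Clear this drive's prefetch bits, then re-set them only for disks. */
    static uint8_t config_rw_prefetch(uint8_t reg4bh, int drive_dn, int is_disk)
    {
        uint8_t rw_prefetch = reg4bh & ~(0x11 << drive_dn);

        if (is_disk)
            rw_prefetch |= 0x11 << drive_dn;
        return rw_prefetch;
    }

    int main(void)
    {
        uint8_t reg = 0x11;   /* prefetch currently enabled for drive 0 */

        printf("disk  -> %#x\n", config_rw_prefetch(reg, 0, 1));
        printf("cdrom -> %#x\n", config_rw_prefetch(reg, 0, 0));
        return 0;
    }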
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 2d8352419c0d..65bf91e16a42 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -603,7 +603,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
603 603
604 if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { 604 if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) {
605 u16 info = CAPIMSG_U16(skb->data, 12); // Info field 605 u16 info = CAPIMSG_U16(skb->data, 12); // Info field
606 if (info == 0) { 606 if ((info & 0xff00) == 0) {
607 mutex_lock(&cdev->ncci_list_mtx); 607 mutex_lock(&cdev->ncci_list_mtx);
608 capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); 608 capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
609 mutex_unlock(&cdev->ncci_list_mtx); 609 mutex_unlock(&cdev->ncci_list_mtx);
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 650120261abf..3e6d17f42a98 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -40,7 +40,7 @@ static int debugmode = 0;
40MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); 40MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
41MODULE_AUTHOR("Carsten Paeth"); 41MODULE_AUTHOR("Carsten Paeth");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43module_param(debugmode, uint, 0); 43module_param(debugmode, uint, S_IRUGO|S_IWUSR);
44 44
45/* -------- type definitions ----------------------------------------- */ 45/* -------- type definitions ----------------------------------------- */
46 46
@@ -671,8 +671,8 @@ static void n0(capidrv_contr * card, capidrv_ncci * ncci)
671 NULL, /* Useruserdata */ /* $$$$ */ 671 NULL, /* Useruserdata */ /* $$$$ */
672 NULL /* Facilitydataarray */ 672 NULL /* Facilitydataarray */
673 ); 673 );
674 send_message(card, &cmsg);
675 plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); 674 plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ);
675 send_message(card, &cmsg);
676 676
677 cmd.command = ISDN_STAT_BHUP; 677 cmd.command = ISDN_STAT_BHUP;
678 cmd.driver = card->myid; 678 cmd.driver = card->myid;
@@ -924,8 +924,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
924 */ 924 */
925 capi_cmsg_answer(cmsg); 925 capi_cmsg_answer(cmsg);
926 cmsg->Reject = 1; /* ignore */ 926 cmsg->Reject = 1; /* ignore */
927 send_message(card, cmsg);
928 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); 927 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
928 send_message(card, cmsg);
929 printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", 929 printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n",
930 card->contrnr, 930 card->contrnr,
931 cmd.parm.setup.phone, 931 cmd.parm.setup.phone,
@@ -974,8 +974,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
974 case 2: /* Call will be rejected. */ 974 case 2: /* Call will be rejected. */
975 capi_cmsg_answer(cmsg); 975 capi_cmsg_answer(cmsg);
976 cmsg->Reject = 2; /* reject call, normal call clearing */ 976 cmsg->Reject = 2; /* reject call, normal call clearing */
977 send_message(card, cmsg);
978 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); 977 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
978 send_message(card, cmsg);
979 break; 979 break;
980 980
981 default: 981 default:
@@ -983,8 +983,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
983 capi_cmsg_answer(cmsg); 983 capi_cmsg_answer(cmsg);
984 cmsg->Reject = 8; /* reject call, 984 cmsg->Reject = 8; /* reject call,
985 destination out of order */ 985 destination out of order */
986 send_message(card, cmsg);
987 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); 986 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
987 send_message(card, cmsg);
988 break; 988 break;
989 } 989 }
990 return; 990 return;
@@ -1020,8 +1020,8 @@ static void handle_plci(_cmsg * cmsg)
1020 card->bchans[plcip->chan].disconnecting = 1; 1020 card->bchans[plcip->chan].disconnecting = 1;
1021 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); 1021 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND);
1022 capi_cmsg_answer(cmsg); 1022 capi_cmsg_answer(cmsg);
1023 send_message(card, cmsg);
1024 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); 1023 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP);
1024 send_message(card, cmsg);
1025 break; 1025 break;
1026 1026
1027 case CAPI_DISCONNECT_CONF: /* plci */ 1027 case CAPI_DISCONNECT_CONF: /* plci */
@@ -1078,8 +1078,8 @@ static void handle_plci(_cmsg * cmsg)
1078 1078
1079 if (card->bchans[plcip->chan].incoming) { 1079 if (card->bchans[plcip->chan].incoming) {
1080 capi_cmsg_answer(cmsg); 1080 capi_cmsg_answer(cmsg);
1081 send_message(card, cmsg);
1082 plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); 1081 plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
1082 send_message(card, cmsg);
1083 } else { 1083 } else {
1084 capidrv_ncci *nccip; 1084 capidrv_ncci *nccip;
1085 capi_cmsg_answer(cmsg); 1085 capi_cmsg_answer(cmsg);
@@ -1098,13 +1098,14 @@ static void handle_plci(_cmsg * cmsg)
1098 NULL /* NCPI */ 1098 NULL /* NCPI */
1099 ); 1099 );
1100 nccip->msgid = cmsg->Messagenumber; 1100 nccip->msgid = cmsg->Messagenumber;
1101 plci_change_state(card, plcip,
1102 EV_PLCI_CONNECT_ACTIVE_IND);
1103 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
1101 send_message(card, cmsg); 1104 send_message(card, cmsg);
1102 cmd.command = ISDN_STAT_DCONN; 1105 cmd.command = ISDN_STAT_DCONN;
1103 cmd.driver = card->myid; 1106 cmd.driver = card->myid;
1104 cmd.arg = plcip->chan; 1107 cmd.arg = plcip->chan;
1105 card->interface.statcallb(&cmd); 1108 card->interface.statcallb(&cmd);
1106 plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
1107 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
1108 } 1109 }
1109 break; 1110 break;
1110 1111
@@ -1193,8 +1194,8 @@ static void handle_ncci(_cmsg * cmsg)
1193 goto notfound; 1194 goto notfound;
1194 1195
1195 capi_cmsg_answer(cmsg); 1196 capi_cmsg_answer(cmsg);
1196 send_message(card, cmsg);
1197 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); 1197 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND);
1198 send_message(card, cmsg);
1198 1199
1199 cmd.command = ISDN_STAT_BCONN; 1200 cmd.command = ISDN_STAT_BCONN;
1200 cmd.driver = card->myid; 1201 cmd.driver = card->myid;
@@ -1222,8 +1223,8 @@ static void handle_ncci(_cmsg * cmsg)
1222 0, /* Reject */ 1223 0, /* Reject */
1223 NULL /* NCPI */ 1224 NULL /* NCPI */
1224 ); 1225 );
1225 send_message(card, cmsg);
1226 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); 1226 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP);
1227 send_message(card, cmsg);
1227 break; 1228 break;
1228 } 1229 }
1229 printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); 1230 printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
@@ -1299,8 +1300,8 @@ static void handle_ncci(_cmsg * cmsg)
1299 card->bchans[nccip->chan].disconnecting = 1; 1300 card->bchans[nccip->chan].disconnecting = 1;
1300 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); 1301 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND);
1301 capi_cmsg_answer(cmsg); 1302 capi_cmsg_answer(cmsg);
1302 send_message(card, cmsg);
1303 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); 1303 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP);
1304 send_message(card, cmsg);
1304 break; 1305 break;
1305 1306
1306 case CAPI_DISCONNECT_B3_CONF: /* ncci */ 1307 case CAPI_DISCONNECT_B3_CONF: /* ncci */
@@ -2014,8 +2015,8 @@ static void send_listen(capidrv_contr *card)
2014 card->cipmask, 2015 card->cipmask,
2015 card->cipmask2, 2016 card->cipmask2,
2016 NULL, NULL); 2017 NULL, NULL);
2017 send_message(card, &cmdcmsg);
2018 listen_change_state(card, EV_LISTEN_REQ); 2018 listen_change_state(card, EV_LISTEN_REQ);
2019 send_message(card, &cmdcmsg);
2019} 2020}
2020 2021
2021static void listentimerfunc(unsigned long x) 2022static void listentimerfunc(unsigned long x)
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 234cc5d53312..44a58e6f8f65 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -334,7 +334,14 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
334 return startbytes - numbytes; 334 return startbytes - numbytes;
335} 335}
336 336
337/* process a block of data received from the device 337/**
338 * gigaset_m10x_input() - process a block of data received from the device
339 * @inbuf: received data and device descriptor structure.
340 *
341 * Called by hardware module {ser,usb}_gigaset with a block of received
342 * bytes. Separates the bytes received over the serial data channel into
343 * user data and command replies (locked/unlocked) according to the
344 * current state of the interface.
338 */ 345 */
339void gigaset_m10x_input(struct inbuf_t *inbuf) 346void gigaset_m10x_input(struct inbuf_t *inbuf)
340{ 347{
@@ -543,16 +550,17 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
543 return iraw_skb; 550 return iraw_skb;
544} 551}
545 552
546/* gigaset_send_skb 553/**
547 * called by common.c to queue an skb for sending 554 * gigaset_m10x_send_skb() - queue an skb for sending
548 * and start transmission if necessary 555 * @bcs: B channel descriptor structure.
549 * parameters: 556 * @skb: data to send.
550 * B Channel control structure 557 *
551 * skb 558 * Called by i4l.c to encode and queue an skb for sending, and start
559 * transmission if necessary.
560 *
552 * Return value: 561 * Return value:
553 * number of bytes accepted for sending 562 * number of bytes accepted for sending (skb->len) if ok,
554 * (skb->len if ok, 0 if out of buffer space) 563 * error code < 0 (eg. -ENOMEM) on error
555 * or error code (< 0, eg. -EINVAL)
556 */ 564 */
557int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) 565int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
558{ 566{
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 781c4041f7b0..5ed1d99eb9f3 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -134,6 +134,7 @@ struct bas_cardstate {
134#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ 134#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */
135#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ 135#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */
136#define BS_SUSPEND 0x100 /* USB port suspended */ 136#define BS_SUSPEND 0x100 /* USB port suspended */
137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
137 138
138 139
139static struct gigaset_driver *driver = NULL; 140static struct gigaset_driver *driver = NULL;
@@ -319,6 +320,21 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
319 return -EINVAL; 320 return -EINVAL;
320} 321}
321 322
323/* set/clear bits in base connection state, return previous state
324 */
325static inline int update_basstate(struct bas_cardstate *ucs,
326 int set, int clear)
327{
328 unsigned long flags;
329 int state;
330
331 spin_lock_irqsave(&ucs->lock, flags);
332 state = ucs->basstate;
333 ucs->basstate = (state & ~clear) | set;
334 spin_unlock_irqrestore(&ucs->lock, flags);
335 return state;
336}
337
322/* error_hangup 338/* error_hangup
323 * hang up any existing connection because of an unrecoverable error 339 * hang up any existing connection because of an unrecoverable error
324 * This function may be called from any context and takes care of scheduling 340 * This function may be called from any context and takes care of scheduling
@@ -350,12 +366,9 @@ static inline void error_hangup(struct bc_state *bcs)
350 */ 366 */
351static inline void error_reset(struct cardstate *cs) 367static inline void error_reset(struct cardstate *cs)
352{ 368{
353 /* close AT command channel to recover (ignore errors) */ 369 /* reset interrupt pipe to recover (ignore errors) */
354 req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); 370 update_basstate(cs->hw.bas, BS_RESETTING, 0);
355 371 req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT);
356 //FIXME try to recover without bothering the user
357 dev_err(cs->dev,
358 "unrecoverable error - please disconnect Gigaset base to reset\n");
359} 372}
360 373
361/* check_pending 374/* check_pending
@@ -398,8 +411,13 @@ static void check_pending(struct bas_cardstate *ucs)
398 case HD_DEVICE_INIT_ACK: /* no reply expected */ 411 case HD_DEVICE_INIT_ACK: /* no reply expected */
399 ucs->pending = 0; 412 ucs->pending = 0;
400 break; 413 break;
401 /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE 414 case HD_RESET_INTERRUPT_PIPE:
402 * are handled separately and should never end up here 415 if (!(ucs->basstate & BS_RESETTING))
416 ucs->pending = 0;
417 break;
418 /*
419 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
420 * and should never end up here
403 */ 421 */
404 default: 422 default:
405 dev_warn(&ucs->interface->dev, 423 dev_warn(&ucs->interface->dev,
@@ -449,21 +467,6 @@ static void cmd_in_timeout(unsigned long data)
449 error_reset(cs); 467 error_reset(cs);
450} 468}
451 469
452/* set/clear bits in base connection state, return previous state
453 */
454inline static int update_basstate(struct bas_cardstate *ucs,
455 int set, int clear)
456{
457 unsigned long flags;
458 int state;
459
460 spin_lock_irqsave(&ucs->lock, flags);
461 state = ucs->basstate;
462 ucs->basstate = (state & ~clear) | set;
463 spin_unlock_irqrestore(&ucs->lock, flags);
464 return state;
465}
466
467/* read_ctrl_callback 470/* read_ctrl_callback
468 * USB completion handler for control pipe input 471 * USB completion handler for control pipe input
469 * called by the USB subsystem in interrupt context 472 * called by the USB subsystem in interrupt context
@@ -762,7 +765,8 @@ static void read_int_callback(struct urb *urb)
762 break; 765 break;
763 766
764 case HD_RESET_INTERRUPT_PIPE_ACK: 767 case HD_RESET_INTERRUPT_PIPE_ACK:
765 gig_dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); 768 update_basstate(ucs, 0, BS_RESETTING);
769 dev_notice(cs->dev, "interrupt pipe reset\n");
766 break; 770 break;
767 771
768 case HD_SUSPEND_END: 772 case HD_SUSPEND_END:
@@ -1331,28 +1335,24 @@ static void read_iso_tasklet(unsigned long data)
1331 rcvbuf = urb->transfer_buffer; 1335 rcvbuf = urb->transfer_buffer;
1332 totleft = urb->actual_length; 1336 totleft = urb->actual_length;
1333 for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { 1337 for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
1334 if (unlikely(urb->iso_frame_desc[frame].status)) { 1338 numbytes = urb->iso_frame_desc[frame].actual_length;
1339 if (unlikely(urb->iso_frame_desc[frame].status))
1335 dev_warn(cs->dev, 1340 dev_warn(cs->dev,
1336 "isochronous read: frame %d: %s\n", 1341 "isochronous read: frame %d[%d]: %s\n",
1337 frame, 1342 frame, numbytes,
1338 get_usb_statmsg( 1343 get_usb_statmsg(
1339 urb->iso_frame_desc[frame].status)); 1344 urb->iso_frame_desc[frame].status));
1340 break; 1345 if (unlikely(numbytes > BAS_MAXFRAME))
1341 }
1342 numbytes = urb->iso_frame_desc[frame].actual_length;
1343 if (unlikely(numbytes > BAS_MAXFRAME)) {
1344 dev_warn(cs->dev, 1346 dev_warn(cs->dev,
1345 "isochronous read: frame %d: " 1347 "isochronous read: frame %d: "
1346 "numbytes (%d) > BAS_MAXFRAME\n", 1348 "numbytes (%d) > BAS_MAXFRAME\n",
1347 frame, numbytes); 1349 frame, numbytes);
1348 break;
1349 }
1350 if (unlikely(numbytes > totleft)) { 1350 if (unlikely(numbytes > totleft)) {
1351 dev_warn(cs->dev, 1351 dev_warn(cs->dev,
1352 "isochronous read: frame %d: " 1352 "isochronous read: frame %d: "
1353 "numbytes (%d) > totleft (%d)\n", 1353 "numbytes (%d) > totleft (%d)\n",
1354 frame, numbytes, totleft); 1354 frame, numbytes, totleft);
1355 break; 1355 numbytes = totleft;
1356 } 1356 }
1357 offset = urb->iso_frame_desc[frame].offset; 1357 offset = urb->iso_frame_desc[frame].offset;
1358 if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { 1358 if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
@@ -1361,7 +1361,7 @@ static void read_iso_tasklet(unsigned long data)
1361 "offset (%d) + numbytes (%d) " 1361 "offset (%d) + numbytes (%d) "
1362 "> BAS_INBUFSIZE\n", 1362 "> BAS_INBUFSIZE\n",
1363 frame, offset, numbytes); 1363 frame, offset, numbytes);
1364 break; 1364 numbytes = BAS_INBUFSIZE - offset;
1365 } 1365 }
1366 gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); 1366 gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
1367 totleft -= numbytes; 1367 totleft -= numbytes;
@@ -1433,6 +1433,7 @@ static void req_timeout(unsigned long data)
1433 1433
1434 case HD_CLOSE_ATCHANNEL: 1434 case HD_CLOSE_ATCHANNEL:
1435 dev_err(bcs->cs->dev, "timeout closing AT channel\n"); 1435 dev_err(bcs->cs->dev, "timeout closing AT channel\n");
1436 error_reset(bcs->cs);
1436 break; 1437 break;
1437 1438
1438 case HD_CLOSE_B2CHANNEL: 1439 case HD_CLOSE_B2CHANNEL:
@@ -1442,6 +1443,13 @@ static void req_timeout(unsigned long data)
1442 error_reset(bcs->cs); 1443 error_reset(bcs->cs);
1443 break; 1444 break;
1444 1445
1446 case HD_RESET_INTERRUPT_PIPE:
1447 /* error recovery escalation */
1448 dev_err(bcs->cs->dev,
1449 "reset interrupt pipe timeout, attempting USB reset\n");
1450 usb_queue_reset_device(bcs->cs->hw.bas->interface);
1451 break;
1452
1445 default: 1453 default:
1446 dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", 1454 dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n",
1447 pending); 1455 pending);
@@ -1934,6 +1942,15 @@ static int gigaset_write_cmd(struct cardstate *cs,
1934 goto notqueued; 1942 goto notqueued;
1935 } 1943 }
1936 1944
1945 /* translate "+++" escape sequence sent as a single separate command
1946 * into "close AT channel" command for error recovery.
1947 * The next command will reopen the AT channel automatically.
1948 */
1949 if (len == 3 && !memcmp(buf, "+++", 3)) {
1950 rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
1951 goto notqueued;
1952 }
1953
1937 if (len > IF_WRITEBUF) 1954 if (len > IF_WRITEBUF)
1938 len = IF_WRITEBUF; 1955 len = IF_WRITEBUF;
1939 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 1956 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
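With the gigaset_write_cmd() hunk above, a bare "+++" written to the character device is mapped to HD_CLOSE_ATCHANNEL instead of being forwarded to the base. A hedged userspace sketch of poking that recovery path; the /dev/ttyGB0 node name and the assumption that a 3-byte write() reaches the driver as a single command are not taken from the patch.

/* sketch only: ask the driver to close (and later reopen) the AT channel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyGB0", O_WRONLY | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "+++", 3) != 3)	/* exactly the three escape bytes */
		perror("write");
	close(fd);
	return 0;
}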
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index e4141bf8b2f3..33dcd8d72b7c 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -22,6 +22,12 @@
22#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" 22#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
23#define DRIVER_DESC "Driver for Gigaset 307x" 23#define DRIVER_DESC "Driver for Gigaset 307x"
24 24
25#ifdef CONFIG_GIGASET_DEBUG
26#define DRIVER_DESC_DEBUG " (debug build)"
27#else
28#define DRIVER_DESC_DEBUG ""
29#endif
30
25/* Module parameters */ 31/* Module parameters */
26int gigaset_debuglevel = DEBUG_DEFAULT; 32int gigaset_debuglevel = DEBUG_DEFAULT;
27EXPORT_SYMBOL_GPL(gigaset_debuglevel); 33EXPORT_SYMBOL_GPL(gigaset_debuglevel);
@@ -32,6 +38,17 @@ MODULE_PARM_DESC(debug, "debug level");
32#define VALID_MINOR 0x01 38#define VALID_MINOR 0x01
33#define VALID_ID 0x02 39#define VALID_ID 0x02
34 40
41/**
42 * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging
43 * @level: debugging level.
44 * @msg: message prefix.
45 * @len: number of bytes to dump.
46 * @buf: data to dump.
47 *
48 * If the current debugging level includes one of the bits set in @level,
49 * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio,
50 * prefixed by the text @msg.
51 */
35void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, 52void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
36 size_t len, const unsigned char *buf) 53 size_t len, const unsigned char *buf)
37{ 54{
@@ -274,6 +291,20 @@ static void clear_events(struct cardstate *cs)
274 spin_unlock_irqrestore(&cs->ev_lock, flags); 291 spin_unlock_irqrestore(&cs->ev_lock, flags);
275} 292}
276 293
294/**
295 * gigaset_add_event() - add event to device event queue
296 * @cs: device descriptor structure.
297 * @at_state: connection state structure.
298 * @type: event type.
299 * @ptr: pointer parameter for event.
300 * @parameter: integer parameter for event.
301 * @arg: pointer parameter for event.
302 *
303 * Allocate an event queue entry from the device's event queue, and set it up
304 * with the parameters given.
305 *
306 * Return value: added event
307 */
277struct event_t *gigaset_add_event(struct cardstate *cs, 308struct event_t *gigaset_add_event(struct cardstate *cs,
278 struct at_state_t *at_state, int type, 309 struct at_state_t *at_state, int type,
279 void *ptr, int parameter, void *arg) 310 void *ptr, int parameter, void *arg)
@@ -398,6 +429,15 @@ static void make_invalid(struct cardstate *cs, unsigned mask)
398 spin_unlock_irqrestore(&drv->lock, flags); 429 spin_unlock_irqrestore(&drv->lock, flags);
399} 430}
400 431
432/**
433 * gigaset_freecs() - free all associated resources of a device
434 * @cs: device descriptor structure.
435 *
436 * Stops all tasklets and timers, unregisters the device from all
437 * subsystems it was registered to, deallocates the device structure
438 * @cs and all structures referenced from it.
439 * Operations on the device should be stopped before calling this.
440 */
401void gigaset_freecs(struct cardstate *cs) 441void gigaset_freecs(struct cardstate *cs)
402{ 442{
403 int i; 443 int i;
@@ -506,7 +546,12 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
506 inbuf->inputstate = inputstate; 546 inbuf->inputstate = inputstate;
507} 547}
508 548
509/* append received bytes to inbuf */ 549/**
550 * gigaset_fill_inbuf() - append received data to input buffer
551 * @inbuf: buffer structure.
552 * @src: received data.
553 * @numbytes: number of bytes received.
554 */
510int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, 555int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
511 unsigned numbytes) 556 unsigned numbytes)
512{ 557{
@@ -606,20 +651,22 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
606 return NULL; 651 return NULL;
607} 652}
608 653
609/* gigaset_initcs 654/**
655 * gigaset_initcs() - initialize device structure
656 * @drv: hardware driver the device belongs to
657 * @channels: number of B channels supported by device
658 * @onechannel: !=0 if B channel data and AT commands share one
659 * communication channel (M10x),
660 * ==0 if B channels have separate communication channels (base)
661 * @ignoreframes: number of frames to ignore after setting up B channel
662 * @cidmode: !=0: start in CallID mode
663 * @modulename: name of driver module for LL registration
664 *
610 * Allocate and initialize cardstate structure for Gigaset driver 665 * Allocate and initialize cardstate structure for Gigaset driver
611 * Calls hardware dependent gigaset_initcshw() function 666 * Calls hardware dependent gigaset_initcshw() function
612 * Calls B channel initialization function gigaset_initbcs() for each B channel 667 * Calls B channel initialization function gigaset_initbcs() for each B channel
613 * parameters: 668 *
614 * drv hardware driver the device belongs to 669 * Return value:
615 * channels number of B channels supported by device
616 * onechannel !=0: B channel data and AT commands share one
617 * communication channel
618 * ==0: B channels have separate communication channels
619 * ignoreframes number of frames to ignore after setting up B channel
620 * cidmode !=0: start in CallID mode
621 * modulename name of driver module (used for I4L registration)
622 * return value:
623 * pointer to cardstate structure 670 * pointer to cardstate structure
624 */ 671 */
625struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, 672struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
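A minimal sketch of how a hardware module's probe path might call gigaset_initcs() with the parameters documented above, roughly along the lines of the existing {ser,usb,bas}_gigaset modules. The EXAMPLE_* constants, the wrapper function and the module name string are placeholders, not taken from the source.

#include "gigaset.h"		/* driver-private header */

#define EXAMPLE_CHANNELS	1	/* one B channel (M10x-style device) */
#define EXAMPLE_IGNOREFRAMES	3	/* frames to discard after B channel setup */

/* sketch only: allocate per-device state during probe */
static struct cardstate *example_alloc_cs(struct gigaset_driver *driver)
{
	/* onechannel=1: AT commands and B channel data share one channel;
	 * cidmode=1: start in CallID mode */
	return gigaset_initcs(driver, EXAMPLE_CHANNELS, 1,
			      EXAMPLE_IGNOREFRAMES, 1, "example_gigaset");
}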
@@ -837,6 +884,17 @@ static void cleanup_cs(struct cardstate *cs)
837} 884}
838 885
839 886
887/**
888 * gigaset_start() - start device operations
889 * @cs: device descriptor structure.
890 *
891 * Prepares the device for use by setting up communication parameters,
892 * scheduling an EV_START event to initiate device initialization, and
893 * waiting for completion of the initialization.
894 *
895 * Return value:
896 * 1 - success, 0 - error
897 */
840int gigaset_start(struct cardstate *cs) 898int gigaset_start(struct cardstate *cs)
841{ 899{
842 unsigned long flags; 900 unsigned long flags;
@@ -879,9 +937,15 @@ error:
879} 937}
880EXPORT_SYMBOL_GPL(gigaset_start); 938EXPORT_SYMBOL_GPL(gigaset_start);
881 939
882/* gigaset_shutdown 940/**
883 * check if a device is associated to the cardstate structure and stop it 941 * gigaset_shutdown() - shut down device operations
884 * return value: 0 if ok, -1 if no device was associated 942 * @cs: device descriptor structure.
943 *
944 * Deactivates the device by scheduling an EV_SHUTDOWN event and
945 * waiting for completion of the shutdown.
946 *
947 * Return value:
948 * 0 - success, -1 - error (no device associated)
885 */ 949 */
886int gigaset_shutdown(struct cardstate *cs) 950int gigaset_shutdown(struct cardstate *cs)
887{ 951{
@@ -912,6 +976,13 @@ exit:
912} 976}
913EXPORT_SYMBOL_GPL(gigaset_shutdown); 977EXPORT_SYMBOL_GPL(gigaset_shutdown);
914 978
979/**
980 * gigaset_stop() - stop device operations
981 * @cs: device descriptor structure.
982 *
983 * Stops operations on the device by scheduling an EV_STOP event and
984 * waiting for completion of the shutdown.
985 */
915void gigaset_stop(struct cardstate *cs) 986void gigaset_stop(struct cardstate *cs)
916{ 987{
917 mutex_lock(&cs->mutex); 988 mutex_lock(&cs->mutex);
@@ -1020,6 +1091,14 @@ struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
1020 return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); 1091 return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
1021} 1092}
1022 1093
1094/**
1095 * gigaset_freedriver() - free all associated resources of a driver
1096 * @drv: driver descriptor structure.
1097 *
1098 * Unregisters the driver from the system and deallocates the driver
1099 * structure @drv and all structures referenced from it.
1100 * All devices should be shut down before calling this.
1101 */
1023void gigaset_freedriver(struct gigaset_driver *drv) 1102void gigaset_freedriver(struct gigaset_driver *drv)
1024{ 1103{
1025 unsigned long flags; 1104 unsigned long flags;
@@ -1035,14 +1114,16 @@ void gigaset_freedriver(struct gigaset_driver *drv)
1035} 1114}
1036EXPORT_SYMBOL_GPL(gigaset_freedriver); 1115EXPORT_SYMBOL_GPL(gigaset_freedriver);
1037 1116
1038/* gigaset_initdriver 1117/**
1118 * gigaset_initdriver() - initialize driver structure
1119 * @minor: First minor number
1120 * @minors: Number of minors this driver can handle
1121 * @procname: Name of the driver
1122 * @devname: Name of the device files (prefix without minor number)
1123 *
1039 * Allocate and initialize gigaset_driver structure. Initialize interface. 1124 * Allocate and initialize gigaset_driver structure. Initialize interface.
1040 * parameters: 1125 *
1041 * minor First minor number 1126 * Return value:
1042 * minors Number of minors this driver can handle
1043 * procname Name of the driver
1044 * devname Name of the device files (prefix without minor number)
1045 * return value:
1046 * Pointer to the gigaset_driver structure on success, NULL on failure. 1127 * Pointer to the gigaset_driver structure on success, NULL on failure.
1047 */ 1128 */
1048struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, 1129struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
@@ -1095,6 +1176,13 @@ error:
1095} 1176}
1096EXPORT_SYMBOL_GPL(gigaset_initdriver); 1177EXPORT_SYMBOL_GPL(gigaset_initdriver);
1097 1178
1179/**
1180 * gigaset_blockdriver() - block driver
1181 * @drv: driver descriptor structure.
1182 *
1183 * Prevents the driver from attaching new devices, in preparation for
1184 * deregistration.
1185 */
1098void gigaset_blockdriver(struct gigaset_driver *drv) 1186void gigaset_blockdriver(struct gigaset_driver *drv)
1099{ 1187{
1100 drv->blocked = 1; 1188 drv->blocked = 1;
@@ -1110,7 +1198,7 @@ static int __init gigaset_init_module(void)
1110 if (gigaset_debuglevel == 1) 1198 if (gigaset_debuglevel == 1)
1111 gigaset_debuglevel = DEBUG_DEFAULT; 1199 gigaset_debuglevel = DEBUG_DEFAULT;
1112 1200
1113 pr_info(DRIVER_DESC "\n"); 1201 pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
1114 return 0; 1202 return 0;
1115} 1203}
1116 1204
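Read together, the kernel-doc added to common.c above implies an ordering for teardown. A hedged sketch of that sequence; the example_* hooks are hypothetical module callbacks, only the gigaset_* calls are the documented API.

#include "gigaset.h"

/* sketch only: per-device teardown */
static void example_disconnect(struct cardstate *cs)
{
	gigaset_stop(cs);	/* schedule EV_STOP, wait until it completes */
	gigaset_freecs(cs);	/* stop tasklets/timers, free the structure */
}

/* sketch only: module unload */
static void example_exit(struct gigaset_driver *driver)
{
	gigaset_blockdriver(driver);	/* no new devices may attach */
	/* ... disconnect any remaining devices here ... */
	gigaset_freedriver(driver);	/* unregister and free the driver */
}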
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 2d91049571a4..cc768caa38f5 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -207,7 +207,6 @@ struct reply_t gigaset_tab_nocid[] =
207 /* leave dle mode */ 207 /* leave dle mode */
208 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, 208 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
209 {RSP_OK, 201,201, -1, 202,-1}, 209 {RSP_OK, 201,201, -1, 202,-1},
210 //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE
211 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, 210 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}},
212 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, 211 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}},
213 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 212 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}},
@@ -265,6 +264,7 @@ struct reply_t gigaset_tab_nocid[] =
265 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME 264 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME
266 265
267 /* misc. */ 266 /* misc. */
267 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
268 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 268 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
269 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 269 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
270 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 270 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
@@ -328,10 +328,9 @@ struct reply_t gigaset_tab_cid[] =
328 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? 328 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1?
329 {RSP_OK, 401,401, -1, 402, 5}, 329 {RSP_OK, 401,401, -1, 402, 5},
330 {RSP_ZVLS, 402,402, 0, 403, 5}, 330 {RSP_ZVLS, 402,402, 0, 403, 5},
331 {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ 331 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
332 //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? 332 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
333 {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? 333 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
334 {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
335 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, 334 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}},
336 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, 335 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}},
337 336
@@ -474,8 +473,13 @@ static int cid_of_response(char *s)
474 //FIXME is ;<digit>+ at end of non-CID response really impossible? 473 //FIXME is ;<digit>+ at end of non-CID response really impossible?
475} 474}
476 475
477/* This function will be called via task queue from the callback handler. 476/**
478 * We received a modem response and have to handle it.. 477 * gigaset_handle_modem_response() - process received modem response
478 * @cs: device descriptor structure.
479 *
480 * Called by asyncdata/isocdata if a block of data received from the
481 * device must be processed as a modem command response. The data is
482 * already in the cs structure.
479 */ 483 */
480void gigaset_handle_modem_response(struct cardstate *cs) 484void gigaset_handle_modem_response(struct cardstate *cs)
481{ 485{
@@ -707,6 +711,11 @@ static void disconnect(struct at_state_t **at_state_p)
707 if (bcs) { 711 if (bcs) {
708 /* B channel assigned: invoke hardware specific handler */ 712 /* B channel assigned: invoke hardware specific handler */
709 cs->ops->close_bchannel(bcs); 713 cs->ops->close_bchannel(bcs);
714 /* notify LL */
715 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
716 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
717 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
718 }
710 } else { 719 } else {
711 /* no B channel assigned: just deallocate */ 720 /* no B channel assigned: just deallocate */
712 spin_lock_irqsave(&cs->lock, flags); 721 spin_lock_irqsave(&cs->lock, flags);
@@ -1429,11 +1438,12 @@ static void do_action(int action, struct cardstate *cs,
1429 cs->gotfwver = -1; 1438 cs->gotfwver = -1;
1430 dev_err(cs->dev, "could not read firmware version.\n"); 1439 dev_err(cs->dev, "could not read firmware version.\n");
1431 break; 1440 break;
1432#ifdef CONFIG_GIGASET_DEBUG
1433 case ACT_ERROR: 1441 case ACT_ERROR:
1434 *p_genresp = 1; 1442 gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
1435 *p_resp_code = RSP_ERROR; 1443 __func__, at_state->ConState);
1444 cs->cur_at_seq = SEQ_NONE;
1436 break; 1445 break;
1446#ifdef CONFIG_GIGASET_DEBUG
1437 case ACT_TEST: 1447 case ACT_TEST:
1438 { 1448 {
1439 static int count = 3; //2; //1; 1449 static int count = 3; //2; //1;
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 9b22f9cf2f33..654489d836cd 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -51,6 +51,12 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
51 return -ENODEV; 51 return -ENODEV;
52 } 52 }
53 bcs = &cs->bcs[channel]; 53 bcs = &cs->bcs[channel];
54
55 /* can only handle linear sk_buffs */
56 if (skb_linearize(skb) < 0) {
57 dev_err(cs->dev, "%s: skb_linearize failed\n", __func__);
58 return -ENOMEM;
59 }
54 len = skb->len; 60 len = skb->len;
55 61
56 gig_dbg(DEBUG_LLDATA, 62 gig_dbg(DEBUG_LLDATA,
@@ -79,6 +85,14 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
79 return cs->ops->send_skb(bcs, skb); 85 return cs->ops->send_skb(bcs, skb);
80} 86}
81 87
88/**
89 * gigaset_skb_sent() - acknowledge sending an skb
90 * @bcs: B channel descriptor structure.
91 * @skb: sent data.
92 *
93 * Called by hardware module {bas,ser,usb}_gigaset when the data in an
94 * skb has been successfully sent, for signalling completion to the LL.
95 */
82void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) 96void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
83{ 97{
84 unsigned len; 98 unsigned len;
@@ -455,6 +469,15 @@ int gigaset_isdn_setup_accept(struct at_state_t *at_state)
455 return 0; 469 return 0;
456} 470}
457 471
472/**
473 * gigaset_isdn_icall() - signal incoming call
474 * @at_state: connection state structure.
475 *
476 * Called by main module to notify the LL that an incoming call has been
477 * received. @at_state contains the parameters of the call.
478 *
479 * Return value: call disposition (ICALL_*)
480 */
458int gigaset_isdn_icall(struct at_state_t *at_state) 481int gigaset_isdn_icall(struct at_state_t *at_state)
459{ 482{
460 struct cardstate *cs = at_state->cs; 483 struct cardstate *cs = at_state->cs;
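The gigaset_skb_sent() kernel-doc above describes the completion half of the data path: i4l.c hands the skb to cs->ops->send_skb(), and the hardware module reports back once the data has gone out. A minimal sketch of the hardware-module side; the function, its status parameter and the freeing policy are illustrative, only gigaset_skb_sent(bcs, skb) is the documented call.

#include "gigaset.h"

/* sketch only: TX completion handler in a hardware module */
static void example_tx_complete(struct bc_state *bcs, struct sk_buff *skb,
				int status)
{
	if (status == 0)
		gigaset_skb_sent(bcs, skb);	/* signal completion to the LL */
	dev_kfree_skb_any(skb);	/* sketch assumes the hw module frees the skb */
}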
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index f33ac27de643..6a8e1384e7bd 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -616,6 +616,15 @@ void gigaset_if_free(struct cardstate *cs)
616 tty_unregister_device(drv->tty, cs->minor_index); 616 tty_unregister_device(drv->tty, cs->minor_index);
617} 617}
618 618
619/**
620 * gigaset_if_receive() - pass a received block of data to the tty device
621 * @cs: device descriptor structure.
622 * @buffer: received data.
623 * @len: number of bytes received.
624 *
625 * Called by asyncdata/isocdata if a block of data received from the
626 * device must be sent to userspace through the ttyG* device.
627 */
619void gigaset_if_receive(struct cardstate *cs, 628void gigaset_if_receive(struct cardstate *cs,
620 unsigned char *buffer, size_t len) 629 unsigned char *buffer, size_t len)
621{ 630{
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index bed38fcc432b..9f3ef7b4248c 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -429,7 +429,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
429 return -EAGAIN; 429 return -EAGAIN;
430 } 430 }
431 431
432 dump_bytes(DEBUG_STREAM, "snd data", in, count); 432 dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
433 433
434 /* bitstuff and checksum input data */ 434 /* bitstuff and checksum input data */
435 fcs = PPP_INITFCS; 435 fcs = PPP_INITFCS;
@@ -448,7 +448,6 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
448 /* put closing flag and repeat byte for flag idle */ 448 /* put closing flag and repeat byte for flag idle */
449 isowbuf_putflag(iwb); 449 isowbuf_putflag(iwb);
450 end = isowbuf_donewrite(iwb); 450 end = isowbuf_donewrite(iwb);
451 dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1);
452 return end; 451 return end;
453} 452}
454 453
@@ -482,6 +481,8 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
482 } 481 }
483 482
484 gig_dbg(DEBUG_STREAM, "put %d bytes", count); 483 gig_dbg(DEBUG_STREAM, "put %d bytes", count);
484 dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
485
485 write = iwb->write; 486 write = iwb->write;
486 do { 487 do {
487 c = bitrev8(*in++); 488 c = bitrev8(*in++);
@@ -583,7 +584,7 @@ static inline void hdlc_done(struct bc_state *bcs)
583 procskb->tail -= 2; 584 procskb->tail -= 2;
584 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", 585 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)",
585 __func__, procskb->len); 586 __func__, procskb->len);
586 dump_bytes(DEBUG_STREAM, 587 dump_bytes(DEBUG_STREAM_DUMP,
587 "rcv data", procskb->data, procskb->len); 588 "rcv data", procskb->data, procskb->len);
588 bcs->hw.bas->goodbytes += procskb->len; 589 bcs->hw.bas->goodbytes += procskb->len;
589 gigaset_rcv_skb(procskb, bcs->cs, bcs); 590 gigaset_rcv_skb(procskb, bcs->cs, bcs);
@@ -878,6 +879,8 @@ static inline void trans_receive(unsigned char *src, unsigned count,
878 dobytes--; 879 dobytes--;
879 } 880 }
880 if (dobytes == 0) { 881 if (dobytes == 0) {
882 dump_bytes(DEBUG_STREAM_DUMP,
883 "rcv data", skb->data, skb->len);
881 gigaset_rcv_skb(skb, bcs->cs, bcs); 884 gigaset_rcv_skb(skb, bcs->cs, bcs);
882 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); 885 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
883 if (!skb) { 886 if (!skb) {
@@ -973,16 +976,17 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
973 976
974/* == data output ========================================================== */ 977/* == data output ========================================================== */
975 978
976/* gigaset_send_skb 979/**
977 * called by common.c to queue an skb for sending 980 * gigaset_isoc_send_skb() - queue an skb for sending
978 * and start transmission if necessary 981 * @bcs: B channel descriptor structure.
979 * parameters: 982 * @skb: data to send.
980 * B Channel control structure 983 *
981 * skb 984 * Called by i4l.c to queue an skb for sending, and start transmission if
982 * return value: 985 * necessary.
983 * number of bytes accepted for sending 986 *
984 * (skb->len if ok, 0 if out of buffer space) 987 * Return value:
985 * or error code (< 0, eg. -EINVAL) 988 * number of bytes accepted for sending (skb->len) if ok,
989 * error code < 0 (eg. -ENODEV) on error
986 */ 990 */
987int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) 991int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
988{ 992{
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index e1035c895808..f85dcd536508 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
29 unsigned i, nr_strings; 29 unsigned i, nr_strings;
30 char **buffer, *string; 30 char **buffer, *string;
31 31
32 /* Find all null-terminated (including zero length) strings in
33 the TPLLV1_INFO field. Trailing garbage is ignored. */
32 buf += 2; 34 buf += 2;
33 size -= 2; 35 size -= 2;
34 36
@@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
39 if (buf[i] == 0) 41 if (buf[i] == 0)
40 nr_strings++; 42 nr_strings++;
41 } 43 }
42 44 if (nr_strings == 0)
43 if (nr_strings < 4) {
44 printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n");
45 return 0; 45 return 0;
46 }
47 46
48 size = i; 47 size = i;
49 48
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index fdf5937233fc..04f63c77071d 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
721 ps->rx_errors++; 721 ps->rx_errors++;
722 if (status & RX_MISSED_FRAME) 722 if (status & RX_MISSED_FRAME)
723 ps->rx_missed_errors++; 723 ps->rx_missed_errors++;
724 if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) 724 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
725 ps->rx_length_errors++; 725 ps->rx_length_errors++;
726 if (status & RX_CRC_ERROR) 726 if (status & RX_CRC_ERROR)
727 ps->rx_crc_errors++; 727 ps->rx_crc_errors++;
@@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev)
794 printk("rx len error\n"); 794 printk("rx len error\n");
795 if (status & RX_U_CNTRL_FRAME) 795 if (status & RX_U_CNTRL_FRAME)
796 printk("rx u control frame\n"); 796 printk("rx u control frame\n");
797 if (status & RX_MISSED_FRAME)
798 printk("rx miss\n");
799 } 797 }
800 } 798 }
801 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); 799 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 79d35d122c08..89876ade5e33 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1129,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1129 spin_lock_bh(&adapter->mcc_lock); 1129 spin_lock_bh(&adapter->mcc_lock);
1130 1130
1131 wrb = wrb_from_mccq(adapter); 1131 wrb = wrb_from_mccq(adapter);
1132 req = embedded_payload(wrb);
1133 sge = nonembedded_sgl(wrb); 1132 sge = nonembedded_sgl(wrb);
1134 1133
1135 be_wrb_hdr_prepare(wrb, cmd->size, false, 1); 1134 be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8b4c2cb9ad62..a86f917f85f4 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -62,7 +62,7 @@ enum {
62 MCC_STATUS_QUEUE_FLUSHING = 0x4, 62 MCC_STATUS_QUEUE_FLUSHING = 0x4,
63/* The command is completing with a DMA error */ 63/* The command is completing with a DMA error */
64 MCC_STATUS_DMA_FAILED = 0x5, 64 MCC_STATUS_DMA_FAILED = 0x5,
65 MCC_STATUS_NOT_SUPPORTED = 0x66 65 MCC_STATUS_NOT_SUPPORTED = 66
66}; 66};
67 67
68#define CQE_STATUS_COMPL_MASK 0xFFFF 68#define CQE_STATUS_COMPL_MASK 0xFFFF
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 11445df3dbc0..cda5bf2fc50a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = {
358 .get_rx_csum = be_get_rx_csum, 358 .get_rx_csum = be_get_rx_csum,
359 .set_rx_csum = be_set_rx_csum, 359 .set_rx_csum = be_set_rx_csum,
360 .get_tx_csum = ethtool_op_get_tx_csum, 360 .get_tx_csum = ethtool_op_get_tx_csum,
361 .set_tx_csum = ethtool_op_set_tx_csum, 361 .set_tx_csum = ethtool_op_set_tx_hw_csum,
362 .get_sg = ethtool_op_get_sg, 362 .get_sg = ethtool_op_get_sg,
363 .set_sg = ethtool_op_set_sg, 363 .set_sg = ethtool_op_set_sg,
364 .get_tso = ethtool_op_get_tso, 364 .get_tso = ethtool_op_get_tso,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 2f9b50156e0c..6d5e81f7046f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter)
197 /* no space available in linux */ 197 /* no space available in linux */
198 dev_stats->tx_dropped = 0; 198 dev_stats->tx_dropped = 0;
199 199
200 dev_stats->multicast = port_stats->tx_multicastframes; 200 dev_stats->multicast = port_stats->rx_multicast_frames;
201 dev_stats->collisions = 0; 201 dev_stats->collisions = 0;
202 202
203 /* detailed tx_errors */ 203 /* detailed tx_errors */
@@ -1899,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev)
1899 struct be_adapter *adapter = netdev_priv(netdev); 1899 struct be_adapter *adapter = netdev_priv(netdev);
1900 1900
1901 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 1901 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
1902 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | 1902 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
1903 NETIF_F_IPV6_CSUM | NETIF_F_GRO; 1903 NETIF_F_GRO;
1904 1904
1905 netdev->flags |= IFF_MULTICAST; 1905 netdev->flags |= IFF_MULTICAST;
1906 1906
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b53b40ba88a8..d1e0563a67df 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = {
1803 | FLAG_HAS_AMT 1803 | FLAG_HAS_AMT
1804 | FLAG_HAS_CTRLEXT_ON_LOAD, 1804 | FLAG_HAS_CTRLEXT_ON_LOAD,
1805 .pba = 20, 1805 .pba = 20,
1806 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1806 .max_hw_frame_size = DEFAULT_JUMBO,
1807 .get_variants = e1000_get_variants_82571, 1807 .get_variants = e1000_get_variants_82571,
1808 .mac_ops = &e82571_mac_ops, 1808 .mac_ops = &e82571_mac_ops,
1809 .phy_ops = &e82_phy_ops_bm, 1809 .phy_ops = &e82_phy_ops_bm,
@@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = {
1820 | FLAG_HAS_AMT 1820 | FLAG_HAS_AMT
1821 | FLAG_HAS_CTRLEXT_ON_LOAD, 1821 | FLAG_HAS_CTRLEXT_ON_LOAD,
1822 .pba = 20, 1822 .pba = 20,
1823 .max_hw_frame_size = DEFAULT_JUMBO, 1823 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1824 .get_variants = e1000_get_variants_82571, 1824 .get_variants = e1000_get_variants_82571,
1825 .mac_ops = &e82571_mac_ops, 1825 .mac_ops = &e82571_mac_ops,
1826 .phy_ops = &e82_phy_ops_bm, 1826 .phy_ops = &e82_phy_ops_bm,
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b7311bc00258..34d0c69e67f7 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,10 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <net/ethoc.h> 20#include <net/ethoc.h>
21 21
22static int buffer_size = 0x8000; /* 32 KBytes */
23module_param(buffer_size, int, 0);
24MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
25
22/* register offsets */ 26/* register offsets */
23#define MODER 0x00 27#define MODER 0x00
24#define INT_SOURCE 0x04 28#define INT_SOURCE 0x04
@@ -167,6 +171,7 @@
167 * struct ethoc - driver-private device structure 171 * struct ethoc - driver-private device structure
168 * @iobase: pointer to I/O memory region 172 * @iobase: pointer to I/O memory region
169 * @membase: pointer to buffer memory region 173 * @membase: pointer to buffer memory region
174 * @dma_alloc: dma allocated buffer size
170 * @num_tx: number of send buffers 175 * @num_tx: number of send buffers
171 * @cur_tx: last send buffer written 176 * @cur_tx: last send buffer written
172 * @dty_tx: last buffer actually sent 177 * @dty_tx: last buffer actually sent
@@ -185,6 +190,7 @@
185struct ethoc { 190struct ethoc {
186 void __iomem *iobase; 191 void __iomem *iobase;
187 void __iomem *membase; 192 void __iomem *membase;
193 int dma_alloc;
188 194
189 unsigned int num_tx; 195 unsigned int num_tx;
190 unsigned int cur_tx; 196 unsigned int cur_tx;
@@ -284,7 +290,7 @@ static int ethoc_init_ring(struct ethoc *dev)
284 dev->cur_rx = 0; 290 dev->cur_rx = 0;
285 291
286 /* setup transmission buffers */ 292 /* setup transmission buffers */
287 bd.addr = 0; 293 bd.addr = virt_to_phys(dev->membase);
288 bd.stat = TX_BD_IRQ | TX_BD_CRC; 294 bd.stat = TX_BD_IRQ | TX_BD_CRC;
289 295
290 for (i = 0; i < dev->num_tx; i++) { 296 for (i = 0; i < dev->num_tx; i++) {
@@ -295,7 +301,6 @@ static int ethoc_init_ring(struct ethoc *dev)
295 bd.addr += ETHOC_BUFSIZ; 301 bd.addr += ETHOC_BUFSIZ;
296 } 302 }
297 303
298 bd.addr = dev->num_tx * ETHOC_BUFSIZ;
299 bd.stat = RX_BD_EMPTY | RX_BD_IRQ; 304 bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
300 305
301 for (i = 0; i < dev->num_rx; i++) { 306 for (i = 0; i < dev->num_rx; i++) {
@@ -400,8 +405,12 @@ static int ethoc_rx(struct net_device *dev, int limit)
400 if (ethoc_update_rx_stats(priv, &bd) == 0) { 405 if (ethoc_update_rx_stats(priv, &bd) == 0) {
401 int size = bd.stat >> 16; 406 int size = bd.stat >> 16;
402 struct sk_buff *skb = netdev_alloc_skb(dev, size); 407 struct sk_buff *skb = netdev_alloc_skb(dev, size);
408
409 size -= 4; /* strip the CRC */
410 skb_reserve(skb, 2); /* align TCP/IP header */
411
403 if (likely(skb)) { 412 if (likely(skb)) {
404 void *src = priv->membase + bd.addr; 413 void *src = phys_to_virt(bd.addr);
405 memcpy_fromio(skb_put(skb, size), src, size); 414 memcpy_fromio(skb_put(skb, size), src, size);
406 skb->protocol = eth_type_trans(skb, dev); 415 skb->protocol = eth_type_trans(skb, dev);
407 priv->stats.rx_packets++; 416 priv->stats.rx_packets++;
@@ -653,9 +662,9 @@ static int ethoc_open(struct net_device *dev)
653 if (ret) 662 if (ret)
654 return ret; 663 return ret;
655 664
656 /* calculate the number of TX/RX buffers */ 665 /* calculate the number of TX/RX buffers, maximum 128 supported */
657 num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; 666 num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
658 priv->num_tx = min(min_tx, num_bd / 4); 667 priv->num_tx = max(min_tx, num_bd / 4);
659 priv->num_rx = num_bd - priv->num_tx; 668 priv->num_rx = num_bd - priv->num_tx;
660 ethoc_write(priv, TX_BD_NUM, priv->num_tx); 669 ethoc_write(priv, TX_BD_NUM, priv->num_tx);
661 670
@@ -823,7 +832,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
823 else 832 else
824 bd.stat &= ~TX_BD_PAD; 833 bd.stat &= ~TX_BD_PAD;
825 834
826 dest = priv->membase + bd.addr; 835 dest = phys_to_virt(bd.addr);
827 memcpy_toio(dest, skb->data, skb->len); 836 memcpy_toio(dest, skb->data, skb->len);
828 837
829 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); 838 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -903,22 +912,19 @@ static int ethoc_probe(struct platform_device *pdev)
903 912
904 /* obtain buffer memory space */ 913 /* obtain buffer memory space */
905 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 914 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
906 if (!res) { 915 if (res) {
907 dev_err(&pdev->dev, "cannot obtain memory space\n"); 916 mem = devm_request_mem_region(&pdev->dev, res->start,
908 ret = -ENXIO;
909 goto free;
910 }
911
912 mem = devm_request_mem_region(&pdev->dev, res->start,
913 res->end - res->start + 1, res->name); 917 res->end - res->start + 1, res->name);
914 if (!mem) { 918 if (!mem) {
915 dev_err(&pdev->dev, "cannot request memory space\n"); 919 dev_err(&pdev->dev, "cannot request memory space\n");
916 ret = -ENXIO; 920 ret = -ENXIO;
917 goto free; 921 goto free;
922 }
923
924 netdev->mem_start = mem->start;
925 netdev->mem_end = mem->end;
918 } 926 }
919 927
920 netdev->mem_start = mem->start;
921 netdev->mem_end = mem->end;
922 928
923 /* obtain device IRQ number */ 929 /* obtain device IRQ number */
924 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 930 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -933,6 +939,7 @@ static int ethoc_probe(struct platform_device *pdev)
933 /* setup driver-private data */ 939 /* setup driver-private data */
934 priv = netdev_priv(netdev); 940 priv = netdev_priv(netdev);
935 priv->netdev = netdev; 941 priv->netdev = netdev;
942 priv->dma_alloc = 0;
936 943
937 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 944 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
938 mmio->end - mmio->start + 1); 945 mmio->end - mmio->start + 1);
@@ -942,12 +949,27 @@ static int ethoc_probe(struct platform_device *pdev)
942 goto error; 949 goto error;
943 } 950 }
944 951
945 priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, 952 if (netdev->mem_end) {
946 mem->end - mem->start + 1); 953 priv->membase = devm_ioremap_nocache(&pdev->dev,
947 if (!priv->membase) { 954 netdev->mem_start, mem->end - mem->start + 1);
948 dev_err(&pdev->dev, "cannot remap memory space\n"); 955 if (!priv->membase) {
949 ret = -ENXIO; 956 dev_err(&pdev->dev, "cannot remap memory space\n");
950 goto error; 957 ret = -ENXIO;
958 goto error;
959 }
960 } else {
961 /* Allocate buffer memory */
962 priv->membase = dma_alloc_coherent(NULL,
963 buffer_size, (void *)&netdev->mem_start,
964 GFP_KERNEL);
965 if (!priv->membase) {
966 dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
967 buffer_size);
968 ret = -ENOMEM;
969 goto error;
970 }
971 netdev->mem_end = netdev->mem_start + buffer_size;
972 priv->dma_alloc = buffer_size;
951 } 973 }
952 974
953 /* Allow the platform setup code to pass in a MAC address. */ 975 /* Allow the platform setup code to pass in a MAC address. */
@@ -1034,6 +1056,9 @@ free_mdio:
1034 kfree(priv->mdio->irq); 1056 kfree(priv->mdio->irq);
1035 mdiobus_free(priv->mdio); 1057 mdiobus_free(priv->mdio);
1036free: 1058free:
1059 if (priv->dma_alloc)
1060 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1061 netdev->mem_start);
1037 free_netdev(netdev); 1062 free_netdev(netdev);
1038out: 1063out:
1039 return ret; 1064 return ret;
@@ -1059,7 +1084,9 @@ static int ethoc_remove(struct platform_device *pdev)
1059 kfree(priv->mdio->irq); 1084 kfree(priv->mdio->irq);
1060 mdiobus_free(priv->mdio); 1085 mdiobus_free(priv->mdio);
1061 } 1086 }
1062 1087 if (priv->dma_alloc)
1088 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1089 netdev->mem_start);
1063 unregister_netdev(netdev); 1090 unregister_netdev(netdev);
1064 free_netdev(netdev); 1091 free_netdev(netdev);
1065 } 1092 }
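The probe() changes above make the second memory resource optional: when the platform provides no dedicated packet buffer RAM, the driver falls back to a coherent DMA allocation sized by the new buffer_size module parameter and remembers the size in dma_alloc so the error path and remove() can free it. A condensed sketch of that fallback, with devm_request_mem_region and error unwinding omitted; the names mirror the hunk.

/* sketch only: condensed buffer setup for ethoc_probe() */
static int example_setup_buffers(struct platform_device *pdev,
				 struct net_device *netdev,
				 struct ethoc *priv)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (res) {
		/* dedicated buffer RAM provided by the platform */
		priv->membase = devm_ioremap_nocache(&pdev->dev, res->start,
						res->end - res->start + 1);
		netdev->mem_start = res->start;
		netdev->mem_end = res->end;
		priv->dma_alloc = 0;
	} else {
		/* fall back to coherent DMA memory of buffer_size bytes */
		priv->membase = dma_alloc_coherent(NULL, buffer_size,
						(void *)&netdev->mem_start,
						GFP_KERNEL);
		if (!priv->membase)
			return -ENOMEM;
		netdev->mem_end = netdev->mem_start + buffer_size;
		priv->dma_alloc = buffer_size;	/* for dma_free_coherent() later */
	}
	return priv->membase ? 0 : -ENOMEM;
}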
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 2ec58dcdb82b..34b04924c8a1 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
330 330
331 switch (hw->device_id) { 331 switch (hw->device_id) {
332 case IXGBE_DEV_ID_82599_KX4: 332 case IXGBE_DEV_ID_82599_KX4:
333 case IXGBE_DEV_ID_82599_KX4_MEZZ:
334 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
333 case IXGBE_DEV_ID_82599_XAUI_LOM: 335 case IXGBE_DEV_ID_82599_XAUI_LOM:
334 /* Default device ID is mezzanine card KX/KX4 */ 336 /* Default device ID is mezzanine card KX/KX4 */
335 media_type = ixgbe_media_type_backplane; 337 media_type = ixgbe_media_type_backplane;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 28fbb9d281f9..cbb143ca1eb8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
97 board_82599 }, 97 board_82599 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
99 board_82599 }, 99 board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
101 board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
101 board_82599 }, 103 board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
105 board_82599 },
102 106
103 /* required last entry */ 107 /* required last entry */
104 {0, } 108 {0, }
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 7c93e923bf2e..ef4bdd58e016 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -49,9 +49,11 @@
49#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 49#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
50#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 50#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
51#define IXGBE_DEV_ID_82599_KX4 0x10F7 51#define IXGBE_DEV_ID_82599_KX4 0x10F7
52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
52#define IXGBE_DEV_ID_82599_CX4 0x10F9 53#define IXGBE_DEV_ID_82599_CX4 0x10F9
53#define IXGBE_DEV_ID_82599_SFP 0x10FB 54#define IXGBE_DEV_ID_82599_SFP 0x10FB
54#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 55#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
56#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
55 57
56/* General Registers */ 58/* General Registers */
57#define IXGBE_CTRL 0x00000 59#define IXGBE_CTRL 0x00000
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b5aa974827e5..9b9eab107704 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1714 /* 4 fragments per cmd des */ 1714 /* 4 fragments per cmd des */
1715 no_of_desc = (frag_count + 3) >> 2; 1715 no_of_desc = (frag_count + 3) >> 2;
1716 1716
1717 if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { 1717 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1718 netif_stop_queue(netdev); 1718 netif_stop_queue(netdev);
1719 return NETDEV_TX_BUSY; 1719 return NETDEV_TX_BUSY;
1720 } 1720 }
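The one-character netxen fix above is worth spelling out: unlikely() is only a branch-prediction hint, but because it wraps its argument in !!(...) it collapses the expression to 0 or 1, so placing the comparison outside the parentheses changes the logic as well as the hint. A small standalone sketch of the difference, with made-up numbers:

/* sketch only: effect of misplacing the closing parenthesis around unlikely() */
#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)	/* kernel definition */

int main(void)
{
	int no_of_desc = 5, avail = 3;	/* ring nearly full */

	int broken = unlikely(no_of_desc + 2) > avail;		/* 1 > 3 -> 0 */
	int fixed  = unlikely(no_of_desc + 2 > avail) ? 1 : 0;	/* 7 > 3 -> 1 */

	printf("broken=%d fixed=%d\n", broken, fixed);
	return 0;
}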
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 064a4fe1dd90..28a86224879d 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
71 struct pasemi_mac *mac = netdev_priv(netdev); 71 struct pasemi_mac *mac = netdev_priv(netdev);
72 struct phy_device *phydev = mac->phydev; 72 struct phy_device *phydev = mac->phydev;
73 73
74 if (!phydev)
75 return -EOPNOTSUPP;
76
74 return phy_ethtool_gset(phydev, cmd); 77 return phy_ethtool_gset(phydev, cmd);
75} 78}
76 79
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 474876c879cb..bd3447f04902 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = {
1754 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), 1754 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), 1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), 1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
1757 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), 1757 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), 1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
1759 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), 1759 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), 1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), 1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), 1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), 1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
1767 0xb4be14e3, 0x43ac239b, 0x0877b627), 1767 0xb4be14e3, 0x43ac239b, 0x0877b627),
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 30d5585beeee..3ec6e85587a2 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -9,6 +9,7 @@
9 9
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/rtnetlink.h>
12 13
13/* 14/*
14 * General definitions... 15 * General definitions...
@@ -135,9 +136,9 @@ enum {
135 RST_FO_TFO = (1 << 0), 136 RST_FO_TFO = (1 << 0),
136 RST_FO_RR_MASK = 0x00060000, 137 RST_FO_RR_MASK = 0x00060000,
137 RST_FO_RR_CQ_CAM = 0x00000000, 138 RST_FO_RR_CQ_CAM = 0x00000000,
138 RST_FO_RR_DROP = 0x00000001, 139 RST_FO_RR_DROP = 0x00000002,
139 RST_FO_RR_DQ = 0x00000002, 140 RST_FO_RR_DQ = 0x00000004,
140 RST_FO_RR_RCV_FUNC_CQ = 0x00000003, 141 RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
141 RST_FO_FRB = (1 << 12), 142 RST_FO_FRB = (1 << 12),
142 RST_FO_MOP = (1 << 13), 143 RST_FO_MOP = (1 << 13),
143 RST_FO_REG = (1 << 14), 144 RST_FO_REG = (1 << 14),
@@ -1477,7 +1478,6 @@ struct ql_adapter {
1477 u32 mailbox_in; 1478 u32 mailbox_in;
1478 u32 mailbox_out; 1479 u32 mailbox_out;
1479 struct mbox_params idc_mbc; 1480 struct mbox_params idc_mbc;
1480 struct mutex mpi_mutex;
1481 1481
1482 int tx_ring_size; 1482 int tx_ring_size;
1483 int rx_ring_size; 1483 int rx_ring_size;
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 68f9bd280f86..52073946bce3 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
45 if (!netif_running(qdev->ndev)) 45 if (!netif_running(qdev->ndev))
46 return status; 46 return status;
47 47
48 spin_lock(&qdev->hw_lock);
49 /* Skip the default queue, and update the outbound handler 48 /* Skip the default queue, and update the outbound handler
50 * queues if they changed. 49 * queues if they changed.
51 */ 50 */
@@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
92 } 91 }
93 } 92 }
94exit: 93exit:
95 spin_unlock(&qdev->hw_lock);
96 return status; 94 return status;
97} 95}
98 96
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 3d0efea32111..61680715cde0 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -34,7 +34,6 @@
34#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
35#include <linux/ethtool.h> 35#include <linux/ethtool.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
39#include <linux/delay.h> 38#include <linux/delay.h>
40#include <linux/mm.h> 39#include <linux/mm.h>
@@ -1926,12 +1925,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1926 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 1925 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1927 if (status) 1926 if (status)
1928 return; 1927 return;
1929 spin_lock(&qdev->hw_lock);
1930 if (ql_set_mac_addr_reg 1928 if (ql_set_mac_addr_reg
1931 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1929 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1932 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); 1930 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1933 } 1931 }
1934 spin_unlock(&qdev->hw_lock);
1935 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1932 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1936} 1933}
1937 1934
@@ -1945,12 +1942,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1945 if (status) 1942 if (status)
1946 return; 1943 return;
1947 1944
1948 spin_lock(&qdev->hw_lock);
1949 if (ql_set_mac_addr_reg 1945 if (ql_set_mac_addr_reg
1950 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1946 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1951 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); 1947 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1952 } 1948 }
1953 spin_unlock(&qdev->hw_lock);
1954 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1949 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1955 1950
1956} 1951}
@@ -2001,15 +1996,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2001 /* 1996 /*
2002 * Check MPI processor activity. 1997 * Check MPI processor activity.
2003 */ 1998 */
2004 if (var & STS_PI) { 1999 if ((var & STS_PI) &&
2000 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2005 /* 2001 /*
2006 * We've got an async event or mailbox completion. 2002 * We've got an async event or mailbox completion.
2007 * Handle it and clear the source of the interrupt. 2003 * Handle it and clear the source of the interrupt.
2008 */ 2004 */
2009 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); 2005 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2010 ql_disable_completion_interrupt(qdev, intr_context->intr); 2006 ql_disable_completion_interrupt(qdev, intr_context->intr);
2011 queue_delayed_work_on(smp_processor_id(), qdev->workqueue, 2007 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2012 &qdev->mpi_work, 0); 2008 queue_delayed_work_on(smp_processor_id(),
2009 qdev->workqueue, &qdev->mpi_work, 0);
2013 work_done++; 2010 work_done++;
2014 } 2011 }
2015 2012
@@ -3585,7 +3582,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3585 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3582 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3586 if (status) 3583 if (status)
3587 return; 3584 return;
3588 spin_lock(&qdev->hw_lock);
3589 /* 3585 /*
3590 * Set or clear promiscuous mode if a 3586 * Set or clear promiscuous mode if a
3591 * transition is taking place. 3587 * transition is taking place.
@@ -3662,7 +3658,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3662 } 3658 }
3663 } 3659 }
3664exit: 3660exit:
3665 spin_unlock(&qdev->hw_lock);
3666 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 3661 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3667} 3662}
3668 3663
@@ -3682,10 +3677,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3682 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 3677 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3683 if (status) 3678 if (status)
3684 return status; 3679 return status;
3685 spin_lock(&qdev->hw_lock);
3686 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, 3680 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3687 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); 3681 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3688 spin_unlock(&qdev->hw_lock);
3689 if (status) 3682 if (status)
3690 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); 3683 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3691 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 3684 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -3928,7 +3921,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3928 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 3921 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3929 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 3922 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
3930 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 3923 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
3931 mutex_init(&qdev->mpi_mutex);
3932 init_completion(&qdev->ide_completion); 3924 init_completion(&qdev->ide_completion);
3933 3925
3934 if (!cards_found) { 3926 if (!cards_found) {
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 6685bd97da91..c2e43073047e 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
472{ 472{
473 int status, count; 473 int status, count;
474 474
475 mutex_lock(&qdev->mpi_mutex);
476 475
477 /* Begin polled mode for MPI */ 476 /* Begin polled mode for MPI */
478 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 477 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
541 status = -EIO; 540 status = -EIO;
542 } 541 }
543end: 542end:
544 mutex_unlock(&qdev->mpi_mutex);
545 /* End polled mode for MPI */ 543 /* End polled mode for MPI */
546 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 544 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
547 return status; 545 return status;
@@ -776,7 +774,9 @@ static int ql_idc_wait(struct ql_adapter *qdev)
776static int ql_set_port_cfg(struct ql_adapter *qdev) 774static int ql_set_port_cfg(struct ql_adapter *qdev)
777{ 775{
778 int status; 776 int status;
777 rtnl_lock();
779 status = ql_mb_set_port_cfg(qdev); 778 status = ql_mb_set_port_cfg(qdev);
779 rtnl_unlock();
780 if (status) 780 if (status)
781 return status; 781 return status;
782 status = ql_idc_wait(qdev); 782 status = ql_idc_wait(qdev);
@@ -797,7 +797,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
798 int status; 798 int status;
799 799
800 rtnl_lock();
800 status = ql_mb_get_port_cfg(qdev); 801 status = ql_mb_get_port_cfg(qdev);
802 rtnl_unlock();
801 if (status) { 803 if (status) {
802 QPRINTK(qdev, DRV, ERR, 804 QPRINTK(qdev, DRV, ERR,
803 "Bug: Failed to get port config data.\n"); 805 "Bug: Failed to get port config data.\n");
@@ -855,7 +857,9 @@ void ql_mpi_idc_work(struct work_struct *work)
855 * needs to be set. 857 * needs to be set.
856 * */ 858 * */
857 set_bit(QL_CAM_RT_SET, &qdev->flags); 859 set_bit(QL_CAM_RT_SET, &qdev->flags);
860 rtnl_lock();
858 status = ql_mb_idc_ack(qdev); 861 status = ql_mb_idc_ack(qdev);
862 rtnl_unlock();
859 if (status) { 863 if (status) {
860 QPRINTK(qdev, DRV, ERR, 864 QPRINTK(qdev, DRV, ERR,
861 "Bug: No pending IDC!\n"); 865 "Bug: No pending IDC!\n");
@@ -871,7 +875,7 @@ void ql_mpi_work(struct work_struct *work)
871 struct mbox_params *mbcp = &mbc; 875 struct mbox_params *mbcp = &mbc;
872 int err = 0; 876 int err = 0;
873 877
874 mutex_lock(&qdev->mpi_mutex); 878 rtnl_lock();
875 879
876 while (ql_read32(qdev, STS) & STS_PI) { 880 while (ql_read32(qdev, STS) & STS_PI) {
877 memset(mbcp, 0, sizeof(struct mbox_params)); 881 memset(mbcp, 0, sizeof(struct mbox_params));
@@ -884,7 +888,7 @@ void ql_mpi_work(struct work_struct *work)
884 break; 888 break;
885 } 889 }
886 890
887 mutex_unlock(&qdev->mpi_mutex); 891 rtnl_unlock();
888 ql_enable_completion_interrupt(qdev, 0); 892 ql_enable_completion_interrupt(qdev, 0);
889} 893}
890 894
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f09bc5dfe8b2..ba5d3fe753b6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
902 struct tg3 *tp = bp->priv; 902 struct tg3 *tp = bp->priv;
903 u32 val; 903 u32 val;
904 904
905 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 905 spin_lock_bh(&tp->lock);
906 return -EAGAIN;
907 906
908 if (tg3_readphy(tp, reg, &val)) 907 if (tg3_readphy(tp, reg, &val))
909 return -EIO; 908 val = -EIO;
909
910 spin_unlock_bh(&tp->lock);
910 911
911 return val; 912 return val;
912} 913}
@@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
914static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) 915static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
915{ 916{
916 struct tg3 *tp = bp->priv; 917 struct tg3 *tp = bp->priv;
918 u32 ret = 0;
917 919
918 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 920 spin_lock_bh(&tp->lock);
919 return -EAGAIN;
920 921
921 if (tg3_writephy(tp, reg, val)) 922 if (tg3_writephy(tp, reg, val))
922 return -EIO; 923 ret = -EIO;
923 924
924 return 0; 925 spin_unlock_bh(&tp->lock);
926
927 return ret;
925} 928}
926 929
927static int tg3_mdio_reset(struct mii_bus *bp) 930static int tg3_mdio_reset(struct mii_bus *bp)
@@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
1011 1014
1012static void tg3_mdio_start(struct tg3 *tp) 1015static void tg3_mdio_start(struct tg3 *tp)
1013{ 1016{
1014 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1015 mutex_lock(&tp->mdio_bus->mdio_lock);
1016 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017 mutex_unlock(&tp->mdio_bus->mdio_lock);
1018 }
1019
1020 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; 1017 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1021 tw32_f(MAC_MI_MODE, tp->mi_mode); 1018 tw32_f(MAC_MI_MODE, tp->mi_mode);
1022 udelay(80); 1019 udelay(80);
@@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp)
1041 tg3_mdio_config_5785(tp); 1038 tg3_mdio_config_5785(tp);
1042} 1039}
1043 1040
1044static void tg3_mdio_stop(struct tg3 *tp)
1045{
1046 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1047 mutex_lock(&tp->mdio_bus->mdio_lock);
1048 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
1049 mutex_unlock(&tp->mdio_bus->mdio_lock);
1050 }
1051}
1052
1053static int tg3_mdio_init(struct tg3 *tp) 1041static int tg3_mdio_init(struct tg3 *tp)
1054{ 1042{
1055 int i; 1043 int i;
@@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp)
1141 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; 1129 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1142 mdiobus_unregister(tp->mdio_bus); 1130 mdiobus_unregister(tp->mdio_bus);
1143 mdiobus_free(tp->mdio_bus); 1131 mdiobus_free(tp->mdio_bus);
1144 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1145 } 1132 }
1146} 1133}
1147 1134
@@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev)
1363 struct tg3 *tp = netdev_priv(dev); 1350 struct tg3 *tp = netdev_priv(dev);
1364 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1351 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1365 1352
1366 spin_lock(&tp->lock); 1353 spin_lock_bh(&tp->lock);
1367 1354
1368 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 1355 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1369 MAC_MODE_HALF_DUPLEX); 1356 MAC_MODE_HALF_DUPLEX);
@@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev)
1431 tp->link_config.active_speed = phydev->speed; 1418 tp->link_config.active_speed = phydev->speed;
1432 tp->link_config.active_duplex = phydev->duplex; 1419 tp->link_config.active_duplex = phydev->duplex;
1433 1420
1434 spin_unlock(&tp->lock); 1421 spin_unlock_bh(&tp->lock);
1435 1422
1436 if (linkmesg) 1423 if (linkmesg)
1437 tg3_link_report(tp); 1424 tg3_link_report(tp);
@@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp)
6392 6379
6393 tg3_nvram_lock(tp); 6380 tg3_nvram_lock(tp);
6394 6381
6395 tg3_mdio_stop(tp);
6396
6397 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 6382 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6398 6383
6399 /* No matching tg3_nvram_unlock() after this because 6384 /* No matching tg3_nvram_unlock() after this because
@@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev)
8698 8683
8699 del_timer_sync(&tp->timer); 8684 del_timer_sync(&tp->timer);
8700 8685
8686 tg3_phy_stop(tp);
8687
8701 tg3_full_lock(tp, 1); 8688 tg3_full_lock(tp, 1);
8702#if 0 8689#if 0
8703 tg3_dump_state(tp); 8690 tg3_dump_state(tp);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 524691cd9896..bab7940158e6 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2748,7 +2748,6 @@ struct tg3 {
2748#define TG3_FLG3_5701_DMA_BUG 0x00000008 2748#define TG3_FLG3_5701_DMA_BUG 0x00000008
2749#define TG3_FLG3_USE_PHYLIB 0x00000010 2749#define TG3_FLG3_USE_PHYLIB 0x00000010
2750#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2750#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2751#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
2752#define TG3_FLG3_PHY_CONNECTED 0x00000080 2751#define TG3_FLG3_PHY_CONNECTED 0x00000080
2753#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 2752#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2754#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2753#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index d032bba9bc4c..0caa8008c51c 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
418 goto halt_fail_and_release; 418 goto halt_fail_and_release;
419 } 419 }
420 memcpy(net->dev_addr, bp, ETH_ALEN); 420 memcpy(net->dev_addr, bp, ETH_ALEN);
421 memcpy(net->perm_addr, bp, ETH_ALEN);
421 422
422 /* set a nonzero filter to enable data transfers */ 423 /* set a nonzero filter to enable data transfers */
423 memset(u.set, 0, sizeof *u.set); 424 memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index a3bb49031a7f..ff4617e21426 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -873,10 +873,10 @@ static struct pcmcia_device_id serial_ids[] = {
873 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), 873 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
874 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), 874 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
875 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), 875 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
876 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), 876 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
877 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), 877 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
878 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), 878 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
879 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), 879 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"),
880 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), 880 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
881 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), 881 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
882 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ 882 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
@@ -884,9 +884,9 @@ static struct pcmcia_device_id serial_ids[] = {
884 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ 884 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
885 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ 885 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
886 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), 886 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"),
887 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "COMpad2.cis"), 887 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"),
888 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), 888 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
889 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), 889 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
890 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), 890 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
891 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), 891 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
892 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), 892 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 42e1005e2916..d065894ce38f 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -26,7 +26,6 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/device.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
31#include <linux/clk.h> 30#include <linux/clk.h>
32#include <video/da8xx-fb.h> 31#include <video/da8xx-fb.h>
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index f2de5a1acd6d..5c5a1ad1d397 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -27,8 +27,6 @@
27#include <mach/msm_iomap.h> 27#include <mach/msm_iomap.h>
28#include <mach/irqs.h> 28#include <mach/irqs.h>
29#include <mach/board.h> 29#include <mach/board.h>
30#include <linux/delay.h>
31
32#include <mach/msm_fb.h> 30#include <mach/msm_fb.h>
33#include "mddi_hw.h" 31#include "mddi_hw.h"
34 32
diff --git a/firmware/Makefile b/firmware/Makefile
index 5ea80b19785b..a6c7c3e47e42 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -67,10 +67,13 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
67fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \ 67fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
68 e100/d102e_ucode.bin 68 e100/d102e_ucode.bin
69fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin 69fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
70fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis 70fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \
71 cis/DP83903.cis cis/NE2K.cis \
72 cis/tamarack.cis
71fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis 73fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis
72fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis 74fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis
73fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis 75fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
76 cis/COMpad2.cis cis/COMpad4.cis
74fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin 77fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
75fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ 78fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
76 advansys/3550.bin advansys/38C0800.bin 79 advansys/3550.bin advansys/38C0800.bin
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 3f8c4f6bc43f..c437e14f0b11 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -597,6 +597,9 @@ Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
597 597
598File: cis/LA-PCM.cis 598File: cis/LA-PCM.cis
599 cis/PCMLM28.cis 599 cis/PCMLM28.cis
600 cis/DP83903.cis
601 cis/NE2K.cis
602 cis/tamarack.cis
600 603
601Licence: GPL 604Licence: GPL
602 605
@@ -628,6 +631,8 @@ Driver: SERIAL_8250_CS - Serial PCMCIA adapter
628 631
629File: cis/MT5634ZLX.cis 632File: cis/MT5634ZLX.cis
630 cis/RS-COM-2P.cis 633 cis/RS-COM-2P.cis
634 cis/COMpad2.cis
635 cis/COMpad4.cis
631 636
632Licence: GPL 637Licence: GPL
633 638
diff --git a/firmware/cis/COMpad2.cis.ihex b/firmware/cis/COMpad2.cis.ihex
new file mode 100644
index 000000000000..1671c5e48caa
--- /dev/null
+++ b/firmware/cis/COMpad2.cis.ihex
@@ -0,0 +1,11 @@
1:1000000001030000FF151F0401414456414E5445B1
2:10001000434800434F4D7061642D33322F38350013
3:10002000312E300000FF210202011A0501050001F6
4:10003000031B0EC18118AA61E80207E8030730B864
5:100040009E1B08820108AA6030030F1B0883010869
6:10005000AA6040030F1B08840108AA6050030F1B0D
7:0D00600008850108AA6060030F1400FF006E
8:00000001FF
9#
10# Replacement CIS for Advantech COMpad-32/85
11#
diff --git a/firmware/cis/COMpad4.cis.ihex b/firmware/cis/COMpad4.cis.ihex
new file mode 100644
index 000000000000..27bbec1921b3
--- /dev/null
+++ b/firmware/cis/COMpad4.cis.ihex
@@ -0,0 +1,9 @@
1:1000000001030000FF151F0401414456414E5445B1
2:10001000434800434F4D7061642D33322F383542D1
3:100020002D34000000FF210202011A050102000127
4:10003000011B0BC18118AA6040021F30B89E1B082B
5:0C004000820108AA6040031F1400FF00AA
6:00000001FF
7#
8# Replacement CIS for Advantech COMpad-32/85B-4
9#
diff --git a/firmware/cis/DP83903.cis.ihex b/firmware/cis/DP83903.cis.ihex
new file mode 100644
index 000000000000..6d73ea3cf1b8
--- /dev/null
+++ b/firmware/cis/DP83903.cis.ihex
@@ -0,0 +1,14 @@
1:1000000001030000FF152904014D756C74696675C4
2:100010006E6374696F6E20436172640000004E531A
3:1000200043204D46204C414E2F4D6F64656D00FFBF
4:1000300020047501000021020000060B02004900A7
5:100040000000006A000000FF00130343495321022F
6:1000500006001A060517201077021B0C970179017C
7:10006000556530FFFF284000FF001303434953212B
8:100070000202001A060507401077021B09870119C2
9:0800800001552330FFFFFF00D2
10:00000001FF
11#
12# This CIS is for cards based on the National Semiconductor
13# DP83903 Multiple Function Interface Chip
14#
diff --git a/firmware/cis/NE2K.cis.ihex b/firmware/cis/NE2K.cis.ihex
new file mode 100644
index 000000000000..1bb40fc4759f
--- /dev/null
+++ b/firmware/cis/NE2K.cis.ihex
@@ -0,0 +1,8 @@
1:1000000001030000FF1515040150434D4349410011
2:1000100045746865726E6574000000FF2102060079
3:100020001A050120F803031B09E001190155653089
4:06003000FFFF1400FF00B9
5:00000001FF
6#
7# Replacement CIS for various busted NE2000-compatible cards
8#
diff --git a/firmware/cis/tamarack.cis.ihex b/firmware/cis/tamarack.cis.ihex
new file mode 100644
index 000000000000..1e86547fb361
--- /dev/null
+++ b/firmware/cis/tamarack.cis.ihex
@@ -0,0 +1,10 @@
1:100000000103D400FF17034100FF152404015441EC
2:100010004D415241434B0045746865726E657400F2
3:10002000410030303437343331313830303100FF33
4:10003000210206001A050120F803031B14E08119B0
5:100040003F554D5D06864626E551000F100F30FFE7
6:05005000FF1400FF0099
7:00000001FF
8#
9# Replacement CIS for Surecom, Tamarack NE2000 cards
10#
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 56013371f9f3..a44a7897fd4d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -23,7 +23,6 @@
23#include <asm/io.h> 23#include <asm/io.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/mm.h>
27#include <linux/memory.h> 26#include <linux/memory.h>
28#include <asm/sections.h> 27#include <asm/sections.h>
29 28
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 2281c2cbfe2b..5033ce0d254b 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -94,6 +94,7 @@ static const struct file_operations proc_kpagecount_operations = {
94#define KPF_COMPOUND_TAIL 16 94#define KPF_COMPOUND_TAIL 16
95#define KPF_HUGE 17 95#define KPF_HUGE 17
96#define KPF_UNEVICTABLE 18 96#define KPF_UNEVICTABLE 18
97#define KPF_HWPOISON 19
97#define KPF_NOPAGE 20 98#define KPF_NOPAGE 20
98 99
99#define KPF_KSM 21 100#define KPF_KSM 21
@@ -180,6 +181,10 @@ static u64 get_uflags(struct page *page)
180 u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); 181 u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
181 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); 182 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
182 183
184#ifdef CONFIG_MEMORY_FAILURE
185 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
186#endif
187
183#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 188#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
184 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); 189 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
185#endif 190#endif
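
[Annotation, not part of the patch] With the fs/proc/page.c hunk above, bit 19 of each /proc/kpageflags entry now reports PG_hwpoison whenever CONFIG_MEMORY_FAILURE is enabled. A minimal userspace sketch of reading that bit is shown below; it assumes root privileges, a caller-supplied PFN, and the documented one-64-bit-word-per-page-frame layout of /proc/kpageflags, with deliberately thin error handling.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KPF_HWPOISON 19	/* matches the new definition in fs/proc/page.c */

int main(int argc, char **argv)
{
	unsigned long long pfn = argc > 1 ? strtoull(argv[1], NULL, 0) : 0;
	uint64_t flags;
	int fd = open("/proc/kpageflags", O_RDONLY);

	if (fd < 0)
		return 1;
	/* one 64-bit flags word per page frame, indexed by PFN */
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags)) {
		close(fd);
		return 1;
	}
	printf("pfn %llu hwpoison=%d\n", pfn,
	       (int)((flags >> KPF_HWPOISON) & 1));
	close(fd);
	return 0;
}
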
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 3b461dffe244..3273a0c5043b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -16,7 +16,7 @@ struct __kernel_sockaddr_storage {
16 /* _SS_MAXSIZE value minus size of ss_family */ 16 /* _SS_MAXSIZE value minus size of ss_family */
17} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ 17} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
18 18
19#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) 19#ifdef __KERNEL__
20 20
21#include <asm/socket.h> /* arch-dependent defines */ 21#include <asm/socket.h> /* arch-dependent defines */
22#include <linux/sockios.h> /* the SIOCxxx I/O controls */ 22#include <linux/sockios.h> /* the SIOCxxx I/O controls */
@@ -101,21 +101,6 @@ struct cmsghdr {
101 ((char *)(cmsg) - (char *)(mhdr)->msg_control))) 101 ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
102 102
103/* 103/*
104 * This mess will go away with glibc
105 */
106
107#ifdef __KERNEL__
108#define __KINLINE static inline
109#elif defined(__GNUC__)
110#define __KINLINE static __inline__
111#elif defined(__cplusplus)
112#define __KINLINE static inline
113#else
114#define __KINLINE static
115#endif
116
117
118/*
119 * Get the next cmsg header 104 * Get the next cmsg header
120 * 105 *
121 * PLEASE, do not touch this function. If you think, that it is 106 * PLEASE, do not touch this function. If you think, that it is
@@ -128,7 +113,7 @@ struct cmsghdr {
128 * ancillary object DATA. --ANK (980731) 113 * ancillary object DATA. --ANK (980731)
129 */ 114 */
130 115
131__KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, 116static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
132 struct cmsghdr *__cmsg) 117 struct cmsghdr *__cmsg)
133{ 118{
134 struct cmsghdr * __ptr; 119 struct cmsghdr * __ptr;
@@ -140,7 +125,7 @@ __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
140 return __ptr; 125 return __ptr;
141} 126}
142 127
143__KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) 128static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
144{ 129{
145 return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); 130 return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
146} 131}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6d7020490f94..3e1c36e7998f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
726 /* "Retrigger" the interrupt to get things going */ 726 /* "Retrigger" the interrupt to get things going */
727 retrigger_next_event(NULL); 727 retrigger_next_event(NULL);
728 local_irq_restore(flags); 728 local_irq_restore(flags);
729 printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
730 smp_processor_id());
731 return 1; 729 return 1;
732} 730}
733 731
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0f86feb6db0c..e491fb087939 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1030,14 +1030,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
1030 update_context_time(ctx); 1030 update_context_time(ctx);
1031 1031
1032 perf_disable(); 1032 perf_disable();
1033 if (ctx->nr_active) { 1033 if (ctx->nr_active)
1034 list_for_each_entry(event, &ctx->group_list, group_entry) { 1034 list_for_each_entry(event, &ctx->group_list, group_entry)
1035 if (event != event->group_leader) 1035 group_sched_out(event, cpuctx, ctx);
1036 event_sched_out(event, cpuctx, ctx); 1036
1037 else
1038 group_sched_out(event, cpuctx, ctx);
1039 }
1040 }
1041 perf_enable(); 1037 perf_enable();
1042 out: 1038 out:
1043 spin_unlock(&ctx->lock); 1039 spin_unlock(&ctx->lock);
@@ -1258,12 +1254,8 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1258 if (event->cpu != -1 && event->cpu != cpu) 1254 if (event->cpu != -1 && event->cpu != cpu)
1259 continue; 1255 continue;
1260 1256
1261 if (event != event->group_leader) 1257 if (group_can_go_on(event, cpuctx, 1))
1262 event_sched_in(event, cpuctx, ctx, cpu); 1258 group_sched_in(event, cpuctx, ctx, cpu);
1263 else {
1264 if (group_can_go_on(event, cpuctx, 1))
1265 group_sched_in(event, cpuctx, ctx, cpu);
1266 }
1267 1259
1268 /* 1260 /*
1269 * If this pinned group hasn't been scheduled, 1261 * If this pinned group hasn't been scheduled,
@@ -1291,15 +1283,9 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1291 if (event->cpu != -1 && event->cpu != cpu) 1283 if (event->cpu != -1 && event->cpu != cpu)
1292 continue; 1284 continue;
1293 1285
1294 if (event != event->group_leader) { 1286 if (group_can_go_on(event, cpuctx, can_add_hw))
1295 if (event_sched_in(event, cpuctx, ctx, cpu)) 1287 if (group_sched_in(event, cpuctx, ctx, cpu))
1296 can_add_hw = 0; 1288 can_add_hw = 0;
1297 } else {
1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1300 can_add_hw = 0;
1301 }
1302 }
1303 } 1289 }
1304 perf_enable(); 1290 perf_enable();
1305 out: 1291 out:
@@ -4781,9 +4767,7 @@ int perf_event_init_task(struct task_struct *child)
4781 * We dont have to disable NMIs - we are only looking at 4767 * We dont have to disable NMIs - we are only looking at
4782 * the list, not manipulating it: 4768 * the list, not manipulating it:
4783 */ 4769 */
4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { 4770 list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
4785 if (event != event->group_leader)
4786 continue;
4787 4771
4788 if (!event->attr.inherit) { 4772 if (!event->attr.inherit) {
4789 inherited_all = 0; 4773 inherited_all = 0;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46592feab5a6..3724756e41ca 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
225 if (ftrace_trace_function == ftrace_stub) 225 if (ftrace_trace_function == ftrace_stub)
226 return; 226 return;
227 227
228#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
228 func = ftrace_trace_function; 229 func = ftrace_trace_function;
230#else
231 func = __ftrace_trace_function;
232#endif
229 233
230 if (ftrace_pid_trace) { 234 if (ftrace_pid_trace) {
231 set_ftrace_pid_function(func); 235 set_ftrace_pid_function(func);
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
501 return 1; 501 return 1;
502 } 502 }
503 503
504 if (!register_tracer(&kmem_tracer)) { 504 if (register_tracer(&kmem_tracer) != 0) {
505 pr_warning("Warning: could not register the kmem tracer\n"); 505 pr_warning("Warning: could not register the kmem tracer\n");
506 return 1; 506 return 1;
507 } 507 }
diff --git a/mm/Kconfig b/mm/Kconfig
index edd300aca173..57963c6063d1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -224,7 +224,9 @@ config KSM
224 the many instances by a single resident page with that content, so 224 the many instances by a single resident page with that content, so
225 saving memory until one or another app needs to modify the content. 225 saving memory until one or another app needs to modify the content.
226 Recommended for use with KVM, or with other duplicative applications. 226 Recommended for use with KVM, or with other duplicative applications.
227 See Documentation/vm/ksm.txt for more information. 227 See Documentation/vm/ksm.txt for more information: KSM is inactive
228 until a program has madvised that an area is MADV_MERGEABLE, and
229 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
228 230
229config DEFAULT_MMAP_MIN_ADDR 231config DEFAULT_MMAP_MIN_ADDR
230 int "Low address space to protect from user allocation" 232 int "Low address space to protect from user allocation"
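
[Annotation, not part of the patch] The expanded Kconfig help above explains that KSM only acts on memory an application has advised with MADV_MERGEABLE, and only while root has written 1 to /sys/kernel/mm/ksm/run (when CONFIG_SYSFS is set). Purely as an illustration, a program might opt a mapping in roughly as follows; the mapping size and the trailing pause() are arbitrary choices for the sketch, and the fallback MADV_MERGEABLE definition is only an assumption for libc headers that predate KSM.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* Linux value; assumed if libc headers lack it */
#endif

int main(void)
{
	size_t len = 16 * 4096;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0, len);	/* identical pages are merge candidates */
	if (madvise(buf, len, MADV_MERGEABLE))	/* needs a CONFIG_KSM kernel */
		perror("madvise(MADV_MERGEABLE)");
	pause();	/* keep the mapping alive so ksmd can scan it */
	return 0;
}
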
diff --git a/mm/ksm.c b/mm/ksm.c
index f7edac356f46..bef1af4f77e3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
184 sizeof(struct __struct), __alignof__(struct __struct),\ 184 sizeof(struct __struct), __alignof__(struct __struct),\
185 (__flags), NULL) 185 (__flags), NULL)
186 186
187static void __init ksm_init_max_kernel_pages(void)
188{
189 ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
190}
191
192static int __init ksm_slab_init(void) 187static int __init ksm_slab_init(void)
193{ 188{
194 rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); 189 rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
1673 struct task_struct *ksm_thread; 1668 struct task_struct *ksm_thread;
1674 int err; 1669 int err;
1675 1670
1676 ksm_init_max_kernel_pages(); 1671 ksm_max_kernel_pages = totalram_pages / 4;
1677 1672
1678 err = ksm_slab_init(); 1673 err = ksm_slab_init();
1679 if (err) 1674 if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
1697 kthread_stop(ksm_thread); 1692 kthread_stop(ksm_thread);
1698 goto out_free2; 1693 goto out_free2;
1699 } 1694 }
1695#else
1696 ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
1697
1700#endif /* CONFIG_SYSFS */ 1698#endif /* CONFIG_SYSFS */
1701 1699
1702 return 0; 1700 return 0;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 69511e663234..2f7c9d75c552 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,7 +25,6 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/pfn.h> 26#include <linux/pfn.h>
27#include <linux/kmemleak.h> 27#include <linux/kmemleak.h>
28#include <linux/highmem.h>
29#include <asm/atomic.h> 28#include <asm/atomic.h>
30#include <asm/uaccess.h> 29#include <asm/uaccess.h>
31#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 821d30918cfc..427ded841224 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -366,13 +366,13 @@ static ssize_t wireless_show(struct device *d, char *buf,
366 const struct iw_statistics *iw; 366 const struct iw_statistics *iw;
367 ssize_t ret = -EINVAL; 367 ssize_t ret = -EINVAL;
368 368
369 read_lock(&dev_base_lock); 369 rtnl_lock();
370 if (dev_isalive(dev)) { 370 if (dev_isalive(dev)) {
371 iw = get_wireless_stats(dev); 371 iw = get_wireless_stats(dev);
372 if (iw) 372 if (iw)
373 ret = (*format)(iw, buf); 373 ret = (*format)(iw, buf);
374 } 374 }
375 read_unlock(&dev_base_lock); 375 rtnl_unlock();
376 376
377 return ret; 377 return ret;
378} 378}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b69455217ed6..86acdba0a97d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -964,7 +964,7 @@ static ssize_t pktgen_if_write(struct file *file,
964 if (value == 0x7FFFFFFF) 964 if (value == 0x7FFFFFFF)
965 pkt_dev->delay = ULLONG_MAX; 965 pkt_dev->delay = ULLONG_MAX;
966 else 966 else
967 pkt_dev->delay = (u64)value * NSEC_PER_USEC; 967 pkt_dev->delay = (u64)value;
968 968
969 sprintf(pg_result, "OK: delay=%llu", 969 sprintf(pg_result, "OK: delay=%llu",
970 (unsigned long long) pkt_dev->delay); 970 (unsigned long long) pkt_dev->delay);
@@ -2212,7 +2212,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
2212 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 2212 if (pkt_dev->flags & F_QUEUE_MAP_CPU)
2213 pkt_dev->cur_queue_map = smp_processor_id(); 2213 pkt_dev->cur_queue_map = smp_processor_id();
2214 2214
2215 else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { 2215 else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
2216 __u16 t; 2216 __u16 t;
2217 if (pkt_dev->flags & F_QUEUE_MAP_RND) { 2217 if (pkt_dev->flags & F_QUEUE_MAP_RND) {
2218 t = random32() % 2218 t = random32() %
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e92f1fd28aa5..5df2f6a0b0f0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1077,12 +1077,16 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1077 ip_mc_up(in_dev); 1077 ip_mc_up(in_dev);
1078 /* fall through */ 1078 /* fall through */
1079 case NETDEV_CHANGEADDR: 1079 case NETDEV_CHANGEADDR:
1080 if (IN_DEV_ARP_NOTIFY(in_dev)) 1080 /* Send gratuitous ARP to notify of link change */
1081 arp_send(ARPOP_REQUEST, ETH_P_ARP, 1081 if (IN_DEV_ARP_NOTIFY(in_dev)) {
1082 in_dev->ifa_list->ifa_address, 1082 struct in_ifaddr *ifa = in_dev->ifa_list;
1083 dev, 1083
1084 in_dev->ifa_list->ifa_address, 1084 if (ifa)
1085 NULL, dev->dev_addr, NULL); 1085 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1086 ifa->ifa_address, dev,
1087 ifa->ifa_address, NULL,
1088 dev->dev_addr, NULL);
1089 }
1086 break; 1090 break;
1087 case NETDEV_DOWN: 1091 case NETDEV_DOWN:
1088 ip_mc_down(in_dev); 1092 ip_mc_down(in_dev);
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index 1c2ed3090cce..a7910099d6fd 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -31,6 +31,9 @@ OPTIONS
31-w:: 31-w::
32--width=:: 32--width=::
33 Select the width of the SVG file (default: 1000) 33 Select the width of the SVG file (default: 1000)
34-p::
35--power-only::
36 Only output the CPU power section of the diagram
34 37
35 38
36SEE ALSO 39SEE ALSO
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b5f1953b6144..5881943f0c34 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -728,7 +728,7 @@ $(BUILT_INS): perf$X
728common-cmds.h: util/generate-cmdlist.sh command-list.txt 728common-cmds.h: util/generate-cmdlist.sh command-list.txt
729 729
730common-cmds.h: $(wildcard Documentation/perf-*.txt) 730common-cmds.h: $(wildcard Documentation/perf-*.txt)
731 $(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@ 731 $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
732 732
733$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh 733$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
734 $(QUIET_GEN)$(RM) $@ $@+ && \ 734 $(QUIET_GEN)$(RM) $@ $@+ && \
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 4405681b3134..702d8fe58fbc 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -46,6 +46,8 @@ static u64 turbo_frequency;
46 46
47static u64 first_time, last_time; 47static u64 first_time, last_time;
48 48
49static int power_only;
50
49 51
50static struct perf_header *header; 52static struct perf_header *header;
51 53
@@ -547,7 +549,7 @@ static void end_sample_processing(void)
547 u64 cpu; 549 u64 cpu;
548 struct power_event *pwr; 550 struct power_event *pwr;
549 551
550 for (cpu = 0; cpu < numcpus; cpu++) { 552 for (cpu = 0; cpu <= numcpus; cpu++) {
551 pwr = malloc(sizeof(struct power_event)); 553 pwr = malloc(sizeof(struct power_event));
552 if (!pwr) 554 if (!pwr)
553 return; 555 return;
@@ -871,7 +873,7 @@ static int determine_display_tasks(u64 threshold)
871 /* no exit marker, task kept running to the end */ 873 /* no exit marker, task kept running to the end */
872 if (p->end_time == 0) 874 if (p->end_time == 0)
873 p->end_time = last_time; 875 p->end_time = last_time;
874 if (p->total_time >= threshold) 876 if (p->total_time >= threshold && !power_only)
875 p->display = 1; 877 p->display = 1;
876 878
877 c = p->all; 879 c = p->all;
@@ -882,7 +884,7 @@ static int determine_display_tasks(u64 threshold)
882 if (c->start_time == 1) 884 if (c->start_time == 1)
883 c->start_time = first_time; 885 c->start_time = first_time;
884 886
885 if (c->total_time >= threshold) { 887 if (c->total_time >= threshold && !power_only) {
886 c->display = 1; 888 c->display = 1;
887 count++; 889 count++;
888 } 890 }
@@ -1134,6 +1136,8 @@ static const struct option options[] = {
1134 "output file name"), 1136 "output file name"),
1135 OPT_INTEGER('w', "width", &svg_page_width, 1137 OPT_INTEGER('w', "width", &svg_page_width,
1136 "page width"), 1138 "page width"),
1139 OPT_BOOLEAN('p', "power-only", &power_only,
1140 "output power data only"),
1137 OPT_END() 1141 OPT_END()
1138}; 1142};
1139 1143
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1ca88896eee4..37512e936235 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -782,6 +782,7 @@ static const char *skip_symbols[] = {
782 "exit_idle", 782 "exit_idle",
783 "mwait_idle", 783 "mwait_idle",
784 "mwait_idle_with_hints", 784 "mwait_idle_with_hints",
785 "poll_idle",
785 "ppc64_runlatch_off", 786 "ppc64_runlatch_off",
786 "pseries_dedicated_idle_sleep", 787 "pseries_dedicated_idle_sleep",
787 NULL 788 NULL
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index a778fd0f4ae4..856655d8b0b8 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -28,7 +28,7 @@ static u64 turbo_frequency, max_freq;
28 28
29int svg_page_width = 1000; 29int svg_page_width = 1000;
30 30
31#define MIN_TEXT_SIZE 0.001 31#define MIN_TEXT_SIZE 0.01
32 32
33static u64 total_height; 33static u64 total_height;
34static FILE *svgfile; 34static FILE *svgfile;
@@ -217,6 +217,18 @@ static char *cpu_model(void)
217 } 217 }
218 fclose(file); 218 fclose(file);
219 } 219 }
220
221 /* CPU type */
222 file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r");
223 if (file) {
224 while (fgets(buf, 255, file)) {
225 unsigned int freq;
226 freq = strtoull(buf, NULL, 10);
227 if (freq > max_freq)
228 max_freq = freq;
229 }
230 fclose(file);
231 }
220 return cpu_m; 232 return cpu_m;
221} 233}
222 234
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e79c54034bcd..b7c78a403dc2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -850,6 +850,19 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
850 850
851} 851}
852 852
853static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
854 struct mm_struct *mm,
855 unsigned long address,
856 pte_t pte)
857{
858 struct kvm *kvm = mmu_notifier_to_kvm(mn);
859
860 spin_lock(&kvm->mmu_lock);
861 kvm->mmu_notifier_seq++;
862 kvm_set_spte_hva(kvm, address, pte);
863 spin_unlock(&kvm->mmu_lock);
864}
865
853static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 866static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
854 struct mm_struct *mm, 867 struct mm_struct *mm,
855 unsigned long start, 868 unsigned long start,
@@ -929,6 +942,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
929 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 942 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
930 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 943 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
931 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 944 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
945 .change_pte = kvm_mmu_notifier_change_pte,
932 .release = kvm_mmu_notifier_release, 946 .release = kvm_mmu_notifier_release,
933}; 947};
934#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 948#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */