-rw-r--r--  Documentation/input/event-codes.txt | 262
-rw-r--r--  MAINTAINERS | 46
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/Makefile | 2
-rw-r--r--  arch/alpha/kernel/core_mcpcia.c | 12
-rw-r--r--  arch/alpha/kernel/err_titan.c | 4
-rw-r--r--  arch/alpha/kernel/irq_alpha.c | 2
-rw-r--r--  arch/alpha/kernel/setup.c | 6
-rw-r--r--  arch/alpha/kernel/smc37c93x.c | 3
-rw-r--r--  arch/alpha/kernel/sys_wildfire.c | 5
-rw-r--r--  arch/alpha/kernel/time.c | 1
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/Kconfig.debug | 11
-rw-r--r--  arch/arm/common/Makefile | 1
-rw-r--r--  arch/arm/include/asm/thread_notify.h | 1
-rw-r--r--  arch/arm/kernel/Makefile | 2
-rw-r--r--  arch/arm/kernel/elf.c | 17
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 7
-rw-r--r--  arch/arm/kernel/perf_event.c | 2
-rw-r--r--  arch/arm/kernel/process.c | 2
-rw-r--r--  arch/arm/kernel/traps.c | 3
-rw-r--r--  arch/arm/mach-mmp/include/mach/gpio.h | 2
-rw-r--r--  arch/arm/mach-mmp/include/mach/mfp-pxa168.h | 9
-rw-r--r--  arch/arm/mach-msm/board-qsd8x50.c | 5
-rw-r--r--  arch/arm/mach-msm/timer.c | 2
-rw-r--r--  arch/arm/mach-pxa/include/mach/gpio.h | 17
-rw-r--r--  arch/arm/mach-pxa/include/mach/irqs.h | 3
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c | 2
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 2
-rw-r--r--  arch/arm/mach-tegra/gpio.c | 6
-rw-r--r--  arch/arm/mach-tegra/tegra2_clocks.c | 9
-rw-r--r--  arch/arm/mm/mmap.c | 4
-rw-r--r--  arch/arm/mm/proc-arm920.S | 2
-rw-r--r--  arch/arm/mm/proc-arm926.S | 2
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 2
-rw-r--r--  arch/arm/mm/proc-v6.S | 2
-rw-r--r--  arch/arm/mm/proc-v7.S | 2
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 2
-rw-r--r--  arch/arm/mm/proc-xscale.S | 2
-rw-r--r--  arch/arm/plat-s5p/pm.c | 11
-rw-r--r--  arch/arm/plat-samsung/pm-check.c | 6
-rw-r--r--  arch/arm/plat-samsung/pm.c | 5
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 34
-rw-r--r--  arch/blackfin/include/asm/system.h | 36
-rw-r--r--  arch/blackfin/kernel/gptimers.c | 2
-rw-r--r--  arch/blackfin/kernel/time-ts.c | 8
-rw-r--r--  arch/blackfin/mach-common/smp.c | 19
-rw-r--r--  arch/microblaze/Kconfig | 1
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 16
-rw-r--r--  arch/powerpc/include/asm/pte-common.h | 2
-rw-r--r--  arch/powerpc/kernel/cputable.c | 2
-rw-r--r--  arch/powerpc/kernel/crash.c | 12
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 8
-rw-r--r--  arch/powerpc/kernel/perf_event.c | 37
-rw-r--r--  arch/powerpc/kernel/time.c | 3
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 8
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 12
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 5
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 4
-rw-r--r--  arch/um/Kconfig.x86 | 4
-rw-r--r--  arch/um/include/asm/bug.h | 6
-rw-r--r--  arch/x86/include/asm/msr-index.h | 4
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 19
-rw-r--r--  arch/x86/kernel/smpboot.c | 23
-rw-r--r--  arch/x86/platform/ce4100/falconfalls.dts | 2
-rw-r--r--  arch/x86/platform/mrst/mrst.c | 10
-rw-r--r--  block/blk-core.c | 169
-rw-r--r--  block/blk-exec.c | 2
-rw-r--r--  block/blk-flush.c | 4
-rw-r--r--  block/blk-sysfs.c | 3
-rw-r--r--  block/blk.h | 1
-rw-r--r--  block/cfq-iosched.c | 6
-rw-r--r--  block/elevator.c | 4
-rw-r--r--  drivers/i2c/algos/i2c-algo-bit.c | 22
-rw-r--r--  drivers/i2c/i2c-core.c | 6
-rw-r--r--  drivers/input/evdev.c | 33
-rw-r--r--  drivers/input/input.c | 40
-rw-r--r--  drivers/input/keyboard/twl4030_keypad.c | 6
-rw-r--r--  drivers/input/misc/xen-kbdfront.c | 13
-rw-r--r--  drivers/input/touchscreen/h3600_ts_input.c | 17
-rw-r--r--  drivers/leds/leds-regulator.c | 4
-rw-r--r--  drivers/md/dm-raid.c | 8
-rw-r--r--  drivers/md/md.c | 87
-rw-r--r--  drivers/md/md.h | 26
-rw-r--r--  drivers/md/raid1.c | 29
-rw-r--r--  drivers/md/raid10.c | 27
-rw-r--r--  drivers/md/raid5.c | 61
-rw-r--r--  drivers/md/raid5.h | 2
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 8
-rw-r--r--  drivers/pcmcia/pxa2xx_balloon3.c | 5
-rw-r--r--  drivers/pcmcia/pxa2xx_trizeps4.c | 15
-rw-r--r--  drivers/rapidio/rio.c | 5
-rw-r--r--  drivers/rapidio/switches/idt_gen2.c | 1
-rw-r--r--  drivers/rtc/class.c | 2
-rw-r--r--  drivers/rtc/interface.c | 26
-rw-r--r--  drivers/rtc/rtc-bfin.c | 2
-rw-r--r--  drivers/rtc/rtc-mc13xxx.c | 1
-rw-r--r--  drivers/rtc/rtc-s3c.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/usb/Kconfig | 1
-rw-r--r--  drivers/usb/core/devices.c | 10
-rw-r--r--  drivers/usb/core/hcd.c | 2
-rw-r--r--  drivers/usb/core/hub.c | 12
-rw-r--r--  drivers/usb/gadget/f_audio.c | 1
-rw-r--r--  drivers/usb/gadget/f_eem.c | 8
-rw-r--r--  drivers/usb/gadget/fsl_qe_udc.c | 20
-rw-r--r--  drivers/usb/gadget/inode.c | 4
-rw-r--r--  drivers/usb/gadget/pch_udc.c | 8
-rw-r--r--  drivers/usb/gadget/r8a66597-udc.c | 2
-rw-r--r--  drivers/usb/host/ehci-q.c | 15
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 2
-rw-r--r--  drivers/usb/host/ohci-au1xxx.c | 2
-rw-r--r--  drivers/usb/host/pci-quirks.c | 117
-rw-r--r--  drivers/usb/host/xhci-mem.c | 106
-rw-r--r--  drivers/usb/host/xhci-pci.c | 4
-rw-r--r--  drivers/usb/host/xhci-ring.c | 219
-rw-r--r--  drivers/usb/host/xhci.c | 23
-rw-r--r--  drivers/usb/host/xhci.h | 11
-rw-r--r--  drivers/usb/musb/Kconfig | 6
-rw-r--r--  drivers/usb/musb/blackfin.c | 24
-rw-r--r--  drivers/usb/musb/cppi_dma.c | 27
-rw-r--r--  drivers/usb/musb/musb_core.c | 2
-rw-r--r--  drivers/usb/musb/musb_core.h | 5
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 4
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 8
-rw-r--r--  drivers/usb/musb/omap2430.c | 3
-rw-r--r--  drivers/usb/musb/ux500.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 5
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 12
-rw-r--r--  drivers/usb/serial/option.c | 5
-rw-r--r--  drivers/usb/serial/qcserial.c | 31
-rw-r--r--  drivers/video/pxafb.c | 4
-rw-r--r--  fs/9p/fid.c | 15
-rw-r--r--  fs/9p/v9fs.h | 1
-rw-r--r--  fs/9p/vfs_dentry.c | 4
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 2
-rw-r--r--  fs/9p/vfs_super.c | 80
-rw-r--r--  fs/binfmt_elf.c | 6
-rw-r--r--  fs/btrfs/acl.c | 9
-rw-r--r--  fs/btrfs/ctree.h | 9
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 125
-rw-r--r--  fs/btrfs/extent_io.c | 82
-rw-r--r--  fs/btrfs/extent_io.h | 2
-rw-r--r--  fs/btrfs/file.c | 21
-rw-r--r--  fs/btrfs/free-space-cache.c | 119
-rw-r--r--  fs/btrfs/inode.c | 165
-rw-r--r--  fs/btrfs/ioctl.c | 2
-rw-r--r--  fs/btrfs/super.c | 42
-rw-r--r--  fs/btrfs/transaction.c | 48
-rw-r--r--  fs/btrfs/transaction.h | 4
-rw-r--r--  fs/btrfs/xattr.c | 33
-rw-r--r--  fs/dcache.c | 2
-rw-r--r--  fs/fhandle.c | 1
-rw-r--r--  fs/filesystems.c | 3
-rw-r--r--  fs/namei.c | 1
-rw-r--r--  fs/partitions/ldm.c | 16
-rw-r--r--  fs/proc/base.c | 9
-rw-r--r--  fs/ramfs/file-nommu.c | 1
-rw-r--r--  fs/ubifs/debug.h | 152
-rw-r--r--  fs/ubifs/file.c | 3
-rw-r--r--  include/linux/blkdev.h | 28
-rw-r--r--  include/linux/device-mapper.h | 1
-rw-r--r--  include/linux/input.h | 10
-rw-r--r--  include/linux/input/mt.h | 6
-rw-r--r--  include/linux/memcontrol.h | 2
-rw-r--r--  include/linux/pid.h | 2
-rw-r--r--  include/linux/rio.h | 2
-rw-r--r--  include/linux/rio_ids.h | 1
-rw-r--r--  include/linux/rtc.h | 2
-rw-r--r--  include/linux/sched.h | 3
-rw-r--r--  include/linux/vmstat.h | 7
-rw-r--r--  include/net/9p/9p.h | 2
-rw-r--r--  include/net/9p/client.h | 5
-rw-r--r--  include/trace/events/block.h | 30
-rw-r--r--  kernel/futex.c | 2
-rw-r--r--  kernel/perf_event.c | 12
-rw-r--r--  kernel/pid.c | 5
-rw-r--r--  kernel/sched.c | 2
-rw-r--r--  kernel/trace/blktrace.c | 33
-rw-r--r--  lib/kstrtox.c | 9
-rw-r--r--  lib/test-kstrtox.c | 32
-rw-r--r--  mm/huge_memory.c | 49
-rw-r--r--  mm/memory.c | 2
-rw-r--r--  mm/memory_hotplug.c | 2
-rw-r--r--  mm/mmap.c | 2
-rw-r--r--  mm/oom_kill.c | 28
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/shmem.c | 6
-rw-r--r--  mm/vmscan.c | 24
-rw-r--r--  mm/vmstat.c | 18
-rw-r--r--  net/9p/client.c | 29
-rw-r--r--  net/9p/protocol.c | 7
-rw-r--r--  net/9p/trans_common.c | 2
-rw-r--r--  net/9p/trans_virtio.c | 15
-rw-r--r--  net/ceph/osd_client.c | 12
-rw-r--r--  tools/perf/util/cgroup.c | 2
199 files changed, 2308 insertions(+), 1163 deletions(-)
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
new file mode 100644
index 000000000000..23fcb05175be
--- /dev/null
+++ b/Documentation/input/event-codes.txt
@@ -0,0 +1,262 @@
The input protocol uses a map of types and codes to express input device values
to userspace. This document describes the types and codes and how and when they
may be used.

A single hardware event generates multiple input events. Each input event
contains the new value of a single data item. A special event type, EV_SYN, is
used to separate input events into packets of input data changes occurring at
the same moment in time. In the following, the term "event" refers to a single
input event encompassing a type, code, and value.

The input protocol is a stateful protocol. Events are emitted only when values
of event codes have changed. However, the state is maintained within the Linux
input subsystem; drivers do not need to maintain the state and may attempt to
emit unchanged values without harm. Userspace may obtain the current state of
event code values using the EVIOCG* ioctls defined in linux/input.h. The event
reports supported by a device are also provided by sysfs in
class/input/event*/device/capabilities/, and the properties of a device are
provided in class/input/event*/device/properties.

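For illustration, a minimal userspace sketch of both mechanisms: querying the
current key state with an EVIOCG* ioctl, then reading the change-driven event
stream. The device node path is an assumption and error handling is trimmed:

  #include <linux/input.h>
  #include <sys/ioctl.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          struct input_event ev;
          unsigned char keys[KEY_MAX / 8 + 1];
          int fd = open("/dev/input/event0", O_RDONLY); /* assumed node */

          if (fd < 0)
                  return 1;

          /* Snapshot of the current key state, independent of events. */
          memset(keys, 0, sizeof(keys));
          ioctl(fd, EVIOCGKEY(sizeof(keys)), keys);
          printf("KEY_A is %s\n",
                 keys[KEY_A / 8] & (1 << (KEY_A % 8)) ? "down" : "up");

          /* Events arrive only when a value changes. */
          while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                  printf("type %u code %u value %d\n",
                         ev.type, ev.code, ev.value);
          close(fd);
          return 0;
  }
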
Types:
==========
Types are groupings of codes under a logical input construct. Each type has a
set of applicable codes to be used in generating events. See the Codes section
for details on valid codes for each type.

* EV_SYN:
  - Used as markers to separate events. Events may be separated in time or in
    space, such as with the multitouch protocol.

* EV_KEY:
  - Used to describe state changes of keyboards, buttons, or other key-like
    devices.

* EV_REL:
  - Used to describe relative axis value changes, e.g. moving the mouse 5 units
    to the left.

* EV_ABS:
  - Used to describe absolute axis value changes, e.g. describing the
    coordinates of a touch on a touchscreen.

* EV_MSC:
  - Used to describe miscellaneous input data that do not fit into other types.

* EV_SW:
  - Used to describe binary state input switches.

* EV_LED:
  - Used to turn LEDs on devices on and off.

* EV_SND:
  - Used to output sound to devices.

* EV_REP:
  - Used for autorepeating devices.

* EV_FF:
  - Used to send force feedback commands to an input device.

* EV_PWR:
  - A special type for power button and switch input.

* EV_FF_STATUS:
  - Used to receive force feedback device status.

Codes:
==========
Codes define the precise type of event.

EV_SYN:
----------
EV_SYN event values are undefined. Their usage is defined only by when they are
sent in the evdev event stream.

* SYN_REPORT:
  - Used to synchronize and separate events into packets of input data changes
    occurring at the same moment in time. For example, motion of a mouse may set
    the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
    motion will emit more REL_X and REL_Y values and send another SYN_REPORT.

* SYN_CONFIG:
  - TBD

* SYN_MT_REPORT:
  - Used to synchronize and separate touch events. See the
    multi-touch-protocol.txt document for more information.

* SYN_DROPPED:
  - Used to indicate buffer overrun in the evdev client's event queue.
    Client should ignore all events up to and including next SYN_REPORT
    event and query the device (using EVIOCG* ioctls) to obtain its
    current state.

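A sketch of the recovery a client might implement; resync_device() and
process() stand in for application code and are not real APIs:

  /* Drop events after SYN_DROPPED until the next SYN_REPORT, then
   * rebuild state from ioctls instead of the lost events. */
  static int dropped;

  static void handle(int fd, const struct input_event *ev)
  {
          if (ev->type == EV_SYN && ev->code == SYN_DROPPED) {
                  dropped = 1;
                  return;
          }
          if (dropped) {
                  if (ev->type == EV_SYN && ev->code == SYN_REPORT) {
                          dropped = 0;
                          resync_device(fd); /* EVIOCGKEY/EVIOCGABS/... */
                  }
                  return;         /* ignore events until resynced */
          }
          process(ev);            /* normal delivery */
  }
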
EV_KEY:
----------
EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
to represent the 'A' key on a keyboard. When a key is depressed, an event with
the key's code is emitted with value 1. When the key is released, an event is
emitted with value 0. Some hardware sends events when a key is repeated; these
events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
BTN_<name> is used for other types of momentary switch events.

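In a driver, these values are not written by hand; the input core derives them
from input_report_key() calls. A hypothetical driver fragment:

  #include <linux/input.h>

  /* dev is a registered input_dev with EV_KEY/KEY_A capability set.
   * Repeat events (value 2) come from the core's EV_REP handling. */
  static void report_key_a(struct input_dev *dev, bool pressed)
  {
          input_report_key(dev, KEY_A, pressed); /* 1 = press, 0 = release */
          input_sync(dev);        /* closes the packet with SYN_REPORT */
  }
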
A few EV_KEY codes have special meanings:

* BTN_TOOL_<name>:
  - These codes are used in conjunction with input trackpads, tablets, and
    touchscreens. These devices may be used with fingers, pens, or other tools.
    When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
    code should be set to a value of 1. When the tool is no longer interacting
    with the input device, the BTN_TOOL_<name> code should be reset to 0. All
    trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
    code when events are generated.

* BTN_TOUCH:
  - BTN_TOUCH is used for touch contact. While an input tool is determined to be
    within meaningful physical contact, the value of this property must be set
    to 1. Meaningful physical contact may mean any contact, or it may mean
    contact conditioned by an implementation defined property. For example, a
    touchpad may set the value to 1 only when the touch pressure rises above a
    certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
    example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
    pen is hovering over but not touching the tablet surface.

Note: For appropriate function of the legacy mousedev emulation driver,
BTN_TOUCH must be the first evdev code emitted in a synchronization frame.

Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
interpreted as a touchpad by userspace, while a similar device without
BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
with current userspace it is recommended to follow this distinction. In the
future, this distinction will be deprecated and the device properties ioctl
EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.

* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
  - These codes denote one, two, three, and four finger interaction on a
    trackpad or touchscreen. For example, if the user uses two fingers and moves
    them on the touchpad in an effort to scroll content on screen,
    BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
    Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
    purpose. A trackpad event generated by finger touches should generate events
    for one code from each group. At most one of these BTN_TOOL_<name>
    codes should have a value of 1 during any synchronization frame.

Note: Historically some drivers emitted multiple of the finger count codes with
a value of 1 in the same synchronization frame. This usage is deprecated.

Note: In multitouch drivers, the input_mt_report_finger_count() function should
be used to emit these codes (see the sketch below). Please see
multi-touch-protocol.txt for details.

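A hypothetical multitouch driver fragment using that helper; it picks exactly
one of the BTN_TOOL_*TAP codes for the current contact count:

  #include <linux/input/mt.h>

  static void report_frame(struct input_dev *dev, int fingers)
  {
          /* ... per-contact ABS_MT_* reports go here ... */
          input_mt_report_finger_count(dev, fingers);
          input_sync(dev);
  }
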
EV_REL:
----------
EV_REL events describe relative changes in a property. For example, a mouse may
move to the left by a certain number of units, but its absolute position in
space is unknown. If the absolute position is known, EV_ABS codes should be used
instead of EV_REL codes.

A few EV_REL codes have special meanings:

* REL_WHEEL, REL_HWHEEL:
  - These codes are used for vertical and horizontal scroll wheels,
    respectively.

EV_ABS:
----------
EV_ABS events describe absolute changes in a property. For example, a touchpad
may emit coordinates for a touch location.

A few EV_ABS codes have special meanings:

* ABS_DISTANCE:
  - Used to describe the distance of a tool from an interaction surface. This
    event should only be emitted while the tool is hovering, meaning in close
    proximity of the device and while the value of the BTN_TOUCH code is 0. If
    the input device may be used freely in three dimensions, consider ABS_Z
    instead.

* ABS_MT_<name>:
  - Used to describe multitouch input events. Please see
    multi-touch-protocol.txt for details.

EV_SW:
----------
EV_SW events describe stateful binary switches. For example, the SW_LID code is
used to denote when a laptop lid is closed.

Upon binding to a device or resuming from suspend, a driver must report
the current switch state. This ensures that the device, kernel, and userspace
state is in sync.

Upon resume, if the switch state is the same as before suspend, then the input
subsystem will filter out the duplicate switch state reports. The driver does
not need to keep the state of the switch at any time.

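On that basis, a hypothetical lid-switch driver simply reports unconditionally
at probe and resume time and lets the core filter duplicates:

  static void lid_report(struct input_dev *dev, bool closed)
  {
          input_report_switch(dev, SW_LID, closed);
          input_sync(dev);
  }
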
EV_MSC:
----------
EV_MSC events are used for input and output events that do not fall under other
categories.

EV_LED:
----------
EV_LED events are used for input and output to set and query the state of
various LEDs on devices.

EV_REP:
----------
EV_REP events are used for specifying autorepeating events.

EV_SND:
----------
EV_SND events are used for sending sound commands to simple sound output
devices.

EV_FF:
----------
EV_FF events are used to initialize a force feedback capable device and to cause
such a device to play feedback effects.

EV_PWR:
----------
EV_PWR events are a special type of event used specifically for power
management. Their usage is not well defined and will be addressed later.

Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.
For multi-touch functionality, see the multi-touch-protocol.txt document for
more information.

Mice:
----------
REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
scroll wheel events where available.

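A hypothetical mouse driver packet, following the rules above:

  static void mouse_packet(struct input_dev *dev, int dx, int dy,
                           bool left, int wheel)
  {
          input_report_rel(dev, REL_X, dx);
          input_report_rel(dev, REL_Y, dy);
          input_report_key(dev, BTN_LEFT, left);
          if (wheel)
                  input_report_rel(dev, REL_WHEEL, wheel);
          input_sync(dev);        /* one SYN_REPORT per hardware packet */
  }
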
Touchscreens:
----------
ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
used to report when a touch is active on the screen.
BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
contact. BTN_TOOL_<name> events should be reported where possible.

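A hypothetical touchscreen report; BTN_TOUCH is emitted first in the frame for
the mousedev emulation noted earlier:

  static void ts_report(struct input_dev *dev, bool touching, int x, int y)
  {
          input_report_key(dev, BTN_TOUCH, touching);
          if (touching) {
                  input_report_abs(dev, ABS_X, x);
                  input_report_abs(dev, ABS_Y, y);
          }
          input_sync(dev);
  }
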
Trackpads:
----------
Legacy trackpads that only provide relative position information must report
events like the mice described above.

Trackpads that provide absolute touch position must report ABS_{X,Y} for the
location of the touch. BTN_TOUCH should be used to report when a touch is active
on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
be used to report the number of touches active on the trackpad.

Tablets:
----------
BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
should be used to report when the tool is in contact with the tablet.
BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
purpose on the device.
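
A hypothetical pen tablet report illustrating the tool/contact split: hovering
is BTN_TOOL_PEN = 1 with BTN_TOUCH = 0, optionally qualified by ABS_DISTANCE:

  static void pen_report(struct input_dev *dev, bool in_prox, bool contact,
                         int x, int y, int dist)
  {
          input_report_key(dev, BTN_TOUCH, contact);
          input_report_key(dev, BTN_TOOL_PEN, in_prox);
          if (in_prox) {
                  input_report_abs(dev, ABS_X, x);
                  input_report_abs(dev, ABS_Y, y);
                  if (!contact)
                          input_report_abs(dev, ABS_DISTANCE, dist);
          }
          input_sync(dev);
  }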
diff --git a/MAINTAINERS b/MAINTAINERS
index 649600cb8ec9..ec3600306289 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -184,10 +184,9 @@ F: Documentation/filesystems/9p.txt
 F: fs/9p/
 
 A2232 SERIAL BOARD DRIVER
-M: Enver Haase <A2232@gmx.net>
 L: linux-m68k@lists.linux-m68k.org
-S: Maintained
-F: drivers/char/ser_a2232*
+S: Orphan
+F: drivers/staging/generic_serial/ser_a2232*
 
 AACRAID SCSI RAID DRIVER
 M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
@@ -877,6 +876,13 @@ F: arch/arm/mach-mv78xx0/
 F: arch/arm/mach-orion5x/
 F: arch/arm/plat-orion/
 
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M: Alexander Clouter <alex@digriz.org.uk>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W: http://www.digriz.org.uk/ts78xx/kernel
+S: Maintained
+F: arch/arm/mach-orion5x/ts78xx-*
+
 ARM/MIOA701 MACHINE SUPPORT
 M: Robert Jarzmik <robert.jarzmik@free.fr>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1063,7 +1069,7 @@ F: arch/arm/mach-shmobile/
 F: drivers/sh/
 
 ARM/TELECHIPS ARM ARCHITECTURE
-M: "Hans J. Koch" <hjk@linutronix.de>
+M: "Hans J. Koch" <hjk@hansjkoch.de>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/plat-tcc/
@@ -1823,11 +1829,10 @@ S: Maintained
 F: drivers/platform/x86/compal-laptop.c
 
 COMPUTONE INTELLIPORT MULTIPORT CARD
-M: "Michael H. Warfield" <mhw@wittsend.com>
 W: http://www.wittsend.com/computone.html
-S: Maintained
+S: Orphan
 F: Documentation/serial/computone.txt
-F: drivers/char/ip2/
+F: drivers/staging/tty/ip2/
 
 CONEXANT ACCESSRUNNER USB DRIVER
 M: Simon Arlott <cxacru@fire.lp0.eu>
@@ -2010,7 +2015,7 @@ F: drivers/net/wan/cycx*
 CYCLADES ASYNC MUX DRIVER
 W: http://www.cyclades.com/
 S: Orphan
-F: drivers/char/cyclades.c
+F: drivers/tty/cyclades.c
 F: include/linux/cyclades.h
 
 CYCLADES PC300 DRIVER
@@ -2124,8 +2129,8 @@ L: Eng.Linux@digi.com
 W: http://www.digi.com
 S: Orphan
 F: Documentation/serial/digiepca.txt
-F: drivers/char/epca*
-F: drivers/char/digi*
+F: drivers/staging/tty/epca*
+F: drivers/staging/tty/digi*
 
 DIOLAN U2C-12 I2C DRIVER
 M: Guenter Roeck <guenter.roeck@ericsson.com>
@@ -4077,7 +4082,7 @@ F: drivers/video/matrox/matroxfb_*
 F: include/linux/matroxfb.h
 
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
-M: "Hans J. Koch" <hjk@linutronix.de>
+M: "Hans J. Koch" <hjk@hansjkoch.de>
 L: lm-sensors@lm-sensors.org
 S: Maintained
 F: Documentation/hwmon/max6650
@@ -4192,7 +4197,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M: Jiri Slaby <jirislaby@gmail.com>
 S: Maintained
 F: Documentation/serial/moxa-smartio
-F: drivers/char/mxser.*
+F: drivers/tty/mxser.*
 
 MSI LAPTOP SUPPORT
 M: "Lee, Chun-Yi" <jlee@novell.com>
@@ -4234,7 +4239,7 @@ F: sound/oss/msnd*
 
 MULTITECH MULTIPORT CARD (ISICOM)
 S: Orphan
-F: drivers/char/isicom.c
+F: drivers/tty/isicom.c
 F: include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
@@ -5273,14 +5278,14 @@ F: drivers/memstick/host/r592.*
 RISCOM8 DRIVER
 S: Orphan
 F: Documentation/serial/riscom8.txt
-F: drivers/char/riscom8*
+F: drivers/staging/tty/riscom8*
 
 ROCKETPORT DRIVER
 P: Comtrol Corp.
 W: http://www.comtrol.com
 S: Maintained
 F: Documentation/serial/rocket.txt
-F: drivers/char/rocket*
+F: drivers/tty/rocket*
 
 ROSE NETWORK LAYER
 M: Ralf Baechle <ralf@linux-mips.org>
@@ -5916,10 +5921,9 @@ F: arch/arm/mach-spear6xx/spear600.c
 F: arch/arm/mach-spear6xx/spear600_evb.c
 
 SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
-M: Roger Wolff <R.E.Wolff@BitWizard.nl>
-S: Supported
+S: Orphan
 F: Documentation/serial/specialix.txt
-F: drivers/char/specialix*
+F: drivers/staging/tty/specialix*
 
 SPI SUBSYSTEM
 M: David Brownell <dbrownell@users.sourceforge.net>
@@ -5964,7 +5968,6 @@ F: arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
 M: Greg Kroah-Hartman <greg@kroah.com>
-M: Chris Wright <chrisw@sous-sol.org>
 L: stable@kernel.org
 S: Maintained
 
@@ -6248,7 +6251,8 @@ M: Greg Ungerer <gerg@uclinux.org>
 W: http://www.uclinux.org/
 L: uclinux-dev@uclinux.org (subscribers-only)
 S: Maintained
-F: arch/m68knommu/
+F: arch/m68k/*/*_no.*
+F: arch/m68k/include/asm/*_no.*
 
 UCLINUX FOR RENESAS H8/300 (H8300)
 M: Yoshinori Sato <ysato@users.sourceforge.jp>
@@ -6618,7 +6622,7 @@ F: fs/hostfs/
 F: fs/hppfs/
 
 USERSPACE I/O (UIO)
-M: "Hans J. Koch" <hjk@linutronix.de>
+M: "Hans J. Koch" <hjk@hansjkoch.de>
 M: Greg Kroah-Hartman <gregkh@suse.de>
 S: Maintained
 F: Documentation/DocBook/uio-howto.tmpl
diff --git a/Makefile b/Makefile
index 322e7334ccb9..b967b967572b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 9bb7b858ed23..7a6d908bb865 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -4,7 +4,7 @@
 
 extra-y := head.o vmlinux.lds
 asflags-y := $(KBUILD_CFLAGS)
-ccflags-y := -Werror -Wno-sign-compare
+ccflags-y := -Wno-sign-compare
 
 obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
 	    irq_alpha.o signal.o setup.o ptrace.o time.o \
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index 381fec0af52e..da7bcc372f16 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1,
 {
 	unsigned long flags;
 	unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-	unsigned int stat0, value, temp, cpu;
+	unsigned int stat0, value, cpu;
 
 	cpu = smp_processor_id();
 
@@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1,
 	stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
 	*(vuip)MCPCIA_CAP_ERR(mid) = stat0;
 	mb();
-	temp = *(vuip)MCPCIA_CAP_ERR(mid);
+	*(vuip)MCPCIA_CAP_ERR(mid);
 	DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
 	mb();
@@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 {
 	unsigned long flags;
 	unsigned long mid = MCPCIA_HOSE2MID(hose->index);
-	unsigned int stat0, temp, cpu;
+	unsigned int stat0, cpu;
 
 	cpu = smp_processor_id();
 
@@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 	/* Reset status register to avoid losing errors. */
 	stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
 	*(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
-	temp = *(vuip)MCPCIA_CAP_ERR(mid);
+	*(vuip)MCPCIA_CAP_ERR(mid);
 	DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
 
 	draina();
@@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
 	*((vuip)addr) = value;
 	mb();
 	mb();  /* magic */
-	temp = *(vuip)MCPCIA_CAP_ERR(mid);	/* read to force the write */
+	*(vuip)MCPCIA_CAP_ERR(mid);	/* read to force the write */
 	mcheck_expected(cpu) = 0;
 	mb();
 
@@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr)
 void
 mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
 {
-	struct el_common *mchk_header;
 	struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
 	unsigned int cpu = smp_processor_id();
 	int expected;
 
-	mchk_header = (struct el_common *)la_ptr;
 	mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
 	expected = mcheck_expected(cpu);
 
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index c3b3781a03de..14b26c466c89 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = {
 static struct el_subpacket *
 el_process_regatta_subpacket(struct el_subpacket *header)
 {
-	int status;
-
 	if (header->class != EL_CLASS__REGATTA_FAMILY) {
 		printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
 		       err_print_prefix,
@@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header)
 		printk("%s ** Occurred on CPU %d:\n",
 		       err_print_prefix,
 		       (int)header->by_type.regatta_frame.cpuid);
-		status = privateer_process_logout_frame((struct el_common *)
+		privateer_process_logout_frame((struct el_common *)
 			header->by_type.regatta_frame.data_start, 1);
 		break;
 	default:
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 1479dc6ebd97..51b7fbd9e4c1 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-	irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+	irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
 				      handle_simple_irq, "RTC");
 	setup_irq(RTC_IRQ, &timer_irqaction);
 }
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index d2634e4476b4..edbddcbd5bc6 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type)
 	case PCA56_CPU:
 	case PCA57_CPU:
 	{
-		unsigned long cbox_config, size;
-
 		if (cpu_type == PCA56_CPU) {
 			L1I = CSHAPE(16*1024, 6, 1);
 			L1D = CSHAPE(8*1024, 5, 1);
@@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type)
 		}
 		L3 = -1;
 
+#if 0
+		unsigned long cbox_config, size;
+
 		cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
 		size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
 
-#if 0
 		L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
 #else
 		L2 = external_cache_probe(512*1024, 6);
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
index 3e6a2893af9f..6886b834f487 100644
--- a/arch/alpha/kernel/smc37c93x.c
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -79,7 +79,6 @@
 static unsigned long __init SMCConfigState(unsigned long baseAddr)
 {
 	unsigned char devId;
-	unsigned char devRev;
 
 	unsigned long configPort;
 	unsigned long indexPort;
@@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr)
 	devId = inb(dataPort);
 	if (devId == VALID_DEVICE_ID) {
 		outb(DEVICE_REV, indexPort);
-		devRev = inb(dataPort);
+		/* unsigned char devRev = */ inb(dataPort);
 		break;
 	}
 	else
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index d3cb28bb8eb0..d92cdc715c65 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -156,7 +156,6 @@ static void __init
 wildfire_init_irq_per_pca(int qbbno, int pcano)
 {
 	int i, irq_bias;
-	unsigned long io_bias;
 	static struct irqaction isa_enable = {
 		.handler = no_action,
 		.name = "isa_enable",
@@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
 	irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
 		 + pcano * WILDFIRE_IRQ_PER_PCA;
 
+#if 0
+	unsigned long io_bias;
+
 	/* Only need the following for first PCI bus per PCA. */
 	io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
 
-#if 0
 	outb(0, DMA1_RESET_REG + io_bias);
 	outb(0, DMA2_RESET_REG + io_bias);
 	outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index a58e84f1a63b..918e8e0b72ff 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts)
 		year += 100;
 
 	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+	ts->tv_nsec = 0;
 }
 
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fdc9d4dbf85b..377a7a595b08 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1540,7 +1540,6 @@ config HIGHMEM
 config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem"
 	depends on HIGHMEM
-	depends on !OUTER_CACHE
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
@@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig"
 
 config ARCH_SUSPEND_POSSIBLE
 	depends on !ARCH_S5P64X0 && !ARCH_S5P6442
+	depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+		CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
 	def_bool y
 
 endmenu
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 494224a9b459..03d01d783e3b 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,17 +63,6 @@ config DEBUG_USER
 	  8 - SIGSEGV faults
 	  16 - SIGBUS faults
 
-config DEBUG_ERRORS
-	bool "Verbose kernel error messages"
-	depends on DEBUG_KERNEL
-	help
-	  This option controls verbose debugging information which can be
-	  printed when the kernel detects an internal error. This debugging
-	  information is useful to kernel hackers when tracking down problems,
-	  but mostly meaningless to other people. It's safe to say Y unless
-	  you are concerned with the code size or don't want to see these
-	  messages.
-
 config DEBUG_STACK_USAGE
 	bool "Enable stack utilization instrumentation"
 	depends on DEBUG_KERNEL
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index e7521bca2c35..6ea9b6f3607a 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
 obj-$(CONFIG_ARCH_IXP2000) += uengine.o
 obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
-obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o
 obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index c4391ba20350..1dc980675894 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
 #define THREAD_NOTIFY_FLUSH	0
 #define THREAD_NOTIFY_EXIT	1
 #define THREAD_NOTIFY_SWITCH	2
+#define THREAD_NOTIFY_COPY	3
 
 #endif
 #endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 74554f1742d7..8d95446150a3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
 obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
-obj-$(CONFIG_PM)		+= sleep.o
+obj-$(CONFIG_PM_SLEEP)		+= sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f4..9b05c6a0dcea 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
 void elf_set_personality(const struct elf32_hdr *x)
 {
 	unsigned int eflags = x->e_flags;
-	unsigned int personality = PER_LINUX_32BIT;
+	unsigned int personality = current->personality & ~PER_MASK;
+
+	/*
+	 * We only support Linux ELF executables, so always set the
+	 * personality to LINUX.
+	 */
+	personality |= PER_LINUX;
 
 	/*
 	 * APCS-26 is only valid for OABI executables
 	 */
-	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
-		if (eflags & EF_ARM_APCS_26)
-			personality = PER_LINUX;
-	}
+	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+	    (eflags & EF_ARM_APCS_26))
+		personality &= ~ADDR_LIMIT_32BIT;
+	else
+		personality |= ADDR_LIMIT_32BIT;
 
 	set_personality(personality);
 
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 8dbc126f7152..87acc25d7a3e 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
 		 */
 		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
 		isb();
+
+		/*
+		 * Clear any configured vector-catch events before
+		 * enabling monitor mode.
+		 */
+		asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+		isb();
 	}
 
 	if (enable_monitor_mode())
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 69cfee0fe00f..979da3947f42 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -221,7 +221,7 @@ again:
 	prev_raw_count &= armpmu->max_period;
 
 	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count;
+		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
 	else
 		delta = new_raw_count - prev_raw_count;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94bbedbed639..5e1e54197227 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	if (clone_flags & CLONE_SETTLS)
 		thread->tp_value = regs->ARM_r3;
 
+	thread_notify(THREAD_NOTIFY_COPY, thread);
+
 	return 0;
 }
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f0000e188c8c..3b54ad19d489 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
-	if (current->personality != PER_LINUX &&
-	    current->personality != PER_LINUX_32BIT &&
+	if ((current->personality & PER_MASK) != PER_LINUX &&
 	    thread->exec_domain->handler) {
 		thread->exec_domain->handler(n, regs);
 		return regs->ARM_r0;
diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h
index ee8b02ed8011..7bfb827f3fe3 100644
--- a/arch/arm/mach-mmp/include/mach/gpio.h
+++ b/arch/arm/mach-mmp/include/mach/gpio.h
@@ -10,7 +10,7 @@
 #define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
 #define GPIO_REG(x)	(*((volatile u32 *)(GPIO_REGS_VIRT + (x))))
 
-#define NR_BUILTIN_GPIO	(192)
+#define NR_BUILTIN_GPIO	IRQ_GPIO_NUM
 
 #define gpio_to_bank(gpio)	((gpio) >> 5)
 #define gpio_to_irq(gpio)	(IRQ_GPIO_START + (gpio))
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
index 4621067c7720..713be155a44d 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
@@ -8,6 +8,15 @@
 #define MFP_DRIVE_MEDIUM	(0x2 << 13)
 #define MFP_DRIVE_FAST		(0x3 << 13)
 
+#undef MFP_CFG
+#undef MFP_CFG_DRV
+
+#define MFP_CFG(pin, af)	\
+	(MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM)
+
+#define MFP_CFG_DRV(pin, af, drv)	\
+	(MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv)
+
 /* GPIO */
 #define GPIO0_GPIO	MFP_CFG(GPIO0, AF5)
 #define GPIO1_GPIO	MFP_CFG(GPIO1, AF5)
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 7f568611547e..6a96911b0ad5 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
 
 static void __init qsd8x50_init_mmc(void)
 {
-	if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
-		vreg_mmc = vreg_get(NULL, "gp6");
-	else
-		vreg_mmc = vreg_get(NULL, "gp5");
+	vreg_mmc = vreg_get(NULL, "gp5");
 
 	if (IS_ERR(vreg_mmc)) {
 		pr_err("vreg get for vreg_mmc failed (%ld)\n",
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 56f920c55b6a..38b95e949d13 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 	/* Use existing clock_event for cpu 0 */
 	if (!smp_processor_id())
-		return;
+		return 0;
 
 	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
diff --git a/arch/arm/mach-pxa/include/mach/gpio.h b/arch/arm/mach-pxa/include/mach/gpio.h
index b024a8b37439..c4639502efca 100644
--- a/arch/arm/mach-pxa/include/mach/gpio.h
+++ b/arch/arm/mach-pxa/include/mach/gpio.h
@@ -99,11 +99,24 @@
 #define GAFR(x)	GPIO_REG(0x54 + (((x) & 0x70) >> 2))
 
 
-#define NR_BUILTIN_GPIO 128
+#define NR_BUILTIN_GPIO PXA_GPIO_IRQ_NUM
 
 #define gpio_to_bank(gpio)	((gpio) >> 5)
 #define gpio_to_irq(gpio)	IRQ_GPIO(gpio)
-#define irq_to_gpio(irq)	IRQ_TO_GPIO(irq)
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+	int gpio;
+
+	if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1)
+		return irq - IRQ_GPIO0;
+
+	gpio = irq - PXA_GPIO_IRQ_BASE;
+	if (gpio >= 2 && gpio < NR_BUILTIN_GPIO)
+		return gpio;
+
+	return -1;
+}
 
 #ifdef CONFIG_CPU_PXA26x
 /* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index a4285fc00878..038402404e39 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -93,9 +93,6 @@
 #define GPIO_2_x_TO_IRQ(x)	(PXA_GPIO_IRQ_BASE + (x))
 #define IRQ_GPIO(x)	(((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
 
-#define IRQ_TO_GPIO_2_x(i)	((i) - PXA_GPIO_IRQ_BASE)
-#define IRQ_TO_GPIO(i)	(((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i))
-
 /*
  * The following interrupts are for board specific purposes. Since
  * the kernel can only run on one machine at a time, we can re-use
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 6bde5956358d..a4af8c52d7ee 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {}
 
 static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(d->irq);
+	int gpio = irq_to_gpio(d->irq);
 	uint32_t mask = 0;
 
 	if (gpio >= 0 && gpio < 85)
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 1cb5d0f9723f..909756eaf4b7 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {}
 */
 static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(d->irq);
+	int gpio = irq_to_gpio(d->irq);
 	uint32_t mask;
 
 	if (gpio >= 0 && gpio < 128)
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index 76a3f654220f..65a1aba6823d 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 void tegra_gpio_resume(void)
 {
 	unsigned long flags;
-	int b, p, i;
+	int b;
+	int p;
 
 	local_irq_save(flags);
 
@@ -280,7 +281,8 @@ void tegra_gpio_resume(void)
 void tegra_gpio_suspend(void)
 {
 	unsigned long flags;
-	int b, p, i;
+	int b;
+	int p;
 
 	local_irq_save(flags);
 	for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 6d7c4eea4dcb..4459470c052d 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
 {
 	unsigned long flags;
 	int ret;
+	long new_rate = rate;
 
-	rate = clk_round_rate(c->parent, rate);
-	if (rate < 0)
-		return rate;
+	new_rate = clk_round_rate(c->parent, new_rate);
+	if (new_rate < 0)
+		return new_rate;
 
 	spin_lock_irqsave(&c->parent->spinlock, flags);
 
-	c->u.shared_bus_user.rate = rate;
+	c->u.shared_bus_user.rate = new_rate;
 	ret = tegra_clk_shared_bus_update(c->parent);
 
 	spin_unlock_irqrestore(&c->parent->spinlock, flags);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index afe209e1e1f8..74be05f3e03a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
 #include <linux/shm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/personality.h>
 #include <linux/random.h>
 #include <asm/cputype.h>
 #include <asm/system.h>
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		mm->cached_hole_size = 0;
 	}
 	/* 8 bits of randomness in 20 address space bits */
-	if (current->flags & PF_RANDOMIZE)
+	if ((current->flags & PF_RANDOMIZE) &&
+	    !(current->personality & ADDR_NO_RANDOMIZE))
 		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index b46eb21f05c7..bf8a1d1cccb6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm920_suspend_size
 .equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm920_do_suspend)
 	stmfd sp!, {r4 - r7, lr}
 	mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 6a4bdb2c94a7..0ed85d930c09 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
 .globl cpu_arm926_suspend_size
 .equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_arm926_do_suspend)
 	stmfd sp!, {r4 - r7, lr}
 	mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 74483d1977fe..184a9c997e36 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 
 .globl cpu_sa1100_suspend_size
 .equ cpu_sa1100_suspend_size, 4*4
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_sa1100_do_suspend)
 	stmfd sp!, {r4 - r7, lr}
 	mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bfa0c9f611c5..7c99cb4c8e4f 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext)
 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
 .globl cpu_v6_suspend_size
 .equ cpu_v6_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v6_do_suspend)
 	stmfd sp!, {r4 - r11, lr}
 	mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c35618e42f6f..babfba09c89f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -211,7 +211,7 @@ cpu_v7_name:
 /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
 .globl cpu_v7_suspend_size
 .equ cpu_v7_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_v7_do_suspend)
 	stmfd sp!, {r4 - r11, lr}
 	mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 63d8b2044e84..596213699f37 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 .globl  cpu_xsc3_suspend_size
 .equ    cpu_xsc3_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xsc3_do_suspend)
         stmfd   sp!, {r4 - r10, lr}
         mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 086038cd86ab..ce233bcbf506 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 .globl  cpu_xscale_suspend_size
 .equ    cpu_xscale_suspend_size, 4 * 7
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 ENTRY(cpu_xscale_do_suspend)
         stmfd   sp!, {r4 - r10, lr}
         mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c
index d592b6304b48..d15dc47b0e3d 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-s5p/pm.c
@@ -19,17 +19,6 @@
 
 #define PFX "s5p pm: "
 
-/* s3c_pm_check_resume_pin
- *
- * check to see if the pin is configured correctly for sleep mode, and
- * make any necessary adjustments if it is not
-*/
-
-static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
-{
-        /* nothing here yet */
-}
-
 /* s3c_pm_configure_extint
  *
  * configure all external interrupt pins
diff --git a/arch/arm/plat-samsung/pm-check.c b/arch/arm/plat-samsung/pm-check.c
index e4baf76f374a..6b733fafe7cd 100644
--- a/arch/arm/plat-samsung/pm-check.c
+++ b/arch/arm/plat-samsung/pm-check.c
@@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
  */
 static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
 {
-        void *save_at = phys_to_virt(s3c_sleep_save_phys);
         unsigned long addr;
         unsigned long left;
         void *stkpage;
@@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
                 goto skip_check;
         }
 
-        if (in_region(ptr, left, save_at, 32*4 )) {
-                S3C_PMDBG("skipping %08lx, has save block in\n", addr);
-                goto skip_check;
-        }
-
         /* calculate and check the checksum */
 
         calc = crc32_le(~0, ptr, left);
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index d5b58d31903c..5c0a440d6e16 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count)
  *
  * print any IRQs asserted at resume time (ie, we woke from)
 */
-static void s3c_pm_show_resume_irqs(int start, unsigned long which,
-                                    unsigned long mask)
+static void __maybe_unused s3c_pm_show_resume_irqs(int start,
+                                                   unsigned long which,
+                                                   unsigned long mask)
 {
         int i;
 
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index bbf3da012afd..f74695075e64 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread)
         put_cpu();
 }
 
+static void vfp_thread_copy(struct thread_info *thread)
+{
+        struct thread_info *parent = current_thread_info();
+
+        vfp_sync_hwstate(parent);
+        thread->vfpstate = parent->vfpstate;
+}
+
 /*
  * When this function is called with the following 'cmd's, the following
  * is true while this function is being run:
@@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread)
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
         struct thread_info *thread = v;
+        u32 fpexc;
+#ifdef CONFIG_SMP
+        unsigned int cpu;
+#endif
 
-        if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
-                u32 fpexc = fmrx(FPEXC);
+        switch (cmd) {
+        case THREAD_NOTIFY_SWITCH:
+                fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
-                unsigned int cpu = thread->cpu;
+                cpu = thread->cpu;
 
                 /*
                  * On SMP, if VFP is enabled, save the old state in
@@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
                  * old state.
                  */
                 fmxr(FPEXC, fpexc & ~FPEXC_EN);
-                return NOTIFY_DONE;
-        }
+                break;
 
-        if (cmd == THREAD_NOTIFY_FLUSH)
+        case THREAD_NOTIFY_FLUSH:
                 vfp_thread_flush(thread);
-        else
+                break;
+
+        case THREAD_NOTIFY_EXIT:
                 vfp_thread_exit(thread);
+                break;
+
+        case THREAD_NOTIFY_COPY:
+                vfp_thread_copy(thread);
+                break;
+        }
 
         return NOTIFY_DONE;
 }
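
The vfp_notifier() rework above replaces an if/else chain on cmd with a switch so the new THREAD_NOTIFY_COPY event can hook thread creation and copy the parent's VFP state. A minimal stand-alone sketch of the same dispatch shape, with hypothetical event and handler names rather than kernel code:

#include <stdio.h>

enum thread_event { EV_SWITCH, EV_FLUSH, EV_EXIT, EV_COPY };

static void handle_event(enum thread_event cmd)
{
        switch (cmd) {
        case EV_SWITCH:
                printf("save/restore per-thread FP state\n");
                break;
        case EV_FLUSH:
                printf("reset FP state for a fresh thread\n");
                break;
        case EV_EXIT:
                printf("release FP state on thread exit\n");
                break;
        case EV_COPY:
                /* the new fork-time hook: copy parent state to child */
                printf("copy parent FP state into the new thread\n");
                break;
        }
}

int main(void)
{
        handle_event(EV_COPY);
        return 0;
}

The switch makes each event's handling explicit and gives the COPY case a natural place to land, where the old two-way if/else had nowhere to put a fourth event.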
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 19e2c7c3e63a..44bd0cced725 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -19,11 +19,11 @@
  * Force strict CPU ordering.
  */
 #define nop()  __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb()   __asm__ __volatile__ (""   : : : "memory")
-#define rmb()  __asm__ __volatile__ (""   : : : "memory")
-#define wmb()  __asm__ __volatile__ (""   : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends()  do { } while(0)
+#define smp_mb()  mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends()      read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
                                         unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb()       do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb()      do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb()      do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends()      do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb()   do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb()  do { barrier(); smp_check_barrier(); } while (0)
+# define wmb()  do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb()       barrier()
-# define smp_rmb()      barrier()
-# define smp_wmb()      barrier()
-#define smp_read_barrier_depends()      barrier()
+# define mb()   barrier()
+# define rmb()  barrier()
+# define wmb()  barrier()
+# define read_barrier_depends() do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#define smp_read_barrier_depends()      do { } while(0)
+#define mb()    barrier()
+#define rmb()   barrier()
+#define wmb()   barrier()
+#define read_barrier_depends()  do { } while (0)
 
 struct __xchg_dummy {
         unsigned long a[100];
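
The hunks above swap the roles of the mb()/smp_mb() families on Blackfin: with a non-coherent core dcache, even the base barriers must do the cache bookkeeping, while the non-SMP fallbacks reduce to a compiler barrier. A stand-alone sketch of such a compiler-only barrier, assuming GCC-style inline asm (illustrative, not the kernel's exact definition):

#include <stdio.h>

/* emits no instructions, but stops the compiler reordering across it */
#define compiler_barrier() __asm__ __volatile__("" : : : "memory")

int main(void)
{
        volatile int flag = 0;
        int data = 0;

        /* without the barrier the compiler may reorder these accesses */
        data = 100;
        compiler_barrier();
        flag = 1;

        printf("data=%d flag=%d\n", data, flag);
        return 0;
}

The "memory" clobber is the whole trick: it tells the compiler any memory may have changed, so cached values are not carried across the barrier.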
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index cdbe075de1dc..8b81dc04488a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
         _disable_gptimers(mask);
         for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
                 if (mask & (1 << i))
-                        group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+                        group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
         SSYNC();
 }
 EXPORT_SYMBOL(disable_gptimers);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 8c9a43daf80f..cdb4beb6bc8f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 {
         struct clock_event_device *evt = dev_id;
         smp_mb();
-        evt->event_handler(evt);
+        /*
+         * We want to ACK before we handle so that we can handle smaller timer
+         * intervals.  This way if the timer expires again while we're handling
+         * things, we're more likely to see that 2nd int rather than swallowing
+         * it by ACKing the int at the end of this handler.
+         */
         bfin_gptmr0_ack();
+        evt->event_handler(evt);
         return IRQ_HANDLED;
 }
 
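
The reordering above acknowledges the timer interrupt before running the event handler, so a second expiry that arrives mid-handler is latched as a new event instead of being swallowed by a late ACK. A toy model of that ordering, with hypothetical names standing in for the real ACK and handler:

#include <stdbool.h>
#include <stdio.h>

static bool irq_pending;

static void device_ack_irq(void)
{
        irq_pending = false;
}

static void run_handler(void)
{
        /* simulate the timer expiring again while we are still handling */
        irq_pending = true;
}

int main(void)
{
        irq_pending = true;     /* first expiry */

        device_ack_irq();       /* ACK first ... */
        run_handler();          /* ... then handle */

        /* the second expiry is still visible and will be serviced */
        printf("pending after handler: %s\n", irq_pending ? "yes" : "no");
        return 0;
}

Had the ACK come after run_handler(), the mid-handler expiry would have been cleared along with the first one and lost.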
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 326bb86f4d29..1fbd94c44457 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
         struct blackfin_flush_data *fdata = info;
 
         /* Invalidate the memory holding the bounds of the flushed region. */
-        invalidate_dcache_range((unsigned long)fdata,
-                        (unsigned long)fdata + sizeof(*fdata));
+        blackfin_dcache_invalidate_range((unsigned long)fdata,
+                (unsigned long)fdata + sizeof(*fdata));
+
+        /* Make sure all write buffers in the data side of the core
+         * are flushed before trying to invalidate the icache.  This
+         * needs to be after the data flush and before the icache
+         * flush so that the SSYNC does the right thing in preventing
+         * the instruction prefetcher from hitting things in cached
+         * memory at the wrong time -- it runs much further ahead than
+         * the pipeline.
+         */
+        SSYNC();
 
-        flush_icache_range(fdata->start, fdata->end);
+        /* ipi_flush_icache is invoked by the generic flush_icache_range,
+         * so call the Blackfin arch icache flush directly here.
+         */
+        blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
 static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 851b3bf6e962..eccdefe70d4e 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,7 +6,6 @@ config MICROBLAZE
         select HAVE_FUNCTION_GRAPH_TRACER
         select HAVE_DYNAMIC_FTRACE
         select HAVE_FTRACE_MCOUNT_RECORD
-        select USB_ARCH_HAS_EHCI
         select ARCH_WANT_OPTIONAL_GPIOLIB
         select HAVE_OPROFILE
         select HAVE_ARCH_KGDB
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b6ff882f695b..8f4d50b0adfa 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
 config ARCH_SUSPEND_POSSIBLE
         def_bool y
         depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-                   PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+                   (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
         bool
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index be3cdf9134ce..1833d1a07e79 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
             CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
             CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-            CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
             CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
             CPU_FTR_DBELL)
+#define CPU_FTRS_E5500  (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+            CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+            CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_GENERIC_32     (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_COMPATIBLE     (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE       (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_POSSIBLE       \
             (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |        \
             CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 |       \
             CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T |   \
             CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
 #else
 enum {
         CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
 #endif
 #ifdef CONFIG_E500
             CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+            CPU_FTRS_E5500 |
 #endif
             0,
 };
 #endif /* __powerpc64__ */
 
 #ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS         (CPU_FTRS_E5500)
+#else
 #define CPU_FTRS_ALWAYS         \
             (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &        \
             CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 &       \
             CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
 #else
 enum {
         CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
 #endif
 #ifdef CONFIG_E500
             CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+            CPU_FTRS_E5500 &
 #endif
             CPU_FTRS_POSSIBLE,
 };
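
CPU_FTRS_POSSIBLE is the bitwise OR (union) of the feature words of every CPU the kernel may run on, and CPU_FTRS_ALWAYS the bitwise AND (intersection); restricting a 64-bit Book3E build to CPU_FTRS_E5500 makes both exact, so feature tests can fold to compile-time constants. A toy demonstration of that trick with made-up feature bits (not the real CPU_FTR_* values):

#include <stdint.h>
#include <stdio.h>

#define FTR_TB      0x1u
#define FTR_LWSYNC  0x2u
#define FTR_POPCNT  0x4u

#define FTRS_CPU_A  (FTR_TB | FTR_LWSYNC)
#define FTRS_CPU_B  (FTR_TB | FTR_LWSYNC | FTR_POPCNT)

#define FTRS_POSSIBLE (FTRS_CPU_A | FTRS_CPU_B)  /* union */
#define FTRS_ALWAYS   (FTRS_CPU_A & FTRS_CPU_B)  /* intersection */

static int cpu_has_feature(uint32_t cur_features, uint32_t f)
{
        if (!(FTRS_POSSIBLE & f))
                return 0;               /* constant-folds to 0 */
        if ((FTRS_ALWAYS & f) == f)
                return 1;               /* constant-folds to 1 */
        return (cur_features & f) != 0; /* runtime test only if needed */
}

int main(void)
{
        printf("TB always set:   %d\n", cpu_has_feature(FTRS_CPU_A, FTR_TB));
        printf("POPCNT on CPU A: %d\n", cpu_has_feature(FTRS_CPU_A, FTR_POPCNT));
        return 0;
}

With a single supported CPU the union and intersection coincide, and every cpu_has_feature() collapses to 0 or 1 at compile time.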
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 811f04ac3660..8d1569c29042 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
  * on platforms where such control is possible.
  */
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
-        defined(CONFIG_KPROBES)
+        defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
 #define PAGE_KERNEL_TEXT        PAGE_KERNEL_X
 #else
 #define PAGE_KERNEL_TEXT        PAGE_KERNEL_ROX
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c9b68d07ac4f..b9602ee06deb 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                 .pvr_mask               = 0xffff0000,
                 .pvr_value              = 0x80240000,
                 .cpu_name               = "e5500",
-                .cpu_features           = CPU_FTRS_E500MC,
+                .cpu_features           = CPU_FTRS_E5500,
                 .cpu_user_features      = COMMON_USER_BOOKE,
                 .mmu_features           = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
                         MMU_FTR_USE_TLBILX,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d3d416339dd..5b5e1f002a8e 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 }
 
 /* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
 static void crash_kexec_wait_realmode(int cpu)
 {
         unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
         }
         mb();
 }
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif  /* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
         crash_ipi_callback(regs);
 }
 
-#else
+#else   /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
         /*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
         cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
+#endif  /* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc0..2b97b80d6d7d 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
                 if (!parent)
                         continue;
                 if (of_match_node(legacy_serial_parents, parent) != NULL) {
-                        index = add_legacy_soc_port(np, np);
-                        if (index >= 0 && np == stdout)
-                                legacy_serial_console = index;
+                        if (of_device_is_available(np)) {
+                                index = add_legacy_soc_port(np, np);
+                                if (index >= 0 && np == stdout)
+                                        legacy_serial_console = index;
+                        }
                 }
                 of_node_put(parent);
         }
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c4063b7f49a0..822f63008ae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
         return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+        u64 delta = (val - prev) & 0xfffffffful;
+
+        /*
+         * POWER7 can roll back counter values, if the new value is smaller
+         * than the previous value it will cause the delta and the counter to
+         * have bogus values unless we rolled a counter over.  If a counter is
+         * rolled back, it will be smaller, but within 256, which is the
+         * maximum number of events to roll back at once.  If we detect a
+         * rollback return 0.  This can lead to a small lack of precision in
+         * the counters.
+         */
+        if (prev > val && (prev - val) < 256)
+                delta = 0;
+
+        return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
         s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
                 prev = local64_read(&event->hw.prev_count);
                 barrier();
                 val = read_pmc(event->hw.idx);
+                delta = check_and_compute_delta(prev, val);
+                if (!delta)
+                        return;
         } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-        /* The counters are only 32 bits wide */
-        delta = (val - prev) & 0xfffffffful;
         local64_add(delta, &event->count);
         local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
                 val = (event->hw.idx == 5) ? pmc5 : pmc6;
                 prev = local64_read(&event->hw.prev_count);
                 event->hw.idx = 0;
-                delta = (val - prev) & 0xfffffffful;
-                local64_add(delta, &event->count);
+                delta = check_and_compute_delta(prev, val);
+                if (delta)
+                        local64_add(delta, &event->count);
         }
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
                                unsigned long pmc5, unsigned long pmc6)
 {
         struct perf_event *event;
-        u64 val;
+        u64 val, prev;
         int i;
 
         for (i = 0; i < cpuhw->n_limited; ++i) {
                 event = cpuhw->limited_counter[i];
                 event->hw.idx = cpuhw->limited_hwidx[i];
                 val = (event->hw.idx == 5) ? pmc5 : pmc6;
-                local64_set(&event->hw.prev_count, val);
+                prev = local64_read(&event->hw.prev_count);
+                if (check_and_compute_delta(prev, val))
+                        local64_set(&event->hw.prev_count, val);
                 perf_event_update_userpage(event);
         }
 }
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
         /* we don't have to worry about interrupts here */
         prev = local64_read(&event->hw.prev_count);
-        delta = (val - prev) & 0xfffffffful;
+        delta = check_and_compute_delta(prev, val);
         local64_add(delta, &event->count);
 
         /*
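
A stand-alone rendering of the delta logic added above, assuming only that the hardware counters are 32 bits wide; the two calls work through a normal wraparound and a sub-256 rollback:

#include <stdint.h>
#include <stdio.h>

static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
{
        uint64_t delta = (val - prev) & 0xfffffffful;

        /* a rolled-back counter is smaller than prev, but by < 256 */
        if (prev > val && (prev - val) < 256)
                delta = 0;

        return delta;
}

int main(void)
{
        /* wraparound: 0xfffffff0 -> 0x10 really means 32 new events */
        printf("wrap:     %llu\n", (unsigned long long)
               check_and_compute_delta(0xfffffff0ul, 0x10ul));

        /* rollback: counter stepped back by 16, treat as no new events */
        printf("rollback: %llu\n", (unsigned long long)
               check_and_compute_delta(0x1000ul, 0x0ff0ul));
        return 0;
}

The masked subtraction turns a genuine wraparound into the correct small delta (32 here), while the sub-256 backward step would otherwise masquerade as a near-2^32 delta and corrupt the count.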
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 375480c56eb9..f33acfd872ad 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
         u64 stolen = 0;
         u64 dtb;
 
+        if (!dtl)
+                return 0;
+
         if (i == vpa->dtl_idx)
                 return 0;
         while (i < vpa->dtl_idx) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a830c5e80657..bc5f0dc6ae1e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
         mpic_setup_this_cpu();
 }
 
+#ifdef CONFIG_PPC64
 #ifdef CONFIG_HOTPLUG_CPU
 static int smp_core99_cpu_notify(struct notifier_block *self,
                                  unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
 
 static void __init smp_core99_bringup_done(void)
 {
-#ifdef CONFIG_PPC64
         extern void g5_phy_disable_cpu1(void);
 
         /* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
                 set_cpu_present(1, false);
                 g5_phy_disable_cpu1();
         }
-#endif /* CONFIG_PPC64 */
-
 #ifdef CONFIG_HOTPLUG_CPU
         register_cpu_notifier(&smp_core99_cpu_nb);
 #endif
+
         if (ppc_md.progress)
                 ppc_md.progress("smp_core99_bringup_done", 0x349);
 }
+#endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
 struct smp_ops_t core99_smp_ops = {
         .message_pass   = smp_mpic_message_pass,
         .probe          = smp_core99_probe,
+#ifdef CONFIG_PPC64
         .bringup_done   = smp_core99_bringup_done,
+#endif
         .kick_cpu       = smp_core99_kick_cpu,
         .setup_cpu      = smp_core99_setup_cpu,
         .give_timebase  = smp_core99_give_timebase,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 000724149089..6c42cfde8415 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
         int cpu, ret;
         struct paca_struct *pp;
         struct dtl_entry *dtl;
+        struct kmem_cache *dtl_cache;
 
         if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                 return 0;
 
+        dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+                                        DISPATCH_LOG_BYTES, 0, NULL);
+        if (!dtl_cache) {
+                pr_warn("Failed to create dispatch trace log buffer cache\n");
+                pr_warn("Stolen time statistics will be unreliable\n");
+                return 0;
+        }
+
         for_each_possible_cpu(cpu) {
                 pp = &paca[cpu];
-                dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
-                                   cpu_to_node(cpu));
+                dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
                 if (!dtl) {
                         pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
                                 cpu);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index f8f7f28c6343..68ca9290df94 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
         struct resource rsrc;
         const int *bus_range;
 
+        if (!of_device_is_available(dev)) {
+                pr_warning("%s: disabled\n", dev->full_name);
+                return -ENODEV;
+        }
+
         pr_debug("Adding PCI host bridge %s\n", dev->full_name);
 
         /* Fetch host bridge registers address */
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 14232d57369c..49798532b477 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev)
         port->ops = ops;
         port->priv = priv;
         port->phys_efptr = 0x100;
-        rio_register_mport(port);
 
         priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
         rio_regs_win = priv->regs_win;
@@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev)
         dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
                         port->sys_size ? 65536 : 256);
 
+        if (rio_register_mport(port))
+                goto err;
+
         if (port->host_deviceid >= 0)
                 out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
                         RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
index 02fb017fed47..a9da516a5274 100644
--- a/arch/um/Kconfig.x86
+++ b/arch/um/Kconfig.x86
@@ -4,6 +4,10 @@ menu "UML-specific options"
 
 menu "Host processor type and features"
 
+config CMPXCHG_LOCAL
+        bool
+        default n
+
 source "arch/x86/Kconfig.cpu"
 
 endmenu
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h
new file mode 100644
index 000000000000..9e33b864c359
--- /dev/null
+++ b/arch/um/include/asm/bug.h
@@ -0,0 +1,6 @@
+#ifndef __UM_BUG_H
+#define __UM_BUG_H
+
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index fd5a1f365c95..3cce71413d0b 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -96,11 +96,15 @@
 #define MSR_IA32_MC0_ADDR               0x00000402
 #define MSR_IA32_MC0_MISC               0x00000403
 
+#define MSR_AMD64_MC0_MASK              0xc0010044
+
 #define MSR_IA32_MCx_CTL(x)             (MSR_IA32_MC0_CTL + 4*(x))
 #define MSR_IA32_MCx_STATUS(x)          (MSR_IA32_MC0_STATUS + 4*(x))
 #define MSR_IA32_MCx_ADDR(x)            (MSR_IA32_MC0_ADDR + 4*(x))
 #define MSR_IA32_MCx_MISC(x)            (MSR_IA32_MC0_MISC + 4*(x))
 
+#define MSR_AMD64_MCx_MASK(x)           (MSR_AMD64_MC0_MASK + (x))
+
 /* These are consecutive and not in the normal 4er MCE bank block */
 #define MSR_IA32_MC0_CTL2               0x00000280
 #define MSR_IA32_MCx_CTL2(x)            (MSR_IA32_MC0_CTL2 + (x))
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3ecece0217ef..3532d3bf8105 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
         /* As a rule processors have APIC timer running in deep C states */
         if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
                 set_cpu_cap(c, X86_FEATURE_ARAT);
+
+        /*
+         * Disable GART TLB Walk Errors on Fam10h.  We do this here
+         * because this is always needed when GART is enabled, even in a
+         * kernel which has no MCE support built in.
+         */
+        if (c->x86 == 0x10) {
+                /*
+                 * The BIOS should disable GartTlbWlk Errors itself.  If
+                 * it doesn't, do it here as suggested by the BKDG.
+                 *
+                 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+                 */
+                u64 mask;
+
+                rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+                mask |= (1 << 10);
+                wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+        }
 }
 
 #ifdef CONFIG_X86_32
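
The GART fix above is a plain read-modify-write of an MSR: read the mask register, set bit 10, and write the word back so no other mask bits are disturbed. A user-space sketch of the same RMW shape, with msr_read()/msr_write() as stand-ins rather than a real MSR API:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr;       /* stands in for MSR_AMD64_MCx_MASK(4) */

static uint64_t msr_read(void)
{
        return fake_msr;
}

static void msr_write(uint64_t v)
{
        fake_msr = v;
}

int main(void)
{
        uint64_t mask;

        mask = msr_read();
        mask |= (1ull << 10);   /* set only the bit we own ... */
        msr_write(mask);        /* ... preserving everything else */

        printf("mask = %#llx\n", (unsigned long long)fake_msr);
        return 0;
}

The read-modify-write matters because the mask register is shared state: a blind write would clobber whatever the BIOS or other code had already masked.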
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c2871d3c71b6..8ed8908cc9f7 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id)
         identify_secondary_cpu(c);
 }
 
+static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2)
+{
+        int node1 = early_cpu_to_node(cpu1);
+        int node2 = early_cpu_to_node(cpu2);
+
+        /*
+         * Our CPU scheduler assumes all logical cpus in the same physical
+         * cpu share the same node.  But, buggy ACPI or NUMA emulation might
+         * assign them to different nodes.  Fix it.
+         */
+        if (node1 != node2) {
+                pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n",
+                           cpu1, node1, cpu2, node2, node2);
+
+                numa_remove_cpu(cpu1);
+                numa_set_node(cpu1, node2);
+                numa_add_cpu(cpu1);
+        }
+}
+
 static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
 {
         cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2));
@@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2)
         cpumask_set_cpu(cpu2, cpu_core_mask(cpu1));
         cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2));
         cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1));
+        check_cpu_siblings_on_same_node(cpu1, cpu2);
 }
 
 
@@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                     per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                         cpumask_set_cpu(i, cpu_llc_shared_mask(cpu));
                         cpumask_set_cpu(cpu, cpu_llc_shared_mask(i));
+                        check_cpu_siblings_on_same_node(cpu, i);
                 }
                 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                         cpumask_set_cpu(i, cpu_core_mask(cpu));
                         cpumask_set_cpu(cpu, cpu_core_mask(i));
+                        check_cpu_siblings_on_same_node(cpu, i);
                         /*
                          * Does this new cpu bringup a new core?
                          */
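
check_cpu_siblings_on_same_node() repairs firmware that places two logical CPUs of one package on different NUMA nodes by moving the first CPU onto the second's node. A toy model of the same reconciliation, using a plain array in place of the kernel's per-CPU node map:

#include <stdio.h>

static int cpu_node[4] = { 0, 0, 1, 0 };        /* cpu2 is misreported */

static void fix_sibling_nodes(int cpu1, int cpu2)
{
        if (cpu_node[cpu1] != cpu_node[cpu2]) {
                printf("CPU %d node %d vs CPU %d node %d: forcing node %d\n",
                       cpu1, cpu_node[cpu1], cpu2, cpu_node[cpu2],
                       cpu_node[cpu2]);
                cpu_node[cpu1] = cpu_node[cpu2];
        }
}

int main(void)
{
        fix_sibling_nodes(2, 3);        /* siblings in one package */
        printf("cpu2 now on node %d\n", cpu_node[2]);
        return 0;
}

In the kernel the reassignment is the remove/set/add triple on the NUMA masks; the invariant enforced is the same: siblings of one physical CPU always share a node.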
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts
index dc701ea58546..2d6d226f2b10 100644
--- a/arch/x86/platform/ce4100/falconfalls.dts
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -74,6 +74,7 @@
                         compatible = "intel,ce4100-pci", "pci";
                         device_type = "pci";
                         bus-range = <1 1>;
+                        reg = <0x0800 0x0 0x0 0x0 0x0>;
                         ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
 
                         interrupt-parent = <&ioapic2>;
@@ -412,6 +413,7 @@
                                 #address-cells = <2>;
                                 #size-cells = <1>;
                                 compatible = "isa";
+                                reg = <0xf800 0x0 0x0 0x0 0x0>;
                                 ranges = <1 0 0 0 0 0x100>;
 
                                 rtc@70 {
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 5c0207bf959b..275dbc19e2cf 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
                         pentry->freq_hz, pentry->irq);
                 if (!pentry->irq)
                         continue;
-                mp_irq.type = MP_IOAPIC;
+                mp_irq.type = MP_INTSRC;
                 mp_irq.irqtype = mp_INT;
 /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
                 mp_irq.irqflag = 5;
-                mp_irq.srcbus = 0;
+                mp_irq.srcbus = MP_BUS_ISA;
                 mp_irq.srcbusirq = pentry->irq; /* IRQ */
                 mp_irq.dstapic = MP_APIC_ALL;
                 mp_irq.dstirq = pentry->irq;
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
         for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
                 pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
                         totallen, (u32)pentry->phys_addr, pentry->irq);
-                mp_irq.type = MP_IOAPIC;
+                mp_irq.type = MP_INTSRC;
                 mp_irq.irqtype = mp_INT;
                 mp_irq.irqflag = 0xf;   /* level trigger and active low */
-                mp_irq.srcbus = 0;
+                mp_irq.srcbus = MP_BUS_ISA;
                 mp_irq.srcbusirq = pentry->irq; /* IRQ */
                 mp_irq.dstapic = MP_APIC_ALL;
                 mp_irq.dstirq = pentry->irq;
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void)
         /* Avoid searching for BIOS MP tables */
         x86_init.mpparse.find_smp_config = x86_init_noop;
         x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
+        set_bit(MP_BUS_ISA, mp_bus_not_pci);
 }
 
 /*
diff --git a/block/blk-core.c b/block/blk-core.c
index 90f22cc30799..5fa3dd2705c6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
-        /*
-         * If the current process is plugged and has barriers submitted,
-         * we will livelock if we don't unplug first.
-         */
-        blk_flush_plug(current);
-}
-
 static void blk_delay_work(struct work_struct *work)
 {
         struct request_queue *q;
 
         q = container_of(work, struct request_queue, delay_work.work);
         spin_lock_irq(q->queue_lock);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         spin_unlock_irq(q->queue_lock);
 }
 
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-        schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+        queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                                msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
         WARN_ON(!irqs_disabled());
 
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -298,7 +286,6 @@ void blk_sync_queue(struct request_queue *q)
 {
         del_timer_sync(&q->timeout);
         cancel_delayed_work_sync(&q->delay_work);
-        queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -310,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
         if (unlikely(blk_queue_stopped(q)))
                 return;
@@ -321,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
          * Only recurse once to avoid overrunning the stack, let the unplug
          * handling reinvoke the handler shortly if we already got there.
          */
-        if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                 q->request_fn(q);
                 queue_flag_clear(QUEUE_FLAG_REENTER, q);
         } else
@@ -330,6 +316,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+        if (likely(!blk_queue_stopped(q)))
+                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
+/**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -342,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
         unsigned long flags;
 
         spin_lock_irqsave(q->queue_lock, flags);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -991,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
                 blk_queue_end_tag(q, rq);
 
         add_acct_request(q, rq, where);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1311,7 +1311,15 @@ get_rq:
 
         plug = current->plug;
         if (plug) {
-                if (!plug->should_sort && !list_empty(&plug->list)) {
+                /*
+                 * If this is the first request added after a plug, fire
+                 * off a plug trace.  If others have been added before, check
+                 * if we have multiple devices in this plug.  If so, make a
+                 * note to sort the list before dispatch.
+                 */
+                if (list_empty(&plug->list))
+                        trace_block_plug(q);
+                else if (!plug->should_sort) {
                         struct request *__rq;
 
                         __rq = list_entry_rq(plug->list.prev);
@@ -1327,7 +1335,7 @@ get_rq:
         } else {
                 spin_lock_irq(q->queue_lock);
                 add_acct_request(q, req, where);
-                __blk_run_queue(q, false);
+                __blk_run_queue(q);
 out_unlock:
                 spin_unlock_irq(q->queue_lock);
         }
@@ -2644,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug)
 
         plug->magic = PLUG_MAGIC;
         INIT_LIST_HEAD(&plug->list);
+        INIT_LIST_HEAD(&plug->cb_list);
         plug->should_sort = 0;
 
         /*
@@ -2668,33 +2677,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
         return !(rqa->q <= rqb->q);
 }
 
-static void flush_plug_list(struct blk_plug *plug)
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context.  We do this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+                            bool from_schedule)
+        __releases(q->queue_lock)
+{
+        trace_block_unplug(q, depth, !from_schedule);
+
+        /*
+         * If we are punting this to kblockd, then we can safely drop
+         * the queue_lock before waking kblockd (which needs to take
+         * this lock).
+         */
+        if (from_schedule) {
+                spin_unlock(q->queue_lock);
+                blk_run_queue_async(q);
+        } else {
+                __blk_run_queue(q);
+                spin_unlock(q->queue_lock);
+        }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+        LIST_HEAD(callbacks);
+
+        if (list_empty(&plug->cb_list))
+                return;
+
+        list_splice_init(&plug->cb_list, &callbacks);
+
+        while (!list_empty(&callbacks)) {
+                struct blk_plug_cb *cb = list_first_entry(&callbacks,
+                                                          struct blk_plug_cb,
+                                                          list);
+                list_del(&cb->list);
+                cb->callback(cb);
+        }
+}
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
         struct request_queue *q;
         unsigned long flags;
         struct request *rq;
+        LIST_HEAD(list);
+        unsigned int depth;
 
         BUG_ON(plug->magic != PLUG_MAGIC);
 
+        flush_plug_callbacks(plug);
         if (list_empty(&plug->list))
                 return;
 
-        if (plug->should_sort)
-                list_sort(NULL, &plug->list, plug_rq_cmp);
+        list_splice_init(&plug->list, &list);
+
+        if (plug->should_sort) {
+                list_sort(NULL, &list, plug_rq_cmp);
+                plug->should_sort = 0;
+        }
 
         q = NULL;
+        depth = 0;
+
+        /*
+         * Save and disable interrupts here, to avoid doing it for every
+         * queue lock we have to take.
+         */
         local_irq_save(flags);
-        while (!list_empty(&plug->list)) {
-                rq = list_entry_rq(plug->list.next);
+        while (!list_empty(&list)) {
+                rq = list_entry_rq(list.next);
                 list_del_init(&rq->queuelist);
                 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                 BUG_ON(!rq->q);
                 if (rq->q != q) {
-                        if (q) {
-                                __blk_run_queue(q, false);
-                                spin_unlock(q->queue_lock);
-                        }
+                        /*
+                         * This drops the queue lock
+                         */
+                        if (q)
+                                queue_unplugged(q, depth, from_schedule);
                         q = rq->q;
+                        depth = 0;
                         spin_lock(q->queue_lock);
                 }
                 rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2706,38 +2775,28 @@ static void flush_plug_list(struct blk_plug *plug)
                         __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
                 else
                         __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
-        }
 
-        if (q) {
-                __blk_run_queue(q, false);
-                spin_unlock(q->queue_lock);
+                depth++;
         }
 
-        BUG_ON(!list_empty(&plug->list));
-        local_irq_restore(flags);
-}
-
-static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-        flush_plug_list(plug);
+        /*
+         * This drops the queue lock
+         */
+        if (q)
+                queue_unplugged(q, depth, from_schedule);
 
-        if (plug == tsk->plug)
-                tsk->plug = NULL;
+        local_irq_restore(flags);
 }
+EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-        if (plug)
-                __blk_finish_plug(current, plug);
-}
-EXPORT_SYMBOL(blk_finish_plug);
+        blk_flush_plug_list(plug, false);
 
-void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-        __blk_finish_plug(tsk, plug);
-        tsk->plug = plug;
+        if (plug == current->plug)
+                current->plug = NULL;
 }
-EXPORT_SYMBOL(__blk_flush_plug);
+EXPORT_SYMBOL(blk_finish_plug);
 
 int __init blk_dev_init(void)
 {
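
blk_flush_plug_list() above splices the plugged requests onto a private list, optionally sorts them by owning queue, then walks the list and hands each queue one contiguous batch whose size is tracked as the "depth". A toy model of that grouping, using arrays and qsort() in place of the kernel's linked lists; the grouping logic is the part being illustrated:

#include <stdio.h>
#include <stdlib.h>

struct req {
        int q;                  /* request tagged with its owning queue */
};

static int by_queue(const void *a, const void *b)
{
        return ((const struct req *)a)->q - ((const struct req *)b)->q;
}

int main(void)
{
        struct req list[] = { {2}, {1}, {2}, {1}, {1} };
        int n = sizeof(list) / sizeof(list[0]);
        int i, q = -1, depth = 0;

        /* the plug->should_sort step: group requests by queue */
        qsort(list, n, sizeof(list[0]), by_queue);

        for (i = 0; i < n; i++) {
                if (list[i].q != q) {
                        if (q >= 0)     /* queue_unplugged(q, depth, ...) */
                                printf("run queue %d, depth %d\n", q, depth);
                        q = list[i].q;
                        depth = 0;
                }
                depth++;
        }
        if (q >= 0)
                printf("run queue %d, depth %d\n", q, depth);
        return 0;
}

Sorting first means each queue's lock is taken once per batch rather than once per request, and the per-queue depth is what the unplug tracepoint reports.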
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7482b7fa863b..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         WARN_ON(irqs_disabled());
         spin_lock_irq(q->queue_lock);
         __elv_add_request(q, rq, where);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         /* the queue is stopped so it won't be plugged+unplugged */
         if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                 q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index eba4a2790c6c..6c9b5e189e62 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
          * request_fn may confuse the driver.  Always use kblockd.
          */
         if (queued)
-                __blk_run_queue(q, true);
+                blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
          * the comment in flush_end_io().
          */
         if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-                __blk_run_queue(q, true);
+                blk_run_queue_async(q);
 }
 
 /**
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 261c75c665ae..6d735122bc59 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk)
 {
         int ret;
         struct device *dev = disk_to_dev(disk);
-
         struct request_queue *q = disk->queue;
 
         if (WARN_ON(!q))
@@ -521,7 +520,7 @@ int blk_register_queue(struct gendisk *disk)
         if (ret) {
                 kobject_uevent(&q->kobj, KOBJ_REMOVE);
                 kobject_del(&q->kobj);
-                blk_trace_remove_sysfs(disk_to_dev(disk));
+                blk_trace_remove_sysfs(dev);
                 kobject_put(&dev->kobj);
                 return ret;
         }
diff --git a/block/blk.h b/block/blk.h
index 61263463e38e..c9df8fc3c999 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3be881ec95ad..46b0a1d1d925 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                     cfqd->busy_queues > 1) {
                         cfq_del_timer(cfqd, cfqq);
                         cfq_clear_cfqq_wait_request(cfqq);
-                        __blk_run_queue(cfqd->queue, false);
+                        __blk_run_queue(cfqd->queue);
                 } else {
                         cfq_blkiocg_update_idle_time_stats(
                                         &cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                  * this new queue is RT and the current one is BE
                  */
                 cfq_preempt_queue(cfqd, cfqq);
-                __blk_run_queue(cfqd->queue, false);
+                __blk_run_queue(cfqd->queue);
         }
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
         struct request_queue *q = cfqd->queue;
 
         spin_lock_irq(q->queue_lock);
-        __blk_run_queue(cfqd->queue, false);
+        __blk_run_queue(cfqd->queue);
         spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab4..6f6abc08bb56 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
          */
         elv_drain_elevator(q);
         while (q->rq.elvpriv) {
-                __blk_run_queue(q, false);
+                __blk_run_queue(q);
                 spin_unlock_irq(q->queue_lock);
                 msleep(10);
                 spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
695 * with anything. There's no point in delaying queue 695 * with anything. There's no point in delaying queue
696 * processing. 696 * processing.
697 */ 697 */
698 __blk_run_queue(q, false); 698 __blk_run_queue(q);
699 break; 699 break;
700 700
701 case ELEVATOR_INSERT_SORT_MERGE: 701 case ELEVATOR_INSERT_SORT_MERGE:
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 38319a69bd0a..d6d58684712b 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
  * Sanity check for the adapter hardware - check the reaction of
  * the bus lines only if it seems to be idle.
  */
-static int test_bus(struct i2c_algo_bit_data *adap, char *name)
+static int test_bus(struct i2c_adapter *i2c_adap)
 {
-	int scl, sda;
+	struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+	const char *name = i2c_adap->name;
+	int scl, sda, ret;
+
+	if (adap->pre_xfer) {
+		ret = adap->pre_xfer(i2c_adap);
+		if (ret < 0)
+			return -ENODEV;
+	}
 
 	if (adap->getscl == NULL)
 		pr_info("%s: Testing SDA only, SCL is not readable\n", name);
@@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name)
 			"while pulling SCL high!\n", name);
 		goto bailout;
 	}
+
+	if (adap->post_xfer)
+		adap->post_xfer(i2c_adap);
+
 	pr_info("%s: Test OK\n", name);
 	return 0;
 bailout:
 	sdahi(adap);
 	sclhi(adap);
+
+	if (adap->post_xfer)
+		adap->post_xfer(i2c_adap);
+
 	return -ENODEV;
 }
 
@@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
 	int ret;
 
 	if (bit_test) {
-		ret = test_bus(bit_adap, adap->name);
+		ret = test_bus(adap);
 		if (ret < 0)
 			return -ENODEV;
 	}
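The motivation is adapters whose bus is only usable after a hook has run. A hedged sketch of such a setup — all names here are hypothetical, only the pre_xfer/post_xfer fields are real i2c_algo_bit_data members — showing a bus gated by a GPIO-controlled switch that test_bus() would previously have probed while powered down:

static int example_pre_xfer(struct i2c_adapter *adap)
{
	/* enable the (hypothetical) bus switch before SDA/SCL are sampled */
	gpio_set_value(EXAMPLE_BUS_EN_GPIO, 1);
	udelay(10);
	return 0;
}

static void example_post_xfer(struct i2c_adapter *adap)
{
	gpio_set_value(EXAMPLE_BUS_EN_GPIO, 0);
}

static struct i2c_algo_bit_data example_bit_data = {
	.pre_xfer	= example_pre_xfer,
	.post_xfer	= example_post_xfer,
	/* .setsda/.setscl/.getsda/.getscl wired up as usual */
};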
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 70c30e6bce0b..9a58994ff7ea 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
 
 	/* Let legacy drivers scan this bus for matching devices */
 	if (driver->attach_adapter) {
-		dev_warn(&adap->dev, "attach_adapter method is deprecated\n");
+		dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
+			 driver->driver.name);
 		dev_warn(&adap->dev, "Please use another way to instantiate "
 			 "your i2c_client\n");
 		/* We ignore the return code; if it fails, too bad */
@@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
 
 	if (!driver->detach_adapter)
 		return 0;
-	dev_warn(&adapter->dev, "detach_adapter method is deprecated\n");
+	dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n",
+		 driver->driver.name);
 	res = driver->detach_adapter(adapter);
 	if (res)
 		dev_err(&adapter->dev, "detach_adapter failed (%d) "
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 7f42d3a454d2..88d8e4cb419a 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -39,13 +39,13 @@ struct evdev {
 };
 
 struct evdev_client {
-	int head;
-	int tail;
+	unsigned int head;
+	unsigned int tail;
 	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
 	struct fasync_struct *fasync;
 	struct evdev *evdev;
 	struct list_head node;
-	int bufsize;
+	unsigned int bufsize;
 	struct input_event buffer[];
 };
 
@@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex);
 static void evdev_pass_event(struct evdev_client *client,
 			     struct input_event *event)
 {
-	/*
-	 * Interrupts are disabled, just acquire the lock.
-	 * Make sure we don't leave with the client buffer
-	 * "empty" by having client->head == client->tail.
-	 */
+	/* Interrupts are disabled, just acquire the lock. */
 	spin_lock(&client->buffer_lock);
-	do {
-		client->buffer[client->head++] = *event;
-		client->head &= client->bufsize - 1;
-	} while (client->head == client->tail);
+
+	client->buffer[client->head++] = *event;
+	client->head &= client->bufsize - 1;
+
+	if (unlikely(client->head == client->tail)) {
+		/*
+		 * This effectively "drops" all unconsumed events, leaving
+		 * EV_SYN/SYN_DROPPED plus the newest event in the queue.
+		 */
+		client->tail = (client->head - 2) & (client->bufsize - 1);
+
+		client->buffer[client->tail].time = event->time;
+		client->buffer[client->tail].type = EV_SYN;
+		client->buffer[client->tail].code = SYN_DROPPED;
+		client->buffer[client->tail].value = 0;
+	}
+
 	spin_unlock(&client->buffer_lock);
 
 	if (event->type == EV_SYN)
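Since the queue can now silently discard events, readers must handle the new marker. A hedged userspace sketch — the resync policy and ioctls shown are the conventional ones, not mandated by this patch:

#include <unistd.h>
#include <linux/input.h>

#ifndef SYN_DROPPED
#define SYN_DROPPED 3	/* defined alongside this patch */
#endif

static void drain_events(int fd)
{
	struct input_event ev;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
			/* Buffer overflowed: discard the partial packet and
			 * re-query device state (EVIOCGKEY/EVIOCGABS) to resync.
			 */
			continue;
		}
		/* handle ev normally */
	}
}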
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d6e8bd8a851c..ebbceedc92f4 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 }
 EXPORT_SYMBOL(input_set_capability);
 
+static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
+{
+	int mt_slots;
+	int i;
+	unsigned int events;
+
+	if (dev->mtsize) {
+		mt_slots = dev->mtsize;
+	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
+		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
+			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
+		mt_slots = clamp(mt_slots, 2, 32);
+	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+		mt_slots = 2;
+	} else {
+		mt_slots = 0;
+	}
+
+	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
+
+	for (i = 0; i < ABS_CNT; i++) {
+		if (test_bit(i, dev->absbit)) {
+			if (input_is_mt_axis(i))
+				events += mt_slots;
+			else
+				events++;
+		}
+	}
+
+	for (i = 0; i < REL_CNT; i++)
+		if (test_bit(i, dev->relbit))
+			events++;
+
+	return events;
+}
+
 #define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
 	do {								\
 		if (!test_bit(EV_##type, dev->evbit))			\
@@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev)
 	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
 	input_cleanse_bitmasks(dev);
 
+	if (!dev->hint_events_per_packet)
+		dev->hint_events_per_packet =
+				input_estimate_events_per_packet(dev);
+
 	/*
 	 * If delay and period are pre-set by the driver, then autorepeating
 	 * is handled by the driver itself and we don't do it in input.c.
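For a concrete sense of the sizing this enables, consider a hypothetical 10-slot contact device reporting three MT axes — a hedged, standalone arithmetic sketch (not kernel code, values illustrative) mirroring the estimate above:

static unsigned int example_estimate(void)
{
	int mt_slots = 10;			/* e.g. dev->mtsize */
	unsigned int events = mt_slots + 1;	/* SYN_MT_REPORT per slot + SYN_REPORT */

	events += 3 * mt_slots;	/* ABS_MT_POSITION_X/Y + ABS_MT_PRESSURE */
	return events;		/* 10 + 1 + 30 = 41 events per packet */
}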
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 09bef79d9da1..a26922cf0e84 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
 static int __devinit twl4030_kp_probe(struct platform_device *pdev)
 {
 	struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
-	const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+	const struct matrix_keymap_data *keymap_data;
 	struct twl4030_keypad *kp;
 	struct input_dev *input;
 	u8 reg;
 	int error;
 
-	if (!pdata || !pdata->rows || !pdata->cols ||
+	if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
 	    pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
 		dev_err(&pdev->dev, "Invalid platform_data\n");
 		return -EINVAL;
 	}
 
+	keymap_data = pdata->keymap_data;
+
 	kp = kzalloc(sizeof(*kp), GFP_KERNEL);
 	input = input_allocate_device();
 	if (!kp || !input) {
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 7077f9bf5ead..62bae99424e6 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 				    enum xenbus_state backend_state)
 {
 	struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
-	int val;
+	int ret, val;
 
 	switch (backend_state) {
 	case XenbusStateInitialising:
@@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 
 	case XenbusStateInitWait:
 InitWait:
+		ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+				   "feature-abs-pointer", "%d", &val);
+		if (ret < 0)
+			val = 0;
+		if (val) {
+			ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+					    "request-abs-pointer", "1");
+			if (ret)
+				pr_warning("xenkbd: can't request abs-pointer");
+		}
+
 		xenbus_switch_state(dev, XenbusStateConnected);
 		break;
 
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index efa06882de00..45f93d0f5592 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
 			IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) {
 		printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
 		err = -EBUSY;
-		goto fail2;
+		goto fail1;
 	}
 
 	if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
 			IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) {
 		printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
 		err = -EBUSY;
-		goto fail3;
+		goto fail2;
 	}
 
 	serio_set_drvdata(serio, ts);
 
 	err = serio_open(serio, drv);
 	if (err)
-		return err;
+		goto fail3;
 
 	//h3600_flite_control(1, 25);     /* default brightness */
-	input_register_device(ts->dev);
+	err = input_register_device(ts->dev);
+	if (err)
+		goto fail4;
 
 	return 0;
 
-fail3:	free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
+fail4:	serio_close(serio);
+fail3:	serio_set_drvdata(serio, NULL);
+	free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
 fail2:	free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1:	serio_set_drvdata(serio, NULL);
-	input_free_device(input_dev);
+fail1:	input_free_device(input_dev);
 	kfree(ts);
 	return err;
 }
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 3790816643be..8497f56f8e46 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
 	led->cdev.flags |= LED_CORE_SUSPENDRESUME;
 	led->vcc = vcc;
 
+	/* to handle correctly an already enabled regulator */
+	if (regulator_is_enabled(led->vcc))
+		led->enabled = 1;
+
 	mutex_init(&led->mutex);
 	INIT_WORK(&led->work, led_work);
 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5ef136cdba91..e5d8904fc8f6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
 	return md_raid5_congested(&rs->md, bits);
 }
 
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
-	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-	md_raid5_kick_device(rs->md.private);
-}
-
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
@@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 
 	rs->callbacks.congested_fn = raid_is_congested;
-	rs->callbacks.unplug_fn = raid_unplug;
 	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
 	return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b12b3776c0c0..6e853c61d87e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
  */
-static void plugger_work(struct work_struct *work)
-{
-	struct plug_handle *plug =
-		container_of(work, struct plug_handle, unplug_work);
-	plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
-	struct plug_handle *plug = (void *)data;
-	kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
-		  void (*unplug_fn)(struct plug_handle *))
-{
-	plug->unplug_flag = 0;
-	plug->unplug_fn = unplug_fn;
-	init_timer(&plug->unplug_timer);
-	plug->unplug_timer.function = plugger_timeout;
-	plug->unplug_timer.data = (unsigned long)plug;
-	INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
-
-void plugger_set_plug(struct plug_handle *plug)
-{
-	if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
-		mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
-}
-EXPORT_SYMBOL_GPL(plugger_set_plug);
-
-int plugger_remove_plug(struct plug_handle *plug)
-{
-	if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
-		del_timer(&plug->unplug_timer);
-		return 1;
-	} else
-		return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
-
+struct md_plug_cb {
+	struct blk_plug_cb cb;
+	mddev_t *mddev;
+};
+
+static void plugger_unplug(struct blk_plug_cb *cb)
+{
+	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+		md_wakeup_thread(mdcb->mddev->thread);
+	kfree(mdcb);
+}
+
+/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
+{
+	struct blk_plug *plug = current->plug;
+	struct md_plug_cb *mdcb;
+
+	if (!plug)
+		return 0;
+
+	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+		if (mdcb->cb.callback == plugger_unplug &&
+		    mdcb->mddev == mddev) {
+			/* Already on the list, move to top */
+			if (mdcb != list_first_entry(&plug->cb_list,
+						     struct md_plug_cb,
+						     cb.list))
+				list_move(&mdcb->cb.list, &plug->cb_list);
+			return 1;
+		}
+	}
+	/* Not currently on the callback list */
+	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+	if (!mdcb)
+		return 0;
+
+	mdcb->mddev = mddev;
+	mdcb->cb.callback = plugger_unplug;
+	atomic_inc(&mddev->plug_cnt);
+	list_add(&mdcb->cb.list, &plug->cb_list);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev)
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
+	atomic_set(&mddev->plug_cnt, 0);
 	spin_lock_init(&mddev->write_lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
@@ -4723,7 +4735,6 @@ static void md_clean(mddev_t *mddev)
 	mddev->bitmap_info.chunksize = 0;
 	mddev->bitmap_info.daemon_sleep = 0;
 	mddev->bitmap_info.max_write_behind = 0;
-	mddev->plug = NULL;
 }
 
 static void __md_stop_writes(mddev_t *mddev)
@@ -6688,12 +6699,6 @@ int md_allow_write(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-void md_unplug(mddev_t *mddev)
-{
-	if (mddev->plug)
-		mddev->plug->unplug_fn(mddev->plug);
-}
-
 #define SYNC_MARKS	10
 #define SYNC_MARK_STEP	(3*HZ)
 void md_do_sync(mddev_t *mddev)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 52b407369e13..0b1fd3f1d85b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,26 +29,6 @@
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
-/* generic plugging support - like that provided with request_queue,
- * but does not require a request_queue
- */
-struct plug_handle {
-	void			(*unplug_fn)(struct plug_handle *);
-	struct timer_list	unplug_timer;
-	struct work_struct	unplug_work;
-	unsigned long		unplug_flag;
-};
-#define	PLUGGED_FLAG 1
-void plugger_init(struct plug_handle *plug,
-		  void (*unplug_fn)(struct plug_handle *));
-void plugger_set_plug(struct plug_handle *plug);
-int plugger_remove_plug(struct plug_handle *plug);
-static inline void plugger_flush(struct plug_handle *plug)
-{
-	del_timer_sync(&plug->unplug_timer);
-	cancel_work_sync(&plug->unplug_work);
-}
-
 /*
  * MD's 'extended' device
  */
@@ -199,6 +179,9 @@ struct mddev_s
 	int				delta_disks, new_level, new_layout;
 	int				new_chunk_sectors;
 
+	atomic_t			plug_cnt;	/* If device is expecting
+							 * more bios soon.
+							 */
 	struct mdk_thread_s		*thread;	/* management thread */
 	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
 	sector_t			curr_resync;	/* last block scheduled */
@@ -336,7 +319,6 @@ struct mddev_s
 	struct list_head		all_mddevs;
 
 	struct attribute_group		*to_remove;
-	struct plug_handle		*plug; /* if used by personality */
 
 	struct bio_set			*bio_set;
 
@@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev);
 extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern void restore_bitmap_write_access(struct file *file);
-extern void md_unplug(mddev_t *mddev);
 
 extern void mddev_init(mddev_t *mddev);
 extern int md_run(mddev_t *mddev);
@@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
 				   mddev_t *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 				   mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
 #endif /* _MD_MD_H */
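Taken together, md.c and md.h replace the timer-driven plugger with a count of outstanding blk_plug callbacks. The raid1/raid10/raid5 hunks below all adopt the same pattern, condensed here as a hedged sketch (example_make_request is illustrative, not a real function in this series):

static int example_make_request(mddev_t *mddev, struct bio *bio)
{
	/* non-zero when an unplug callback is queued on current->plug */
	int plugged = mddev_check_plugged(mddev);

	/* ... queue bio on the personality's pending list ... */

	if (!plugged)	/* no unplug coming, wake the md thread now */
		md_wakeup_thread(mddev->thread);
	return 0;
}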
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c2a21ae56d97..2b7a7ff401dc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf)
 	spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-	blk_flush_plug(current);
-	md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf)
 
 	/* Wait until no block IO is waiting */
 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-			    conf->resync_lock, md_kick_device(conf->mddev));
+			    conf->resync_lock, );
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, md_kick_device(conf->mddev));
+			    conf->resync_lock, );
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    md_kick_device(conf->mddev));
+				    );
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf)
 	wait_event_lock_irq(conf->wait_barrier,
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
-			    ({ flush_pending_writes(conf);
-			       md_kick_device(conf->mddev); }));
+			    flush_pending_writes(conf));
 	spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
 	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
 	mdk_rdev_t *blocked_rdev;
+	int plugged;
 
 	/*
 	 * Register the new request and wait if the reconstruction
@@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	 * inc refcount on their rdev. Record them by setting
 	 * bios[x] to bio
 	 */
+	plugged = mddev_check_plugged(mddev);
+
 	disks = conf->raid_disks;
  retry_write:
 	blocked_rdev = NULL;
@@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid1d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync || !bitmap)
+	if (do_sync || !bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
 
 	return 0;
@@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev)
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
 	mdk_rdev_t *rdev;
+	struct blk_plug plug;
 
 	md_check_recovery(mddev);
 
+	blk_start_plug(&plug);
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
-		flush_pending_writes(conf);
+		if (atomic_read(&mddev->plug_cnt) == 0)
+			flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev)
 		}
 		cond_resched();
 	}
+	blk_finish_plug(&plug);
 }
 
 
@@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev)
 
 	md_unregister_thread(mddev->thread);
 	mddev->thread = NULL;
-	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
 	if (conf->r1bio_pool)
 		mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2da83d566592..8e9462626ec5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf)
 	spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-	blk_flush_plug(current);
-	md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force)
 
 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock, md_kick_device(conf->mddev));
+			    conf->resync_lock, );
 
 	/* block any new IO from starting */
 	conf->barrier++;
 
-	/* No wait for all pending IO to complete */
+	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, md_kick_device(conf->mddev));
+			    conf->resync_lock, );
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    md_kick_device(conf->mddev));
+				    );
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf)
 	wait_event_lock_irq(conf->wait_barrier,
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
-			    ({ flush_pending_writes(conf);
-			       md_kick_device(conf->mddev); }));
+			    flush_pending_writes(conf));
+
 	spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
 	unsigned long flags;
 	mdk_rdev_t *blocked_rdev;
+	int plugged;
 
 	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bio);
@@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	 * inc refcount on their rdev. Record them by setting
 	 * bios[x] to bio
 	 */
+	plugged = mddev_check_plugged(mddev);
+
 	raid10_find_phys(conf, r10_bio);
  retry_write:
 	blocked_rdev = NULL;
@@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync || !mddev->bitmap)
+	if (do_sync || !mddev->bitmap || !plugged)
 		md_wakeup_thread(mddev->thread);
-
 	return 0;
 }
 
@@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev)
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
 	mdk_rdev_t *rdev;
+	struct blk_plug plug;
 
 	md_check_recovery(mddev);
 
+	blk_start_plug(&plug);
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
@@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev)
 		}
 		cond_resched();
 	}
+	blk_finish_plug(&plug);
 }
 
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e867ee42b152..f301e6ae220c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -27,12 +27,12 @@
  *
  * We group bitmap updates into batches.  Each batch has a number.
  * We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
+ * conf->seq_write is the number of the last batch successfully written.
+ * conf->seq_flush is the number of the last batch that was closed to
  *    new additions.
  * When we discover that we will need to write to any block in a stripe
  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
+ * the number of the batch it will be in. This is seq_flush+1.
  * When we are ready to do a write, if that batch hasn't been written yet,
  *   we plug the array and queue the stripe for later.
  * When an unplug happens, we increment bm_flush, thus closing the current
@@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 		BUG_ON(!list_empty(&sh->lru));
 		BUG_ON(atomic_read(&conf->active_stripes)==0);
 		if (test_bit(STRIPE_HANDLE, &sh->state)) {
-			if (test_bit(STRIPE_DELAYED, &sh->state)) {
+			if (test_bit(STRIPE_DELAYED, &sh->state))
 				list_add_tail(&sh->lru, &conf->delayed_list);
-				plugger_set_plug(&conf->plug);
-			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-				   sh->bm_seq - conf->seq_write > 0) {
+			else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+				 sh->bm_seq - conf->seq_write > 0)
 				list_add_tail(&sh->lru, &conf->bitmap_list);
-				plugger_set_plug(&conf->plug);
-			} else {
+			else {
 				clear_bit(STRIPE_BIT_DELAY, &sh->state);
 				list_add_tail(&sh->lru, &conf->handle_list);
 			}
@@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
 				    < (conf->max_nr_stripes *3/4)
 				    || !conf->inactive_blocked),
 				    conf->device_lock,
-				    md_raid5_kick_device(conf));
+				    );
 			conf->inactive_blocked = 0;
 		} else
 			init_stripe(sh, sector, previous);
@@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    !list_empty(&conf->inactive_list),
 				    conf->device_lock,
-				    blk_flush_plug(current));
+				    );
 		osh = get_free_stripe(conf);
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
@@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
 			atomic_inc(&conf->preread_active_stripes);
 			list_add_tail(&sh->lru, &conf->hold_list);
 		}
-	} else
-		plugger_set_plug(&conf->plug);
+	}
 }
 
 static void activate_bit_delay(raid5_conf_t *conf)
@@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf)
 	}
 }
 
-void md_raid5_kick_device(raid5_conf_t *conf)
-{
-	blk_flush_plug(current);
-	raid5_activate_delayed(conf);
-	md_wakeup_thread(conf->mddev->thread);
-}
-EXPORT_SYMBOL_GPL(md_raid5_kick_device);
-
-static void raid5_unplug(struct plug_handle *plug)
-{
-	raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-
-	md_raid5_kick_device(conf);
-}
-
 int md_raid5_congested(mddev_t *mddev, int bits)
 {
 	raid5_conf_t *conf = mddev->private;
@@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	struct stripe_head *sh;
 	const int rw = bio_data_dir(bi);
 	int remaining;
+	int plugged;
 
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
@@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
+	plugged = mddev_check_plugged(mddev);
 	for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
 		DEFINE_WAIT(w);
 		int disks, data_disks;
@@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 				 * add failed due to overlap. Flush everything
 				 * and wait a while
 				 */
-				md_raid5_kick_device(conf);
+				md_wakeup_thread(mddev->thread);
 				release_stripe(sh);
 				schedule();
 				goto retry;
@@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi)
 		}
 
 	}
+	if (!plugged)
+		md_wakeup_thread(mddev->thread);
+
 	spin_lock_irq(&conf->device_lock);
 	remaining = raid5_dec_bi_phys_segments(bi);
 	spin_unlock_irq(&conf->device_lock);
@@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev)
 	struct stripe_head *sh;
 	raid5_conf_t *conf = mddev->private;
 	int handled;
+	struct blk_plug plug;
 
 	pr_debug("+++ raid5d active\n");
 
 	md_check_recovery(mddev);
 
+	blk_start_plug(&plug);
 	handled = 0;
 	spin_lock_irq(&conf->device_lock);
 	while (1) {
 		struct bio *bio;
 
-		if (conf->seq_flush != conf->seq_write) {
-			int seq = conf->seq_flush;
+		if (atomic_read(&mddev->plug_cnt) == 0 &&
+		    !list_empty(&conf->bitmap_list)) {
+			/* Now is a good time to flush some bitmap updates */
+			conf->seq_flush++;
 			spin_unlock_irq(&conf->device_lock);
 			bitmap_unplug(mddev->bitmap);
 			spin_lock_irq(&conf->device_lock);
-			conf->seq_write = seq;
+			conf->seq_write = conf->seq_flush;
 			activate_bit_delay(conf);
 		}
+		if (atomic_read(&mddev->plug_cnt) == 0)
+			raid5_activate_delayed(conf);
 
 		while ((bio = remove_bio_from_retry(conf))) {
 			int ok;
@@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev)
 	spin_unlock_irq(&conf->device_lock);
 
 	async_tx_issue_pending_all();
+	blk_finish_plug(&plug);
 
 	pr_debug("--- raid5d inactive\n");
 }
@@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev)
 		       mdname(mddev));
 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
-	plugger_init(&conf->plug, raid5_unplug);
-	mddev->plug = &conf->plug;
 	if (mddev->queue) {
 		int chunk_size;
 		/* read-ahead size must cover two whole stripes, which
@@ -5192,7 +5184,6 @@ static int stop(mddev_t *mddev)
 	mddev->thread = NULL;
 	if (mddev->queue)
 		mddev->queue->backing_dev_info.congested_fn = NULL;
-	plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
 	free_conf(conf);
 	mddev->private = NULL;
 	mddev->to_remove = &raid5_attrs_group;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8d563a4f022a..3ca77a2613ba 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -400,8 +400,6 @@ struct raid5_private_data {
 					    * Cleared when a sync completes.
 					    */
 
-	struct plug_handle	plug;
-
 	/* per cpu variables */
 	struct raid5_percpu {
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 20e4e9395b61..ecafa4ba238b 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
 
 static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
 
-static void gru_noop(unsigned int irq)
+static void gru_noop(struct irq_data *d)
 {
 }
 
 static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
 	[0 ... GRU_CHIPLETS_PER_BLADE - 1] {
-		.mask		= gru_noop,
-		.unmask		= gru_noop,
-		.ack		= gru_noop
+		.irq_mask	= gru_noop,
+		.irq_unmask	= gru_noop,
+		.irq_ack	= gru_noop
 	}
 };
 
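This tracks the genirq overhaul in which irq_chip callbacks were renamed and switched from a bare irq number to struct irq_data. A hedged sketch of the new-style shape (chip and handler names are hypothetical):

static void example_mask(struct irq_data *d)
{
	/* d->irq holds the number the old .mask(unsigned int irq)
	 * callback used to receive directly
	 */
}

static struct irq_chip example_chip = {
	.irq_mask	= example_mask,
	.irq_unmask	= example_mask,
	.irq_ack	= example_mask,
};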
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index 453c54c97612..4c3e94c0ae85 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -25,6 +25,8 @@
 
 #include <mach/balloon3.h>
 
+#include <asm/mach-types.h>
+
 #include "soc_common.h"
 
 /*
@@ -127,6 +129,9 @@ static int __init balloon3_pcmcia_init(void)
 {
 	int ret;
 
+	if (!machine_is_balloon3())
+		return -ENODEV;
+
 	balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
 	if (!balloon3_pcmcia_device)
 		return -ENOMEM;
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index b7e596620db1..b829e655457b 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -69,15 +69,15 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 	for (i = 0; i < ARRAY_SIZE(irqs); i++) {
 		if (irqs[i].sock != skt->nr)
 			continue;
-		if (gpio_request(IRQ_TO_GPIO(irqs[i].irq), irqs[i].str) < 0) {
+		if (gpio_request(irq_to_gpio(irqs[i].irq), irqs[i].str) < 0) {
 			pr_err("%s: sock %d unable to request gpio %d\n",
-				__func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq));
+				__func__, skt->nr, irq_to_gpio(irqs[i].irq));
 			ret = -EBUSY;
 			goto error;
 		}
-		if (gpio_direction_input(IRQ_TO_GPIO(irqs[i].irq)) < 0) {
+		if (gpio_direction_input(irq_to_gpio(irqs[i].irq)) < 0) {
 			pr_err("%s: sock %d unable to set input gpio %d\n",
-				__func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq));
+				__func__, skt->nr, irq_to_gpio(irqs[i].irq));
 			ret = -EINVAL;
 			goto error;
 		}
@@ -86,7 +86,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
 
 error:
 	for (; i >= 0; i--) {
-		gpio_free(IRQ_TO_GPIO(irqs[i].irq));
+		gpio_free(irq_to_gpio(irqs[i].irq));
 	}
 	return (ret);
 }
@@ -97,7 +97,7 @@ static void trizeps_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
 	/* free allocated gpio's */
 	gpio_free(GPIO_PRDY);
 	for (i = 0; i < ARRAY_SIZE(irqs); i++)
-		gpio_free(IRQ_TO_GPIO(irqs[i].irq));
+		gpio_free(irq_to_gpio(irqs[i].irq));
 }
 
 static unsigned long trizeps_pcmcia_status[2];
@@ -226,6 +226,9 @@ static int __init trizeps_pcmcia_init(void)
 {
 	int ret;
 
+	if (!machine_is_trizeps4() && !machine_is_trizeps4wl())
+		return -ENODEV;
+
 	trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
 	if (!trizeps_pcmcia_device)
 		return -ENOMEM;
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index c29719cacbca..86c9a091a2ff 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str)
 
 __setup("riohdid=", rio_hdid_setup);
 
-void rio_register_mport(struct rio_mport *port)
+int rio_register_mport(struct rio_mport *port)
 {
 	if (next_portid >= RIO_MAX_MPORTS) {
 		pr_err("RIO: reached specified max number of mports\n");
-		return;
+		return 1;
 	}
 
 	port->id = next_portid++;
 	port->host_deviceid = rio_get_hdid(port->id);
 	list_add_tail(&port->node, &rio_mports);
+	return 0;
 }
 
 EXPORT_SYMBOL_GPL(rio_local_get_device_id);
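With the int return, an mport driver can fail gracefully when the mport table is full instead of silently registering nothing. A hedged caller sketch (the wrapper below is hypothetical):

static int example_setup_mport(struct rio_mport *port)
{
	if (rio_register_mport(port)) {
		/* table full (RIO_MAX_MPORTS reached): caller can clean up */
		kfree(port);
		return -ENODEV;
	}
	return 0;
}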
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 095016a9dec1..ac2701b22e71 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -418,3 +418,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init);
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 09b4437b3e61..39013867cbd6 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
 	err = __rtc_read_alarm(rtc, &alrm);
 
 	if (!err && !rtc_valid_tm(&alrm.time))
-		rtc_set_alarm(rtc, &alrm);
+		rtc_initialize_alarm(rtc, &alrm);
 
 	strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
 	dev_set_name(&rtc->dev, "rtc%d", id);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 23719f0acbf6..ef6316acec43 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
+/* Called once per device from rtc_device_register */
+int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+	int err;
+
+	err = rtc_valid_tm(&alarm->time);
+	if (err != 0)
+		return err;
+
+	err = mutex_lock_interruptible(&rtc->ops_lock);
+	if (err)
+		return err;
+
+	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+	rtc->aie_timer.period = ktime_set(0, 0);
+	if (alarm->enabled) {
+		rtc->aie_timer.enabled = 1;
+		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+	}
+	mutex_unlock(&rtc->ops_lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
+
+
+
 int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
 {
 	int err = mutex_lock_interruptible(&rtc->ops_lock);
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index a0fc4cf42abf..90d866272c8e 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 		bfin_rtc_int_set_alarm(rtc);
 	else
 		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+
+	return 0;
 }
 
 static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index c42006469559..c5ac03793e79 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -401,6 +401,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = {
 	}, {
 		.name = "mc13892-rtc",
 	},
+	{ }
 };
 
 static struct platform_driver mc13xxx_rtc_driver = {
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 714964913e5e..b3466c491cd3 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev)
 
 	/* do not clear AIE here, it may be needed for wake */
 
-	s3c_rtc_setpie(dev, 0);
 	free_irq(s3c_rtc_alarmno, rtc_dev);
 	free_irq(s3c_rtc_tickno, rtc_dev);
 }
@@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
 	platform_set_drvdata(dev, NULL);
 	rtc_device_unregister(rtc);
 
-	s3c_rtc_setpie(&dev->dev, 0);
 	s3c_rtc_setaie(&dev->dev, 0);
 
 	clk_disable(rtc_clk);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d5c7ff43f5b..ab55c2fa7ce2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
 					       &sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue, false);
+		__blk_run_queue(sdev->request_queue);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index fdf3fa639056..28c33506e4ad 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
 		!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q, false);
+	__blk_run_queue(rport->rqst_q);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
 	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 41b6e51188e4..006489d82dc3 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI
 	default y if ARCH_VT8500
 	default y if PLAT_SPEAR
 	default y if ARCH_MSM
+	default y if MICROBLAZE
 	default PCI
 
 # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index a3d2e2399655..96fdfb815f89 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
221 break; 221 break;
222 case USB_ENDPOINT_XFER_INT: 222 case USB_ENDPOINT_XFER_INT:
223 type = "Int."; 223 type = "Int.";
224 if (speed == USB_SPEED_HIGH) 224 if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
225 interval = 1 << (desc->bInterval - 1); 225 interval = 1 << (desc->bInterval - 1);
226 else 226 else
227 interval = desc->bInterval; 227 interval = desc->bInterval;
@@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
 	default: /* "can't happen" */
 		return start;
 	}
-	interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000;
+	interval *= (speed == USB_SPEED_HIGH ||
+			speed == USB_SPEED_SUPER) ? 125 : 1000;
 	if (interval % 1000)
 		unit = 'u';
 	else {
@@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
 	if (level == 0) {
 		int max;
 
-		/* high speed reserves 80%, full/low reserves 90% */
-		if (usbdev->speed == USB_SPEED_HIGH)
+		/* super/high speed reserves 80%, full/low reserves 90% */
+		if (usbdev->speed == USB_SPEED_HIGH ||
+		    usbdev->speed == USB_SPEED_SUPER)
 			max = 800;
 		else
 			max = FRAME_TIME_MAX_USECS_ALLOC;
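The interval decoding exercised by these devices.c hunks is the same in both places: for high-speed (and, with this change, SuperSpeed) interrupt endpoints the descriptor's bInterval is an exponent, giving a period of 2^(bInterval-1) units of 125 us each, while full/low speed count bInterval frames of 1000 us. A standalone check of that arithmetic (a minimal sketch; ep_period_us is a hypothetical helper, not part of devices.c):

#include <stdio.h>

/* Period in microseconds for an interrupt endpoint, following the
 * decoding above: hs/ss units are 125 us microframes and the
 * descriptor stores an exponent; fs/ls units are 1000 us frames. */
static unsigned int ep_period_us(int high_or_super, unsigned char bInterval)
{
        unsigned int interval;

        if (high_or_super)
                interval = 1u << (bInterval - 1);  /* 2^(bInterval-1) microframes */
        else
                interval = bInterval;              /* bInterval frames */

        return interval * (high_or_super ? 125 : 1000);
}

int main(void)
{
        /* bInterval = 4 at high speed: 2^3 = 8 microframes = 1000 us */
        printf("%u us\n", ep_period_us(1, 4));
        /* bInterval = 4 at full speed: 4 frames = 4000 us */
        printf("%u us\n", ep_period_us(0, 4));
        return 0;
}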
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8eed05d23838..77a7faec8d78 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface,
 
 	/* Streams only apply to bulk endpoints. */
 	for (i = 0; i < num_eps; i++)
-		if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
+		if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
 			return;
 
 	hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8fb754916c67..93720bdc9efd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	}
 
 	/* see 7.1.7.6 */
-	status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND);
+	/* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0
+	 * external hub.
+	 * FIXME: this is a temporary workaround to make the system able
+	 * to suspend/resume.
+	 */
+	if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev))
+		status = clear_port_feature(hub->hdev, port1,
+						USB_PORT_FEAT_POWER);
+	else
+		status = set_port_feature(hub->hdev, port1,
+						USB_PORT_FEAT_SUSPEND);
 	if (status) {
 		dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
 				port1, status);
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 9abecfddb27d..0111f8a9cf7f 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
 	struct f_audio *audio = func_to_audio(f);
 
 	usb_free_descriptors(f->descriptors);
+	usb_free_descriptors(f->hs_descriptors);
 	kfree(audio);
 }
 
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 95dd4662d6a8..b3c304290150 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f)
 
 static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
 {
+	struct sk_buff *skb = (struct sk_buff *)req->context;
+
+	dev_kfree_skb_any(skb);
 }
 
 /*
@@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port,
 			skb_trim(skb2, len);
 			put_unaligned_le16(BIT(15) | BIT(11) | len,
 					skb_push(skb2, 2));
-			skb_copy_bits(skb, 0, req->buf, skb->len);
-			req->length = skb->len;
+			skb_copy_bits(skb2, 0, req->buf, skb2->len);
+			req->length = skb2->len;
 			req->complete = eem_cmd_complete;
 			req->zero = 1;
+			req->context = skb2;
 			if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
 				DBG(cdev, "echo response queue fail\n");
 			break;
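The leak fixed here is in the echo-response path: eem_unwrap() clones the packet into skb2 and queues it, but the old, empty completion handler never freed it. The fix is the usual request/completion hand-off - record the buffer in req->context at queue time and free it in the callback. A minimal user-space sketch of that pattern (hypothetical types and names, not the gadget API itself):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the hand-off used above: whoever queues the request
 * records the buffer in ->context, and the completion callback -
 * which in the real driver may run much later, from interrupt
 * context - is responsible for freeing it. */
struct request {
        void *context;
        void (*complete)(struct request *req);
};

static void cmd_complete(struct request *req)
{
        free(req->context);          /* dev_kfree_skb_any(skb) in the driver */
        printf("buffer released on completion\n");
}

int main(void)
{
        struct request req;

        req.context = malloc(64);    /* stands in for the echoed skb2 */
        req.complete = cmd_complete;

        /* ... hardware finishes the transfer ... */
        req.complete(&req);
        return 0;
}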
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index aee7e3c53c38..36613b37c504 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
 static int txcomplete(struct qe_ep *ep, unsigned char restart)
 {
 	if (ep->tx_req != NULL) {
+		struct qe_req *req = ep->tx_req;
+		unsigned zlp = 0, last_len = 0;
+
+		last_len = min_t(unsigned, req->req.length - ep->sent,
+				ep->ep.maxpacket);
+
 		if (!restart) {
 			int asent = ep->last;
 			ep->sent += asent;
@@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart)
 			ep->last = 0;
 		}
 
+		/* zlp needed when req->req.zero is set */
+		if (req->req.zero) {
+			if (last_len == 0 ||
+				(req->req.length % ep->ep.maxpacket) != 0)
+				zlp = 0;
+			else
+				zlp = 1;
+		} else
+			zlp = 0;
+
 		/* a request already were transmitted completely */
-		if ((ep->tx_req->req.length - ep->sent) <= 0) {
-			ep->tx_req->req.actual = (unsigned int)ep->sent;
+		if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
 			done(ep, ep->tx_req, 0);
 			ep->tx_req = NULL;
 			ep->last = 0;
@@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
 	buf = (u8 *)ep->tx_req->req.buf + ep->sent;
 	if (buf && size) {
 		ep->last = size;
+		ep->tx_req->req.actual += size;
 		frame_set_data(frame, buf);
 		frame_set_length(frame, size);
 		frame_set_status(frame, FRAME_OK);
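The zero-length-packet (ZLP) handling added above follows the gadget API contract: when a request has req.zero set and its length is an exact multiple of the endpoint's maxpacket, the transfer must be terminated by an extra zero-length packet, because the final full-size packet alone does not mark the end. The decision reduces to modular arithmetic; a standalone check (need_zlp is a hypothetical helper name):

#include <stdio.h>

/* Mirror of the ZLP test above: send a trailing zero-length packet
 * only when the zero flag is set and the transfer ends on a maxpacket
 * boundary (last_len != 0 and length % maxpacket == 0). */
static int need_zlp(int zero_flag, unsigned length, unsigned last_len,
                    unsigned maxpacket)
{
        if (!zero_flag)
                return 0;
        if (last_len == 0 || (length % maxpacket) != 0)
                return 0;
        return 1;
}

int main(void)
{
        /* 128-byte transfer on a 64-byte endpoint with zero set: ZLP */
        printf("%d\n", need_zlp(1, 128, 64, 64));
        /* 100-byte transfer: the short 36-byte packet terminates it, no ZLP */
        printf("%d\n", need_zlp(1, 100, 36, 64));
        return 0;
}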
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 3ed73f49cf18..a01383f71f38 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 
 	/* halt any endpoint by doing a "wrong direction" i/o call */
 	if (usb_endpoint_dir_in(&data->desc)) {
-		if (usb_endpoint_xfer_isoc(&data->desc))
+		if (usb_endpoint_xfer_isoc(&data->desc)) {
+			mutex_unlock(&data->lock);
 			return -EINVAL;
+		}
 		DBG (data->dev, "%s halt\n", data->name);
 		spin_lock_irq (&data->dev->lock);
 		if (likely (data->ep != NULL))
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 3e4b35e50c24..68dbcc3e4cc2 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
 		return -EINVAL;
 	if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
 		return -ESHUTDOWN;
-	spin_lock_irqsave(&ep->dev->lock, iflags);
+	spin_lock_irqsave(&dev->lock, iflags);
 	/* map the buffer for dma */
 	if (usbreq->length &&
 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
@@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
 					DMA_FROM_DEVICE);
 	} else {
 		req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
-		if (!req->buf)
-			return -ENOMEM;
+		if (!req->buf) {
+			retval = -ENOMEM;
+			goto probe_end;
+		}
 		if (ep->in) {
 			memcpy(req->buf, usbreq->buf, usbreq->length);
 			req->dma = dma_map_single(&dev->pdev->dev,
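The second hunk matters because of the first: once pch_udc_pcd_queue() takes dev->lock, a bare return -ENOMEM would leave the spinlock held forever. Routing the error through the function's existing unlock label keeps a single exit point. The shape of that pattern, sketched with a pthread mutex standing in for the spinlock (the label name mirrors the patch; everything else is hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Single-exit error handling as in the hunk above: every path that
 * runs after the lock is taken leaves through the unlock label. */
static int queue_request(size_t length)
{
        int retval = 0;
        void *buf;

        pthread_mutex_lock(&lock);

        buf = calloc(1, length ? length : 1);
        if (!buf) {
                retval = -12;        /* -ENOMEM */
                goto probe_end;      /* never return with the lock held */
        }

        printf("queued %zu bytes\n", length);
        free(buf);

probe_end:
        pthread_mutex_unlock(&lock);
        return retval;
}

int main(void)
{
        return queue_request(64) ? 1 : 0;
}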
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 015118535f77..6dcc1f68fa60 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597)
 
 	if (dvsq == DS_DFLT) {
 		/* bus reset */
+		spin_unlock(&r8a66597->lock);
 		r8a66597->driver->disconnect(&r8a66597->gadget);
+		spin_lock(&r8a66597->lock);
 		r8a66597_update_usb_speed(r8a66597);
 	}
 	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 98ded66e8d3f..42abd0f603bf 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 static void scan_async (struct ehci_hcd *ehci)
 {
+	bool stopped;
 	struct ehci_qh *qh;
 	enum ehci_timer_action action = TIMER_IO_WATCHDOG;
 
 	ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
 	timer_action_done (ehci, TIMER_ASYNC_SHRINK);
 rescan:
+	stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
 	qh = ehci->async->qh_next.qh;
 	if (likely (qh != NULL)) {
 		do {
 			/* clean any finished work for this qh */
-			if (!list_empty (&qh->qtd_list)
-					&& qh->stamp != ehci->stamp) {
+			if (!list_empty(&qh->qtd_list) && (stopped ||
+					qh->stamp != ehci->stamp)) {
 				int temp;
 
 				/* unlinks could happen here; completion
 				 * reporting drops the lock. rescan using
 				 * the latest schedule, but don't rescan
-				 * qhs we already finished (no looping).
+				 * qhs we already finished (no looping)
+				 * unless the controller is stopped.
 				 */
 				qh = qh_get (qh);
 				qh->stamp = ehci->stamp;
@@ -1285,9 +1288,9 @@ rescan:
 		 */
 		if (list_empty(&qh->qtd_list)
 				&& qh->qh_state == QH_STATE_LINKED) {
-			if (!ehci->reclaim
-					&& ((ehci->stamp - qh->stamp) & 0x1fff)
-						>= (EHCI_SHRINK_FRAMES * 8))
+			if (!ehci->reclaim && (stopped ||
+					((ehci->stamp - qh->stamp) & 0x1fff)
+						>= EHCI_SHRINK_FRAMES * 8))
 				start_unlink_async(ehci, qh);
 			else
 				action = TIMER_ASYNC_SHRINK;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index f50e84ac570a..795345ad45e6 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
 	}
 
 	dev_err(hcd->self.controller,
-			"%s: Can not allocate %lu bytes of memory\n"
+			"%s: Cannot allocate %zu bytes of memory\n"
 			"Current memory map:\n",
 			__func__, qtd->length);
 	for (i = 0; i < BLOCKS; i++) {
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 17a6043c1fa0..958d985f2951 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -33,7 +33,7 @@
 
 #ifdef __LITTLE_ENDIAN
 #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
-#elif __BIG_ENDIAN
+#elif defined(__BIG_ENDIAN)
 #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
 			  USBH_ENABLE_BE)
 #else
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 1d586d4f7b56..9b166d70ae91 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void)
 {
 	u8 rev = 0;
 	unsigned long flags;
+	struct amd_chipset_info info;
+	int ret;
 
 	spin_lock_irqsave(&amd_lock, flags);
 
-	amd_chipset.probe_count++;
 	/* probe only once */
-	if (amd_chipset.probe_count > 1) {
+	if (amd_chipset.probe_count > 0) {
+		amd_chipset.probe_count++;
 		spin_unlock_irqrestore(&amd_lock, flags);
 		return amd_chipset.probe_result;
 	}
+	memset(&info, 0, sizeof(info));
+	spin_unlock_irqrestore(&amd_lock, flags);
 
-	amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
-	if (amd_chipset.smbus_dev) {
-		rev = amd_chipset.smbus_dev->revision;
+	info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+	if (info.smbus_dev) {
+		rev = info.smbus_dev->revision;
 		if (rev >= 0x40)
-			amd_chipset.sb_type = 1;
+			info.sb_type = 1;
 		else if (rev >= 0x30 && rev <= 0x3b)
-			amd_chipset.sb_type = 3;
+			info.sb_type = 3;
 	} else {
-		amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-							0x780b, NULL);
-		if (!amd_chipset.smbus_dev) {
-			spin_unlock_irqrestore(&amd_lock, flags);
-			return 0;
+		info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+						0x780b, NULL);
+		if (!info.smbus_dev) {
+			ret = 0;
+			goto commit;
 		}
-		rev = amd_chipset.smbus_dev->revision;
+
+		rev = info.smbus_dev->revision;
 		if (rev >= 0x11 && rev <= 0x18)
-			amd_chipset.sb_type = 2;
+			info.sb_type = 2;
 	}
 
-	if (amd_chipset.sb_type == 0) {
-		if (amd_chipset.smbus_dev) {
-			pci_dev_put(amd_chipset.smbus_dev);
-			amd_chipset.smbus_dev = NULL;
+	if (info.sb_type == 0) {
+		if (info.smbus_dev) {
+			pci_dev_put(info.smbus_dev);
+			info.smbus_dev = NULL;
 		}
-		spin_unlock_irqrestore(&amd_lock, flags);
-		return 0;
+		ret = 0;
+		goto commit;
 	}
 
-	amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
-	if (amd_chipset.nb_dev) {
-		amd_chipset.nb_type = 1;
-	} else {
-		amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-							0x1510, NULL);
-		if (amd_chipset.nb_dev) {
-			amd_chipset.nb_type = 2;
-		} else {
-			amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-								0x9600, NULL);
-			if (amd_chipset.nb_dev)
-				amd_chipset.nb_type = 3;
+	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
+	if (info.nb_dev) {
+		info.nb_type = 1;
+	} else {
+		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+		if (info.nb_dev) {
+			info.nb_type = 2;
+		} else {
+			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+							0x9600, NULL);
+			if (info.nb_dev)
+				info.nb_type = 3;
 		}
 	}
 
-	amd_chipset.probe_result = 1;
+	ret = info.probe_result = 1;
 	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 
-	spin_unlock_irqrestore(&amd_lock, flags);
-	return amd_chipset.probe_result;
+commit:
+
+	spin_lock_irqsave(&amd_lock, flags);
+	if (amd_chipset.probe_count > 0) {
+		/* race - someone else was faster - drop devices */
+
+		/* Mark that we were here */
+		amd_chipset.probe_count++;
+		ret = amd_chipset.probe_result;
+
+		spin_unlock_irqrestore(&amd_lock, flags);
+
+		if (info.nb_dev)
+			pci_dev_put(info.nb_dev);
+		if (info.smbus_dev)
+			pci_dev_put(info.smbus_dev);
+
+	} else {
+		/* no race - commit the result */
+		info.probe_count++;
+		amd_chipset = info;
+		spin_unlock_irqrestore(&amd_lock, flags);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
 
@@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
 
 void usb_amd_dev_put(void)
 {
+	struct pci_dev *nb, *smbus;
 	unsigned long flags;
 
 	spin_lock_irqsave(&amd_lock, flags);
@@ -294,20 +322,23 @@ void usb_amd_dev_put(void)
 		return;
 	}
 
-	if (amd_chipset.nb_dev) {
-		pci_dev_put(amd_chipset.nb_dev);
-		amd_chipset.nb_dev = NULL;
-	}
-	if (amd_chipset.smbus_dev) {
-		pci_dev_put(amd_chipset.smbus_dev);
-		amd_chipset.smbus_dev = NULL;
-	}
+	/* save them to pci_dev_put outside of spinlock */
+	nb = amd_chipset.nb_dev;
+	smbus = amd_chipset.smbus_dev;
+
+	amd_chipset.nb_dev = NULL;
+	amd_chipset.smbus_dev = NULL;
 	amd_chipset.nb_type = 0;
 	amd_chipset.sb_type = 0;
 	amd_chipset.isoc_reqs = 0;
 	amd_chipset.probe_result = 0;
 
 	spin_unlock_irqrestore(&amd_lock, flags);
+
+	if (nb)
+		pci_dev_put(nb);
+	if (smbus)
+		pci_dev_put(smbus);
 }
 EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 
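Both pci-quirks changes are driven by the same constraint: pci_get_device() and pci_dev_put() may sleep, so they must not run under the amd_lock spinlock. The probe therefore fills a local amd_chipset_info with the lock dropped, retakes the lock, and either commits the local copy or - if another CPU finished first - reuses the winner's result and drops its own device references after unlocking; usb_amd_dev_put() likewise snapshots the pointers under the lock and releases them afterwards. A condensed user-space model of the commit step (pthread mutex standing in for the spinlock; field names are hypothetical):

#include <pthread.h>
#include <stdio.h>

struct info {
        int probe_count;
        int probe_result;
};

static struct info shared;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Model of the probe/commit split above: do the slow probing without
 * the lock held, then commit under the lock only if nobody else
 * finished first; the loser just reuses the winner's result. */
static int find_chipset_info(void)
{
        struct info local = { 0, 0 };
        int ret;

        pthread_mutex_lock(&lock);
        if (shared.probe_count > 0) {        /* already probed */
                shared.probe_count++;
                ret = shared.probe_result;
                pthread_mutex_unlock(&lock);
                return ret;
        }
        pthread_mutex_unlock(&lock);

        local.probe_result = 1;              /* slow probing happens here */

        pthread_mutex_lock(&lock);
        if (shared.probe_count > 0) {
                /* race: someone else committed meanwhile - drop our copy */
                shared.probe_count++;
                ret = shared.probe_result;
        } else {
                local.probe_count = 1;
                shared = local;
                ret = local.probe_result;
        }
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("result %d, count %d\n", find_chipset_info(), shared.probe_count);
        return 0;
}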
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a003e79aacdc..627f3438028c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
 		 * Skip ports that don't have known speeds, or have duplicate
 		 * Extended Capabilities port speed entries.
 		 */
-		if (port_speed == 0 || port_speed == -1)
+		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
 			continue;
 
 		/*
@@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	return 0;
 }
 
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ *
+ */
+static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int interval;
+
+	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
+	if (interval != ep->desc.bInterval - 1)
+		dev_warn(&udev->dev,
+			"ep %#x - rounding interval to %d microframes\n",
+			ep->desc.bEndpointAddress,
+			1 << interval);
+
+	return interval;
+}
+
+/*
+ * Convert bInterval expressed in frames (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	unsigned int interval;
+
+	interval = fls(8 * ep->desc.bInterval) - 1;
+	interval = clamp_val(interval, 3, 10);
+	if ((1 << interval) != 8 * ep->desc.bInterval)
+		dev_warn(&udev->dev,
+			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
+			ep->desc.bEndpointAddress,
+			1 << interval,
+			8 * ep->desc.bInterval);
+
+	return interval;
+}
+
 /* Return the polling or NAK interval.
  *
  * The polling interval is expressed in "microframes". If xHCI's Interval field
@@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
  * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
  * is set to 0.
  */
-static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	unsigned int interval = 0;
@@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
 	case USB_SPEED_HIGH:
 		/* Max NAK rate */
 		if (usb_endpoint_xfer_control(&ep->desc) ||
-		    usb_endpoint_xfer_bulk(&ep->desc))
+		    usb_endpoint_xfer_bulk(&ep->desc)) {
 			interval = ep->desc.bInterval;
+			break;
+		}
 		/* Fall through - SS and HS isoc/int have same decoding */
+
 	case USB_SPEED_SUPER:
 		if (usb_endpoint_xfer_int(&ep->desc) ||
 		    usb_endpoint_xfer_isoc(&ep->desc)) {
-			if (ep->desc.bInterval == 0)
-				interval = 0;
-			else
-				interval = ep->desc.bInterval - 1;
-			if (interval > 15)
-				interval = 15;
-			if (interval != ep->desc.bInterval + 1)
-				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
-						ep->desc.bEndpointAddress, 1 << interval);
+			interval = xhci_parse_exponent_interval(udev, ep);
 		}
 		break;
-	/* Convert bInterval (in 1-255 frames) to microframes and round down to
-	 * nearest power of 2.
-	 */
+
 	case USB_SPEED_FULL:
+		if (usb_endpoint_xfer_int(&ep->desc)) {
+			interval = xhci_parse_exponent_interval(udev, ep);
+			break;
+		}
+		/*
+		 * Fall through for isochronous endpoint interval decoding
+		 * since it uses the same rules as low speed interrupt
+		 * endpoints.
+		 */
+
 	case USB_SPEED_LOW:
 		if (usb_endpoint_xfer_int(&ep->desc) ||
 		    usb_endpoint_xfer_isoc(&ep->desc)) {
-			interval = fls(8*ep->desc.bInterval) - 1;
-			if (interval > 10)
-				interval = 10;
-			if (interval < 3)
-				interval = 3;
-			if ((1 << interval) != 8*ep->desc.bInterval)
-				dev_warn(&udev->dev,
-						"ep %#x - rounding interval"
-						" to %d microframes, "
-						"ep desc says %d microframes\n",
-						ep->desc.bEndpointAddress,
-						1 << interval,
-						8*ep->desc.bInterval);
+
+			interval = xhci_parse_frame_interval(udev, ep);
 		}
 		break;
+
 	default:
 		BUG();
 	}
@@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
  * transaction opportunities per microframe", but that goes in the Max Burst
  * endpoint context field.
  */
-static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+static u32 xhci_get_endpoint_mult(struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	if (udev->speed != USB_SPEED_SUPER ||
@@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
 	return ep->ss_ep_comp.bmAttributes;
 }
 
-static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+static u32 xhci_get_endpoint_type(struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
 	int in;
@@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
  * Basically, this is the maxpacket size, multiplied by the burst size
  * and mult size.
  */
-static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 		struct usb_device *udev,
 		struct usb_host_endpoint *ep)
 {
@@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		 * found a similar duplicate.
 		 */
 		if (xhci->port_array[i] != major_revision &&
-				xhci->port_array[i] != (u8) -1) {
+				xhci->port_array[i] != DUPLICATE_ENTRY) {
 			if (xhci->port_array[i] == 0x03)
 				xhci->num_usb3_ports--;
 			else
 				xhci->num_usb2_ports--;
-			xhci->port_array[i] = (u8) -1;
+			xhci->port_array[i] = DUPLICATE_ENTRY;
 		}
 		/* FIXME: Should we disable the port? */
 		continue;
@@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 	for (i = 0; i < num_ports; i++) {
 		if (xhci->port_array[i] == 0x03 ||
 				xhci->port_array[i] == 0 ||
-				xhci->port_array[i] == -1)
+				xhci->port_array[i] == DUPLICATE_ENTRY)
 			continue;
 
 		xhci->usb2_ports[port_index] =
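The two helpers factored out of xhci_get_endpoint_interval() above implement the spec's two encodings: an exponent form, 2^(bInterval-1) microframes clamped to 2^0..2^15, used by high/SuperSpeed endpoints and (after this change) full-speed interrupt endpoints, and a frame-count form, bInterval frames rounded down to a power of two between 2^3 and 2^10 microframes, used by full-speed isoc and low-speed endpoints. Checked standalone (hypothetical helper names; fls re-implemented):

#include <stdio.h>

/* find-last-set, as used above: 1-based index of the highest set bit */
static unsigned int fls_u(unsigned int v)
{
        unsigned int r = 0;
        while (v) { r++; v >>= 1; }
        return r;
}

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* exponent form: 2^(bInterval-1) microframes, exponent clamped to 0..15 */
static unsigned int parse_exponent_interval(unsigned char bInterval)
{
        return clamp_u(bInterval, 1, 16) - 1;
}

/* frame form: bInterval frames -> exponent of microframes, rounded down,
 * clamped to 3..10 */
static unsigned int parse_frame_interval(unsigned char bInterval)
{
        return clamp_u(fls_u(8 * bInterval) - 1, 3, 10);
}

int main(void)
{
        /* exponent encoding, bInterval = 9: clamp(9,1,16)-1 = 8 -> 2^8 */
        printf("2^%u microframes\n", parse_exponent_interval(9));
        /* frame encoding, bInterval = 9 frames = 72 microframes:
         * fls(72)-1 = 6 -> 2^6 = 64 microframes (rounded down) */
        printf("2^%u microframes\n", parse_frame_interval(9));
        return 0;
}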
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ceea9f33491c..a10494c2f3c7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
 		xhci->quirks |= XHCI_NEC_HOST;
 
+	/* AMD PLL quirk */
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+		xhci->quirks |= XHCI_AMD_PLL_FIX;
+
 	/* Make sure the HC is halted. */
 	retval = xhci_halt(xhci);
 	if (retval)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cfc1ad92473f..7437386a9a50 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 /* Does this link TRB point to the first segment in a ring,
  * or was the previous TRB the last TRB on the last segment in the ERST?
  */
-static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		struct xhci_segment *seg, union xhci_trb *trb)
 {
 	if (ring == xhci->event_ring)
@@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring
  * segment? I.e. would the updated event TRB pointer step off the end of the
  * event seg?
  */
-static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		struct xhci_segment *seg, union xhci_trb *trb)
 {
 	if (ring == xhci->event_ring)
@@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
 }
 
-static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+static int enqueue_is_link_trb(struct xhci_ring *ring)
 {
 	struct xhci_link_trb *link = &ring->enqueue->link;
 	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
@@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 	ep->ep_state |= SET_DEQ_PENDING;
 }
 
-static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 		struct xhci_virt_ep *ep)
 {
 	ep->ep_state &= ~EP_HALT_PENDING;
@@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 
 	/* Only giveback urb when this is the last td in urb */
 	if (urb_priv->td_cnt == urb_priv->length) {
+		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+				if (xhci->quirks & XHCI_AMD_PLL_FIX)
+					usb_amd_quirk_pll_enable();
+			}
+		}
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
 
@@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
 	 * Skip ports that don't have known speeds, or have duplicate
 	 * Extended Capabilities port speed entries.
 	 */
-	if (port_speed == 0 || port_speed == -1)
+	if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
 		continue;
 
 	/*
@@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 	u8 major_revision;
 	struct xhci_bus_state *bus_state;
 	u32 __iomem **port_array;
+	bool bogus_port_status = false;
 
 	/* Port status change events always have a successful completion code */
 	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
@@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
 	if ((port_id <= 0) || (port_id > max_ports)) {
 		xhci_warn(xhci, "Invalid port id %d\n", port_id);
+		bogus_port_status = true;
 		goto cleanup;
 	}
 
@@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
1258 xhci_warn(xhci, "Event for port %u not in " 1267 xhci_warn(xhci, "Event for port %u not in "
1259 "Extended Capabilities, ignoring.\n", 1268 "Extended Capabilities, ignoring.\n",
1260 port_id); 1269 port_id);
1270 bogus_port_status = true;
1261 goto cleanup; 1271 goto cleanup;
1262 } 1272 }
1263 if (major_revision == (u8) -1) { 1273 if (major_revision == DUPLICATE_ENTRY) {
1264 xhci_warn(xhci, "Event for port %u duplicated in" 1274 xhci_warn(xhci, "Event for port %u duplicated in"
1265 "Extended Capabilities, ignoring.\n", 1275 "Extended Capabilities, ignoring.\n",
1266 port_id); 1276 port_id);
1277 bogus_port_status = true;
1267 goto cleanup; 1278 goto cleanup;
1268 } 1279 }
1269 1280
@@ -1335,6 +1346,13 @@ cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
 	inc_deq(xhci, xhci->event_ring, true);
 
+	/* Don't make the USB core poll the roothub if we got a bad port status
+	 * change event. Besides, at that point we can't tell which roothub
+	 * (USB 2.0 or USB 3.0) to kick.
+	 */
+	if (bogus_port_status)
+		return;
+
 	spin_unlock(&xhci->lock);
 	/* Pass this up to the core */
 	usb_hcd_poll_rh_status(hcd);
@@ -1554,8 +1572,17 @@ td_cleanup:
 
 		urb_priv->td_cnt++;
 		/* Giveback the urb when all the tds are completed */
-		if (urb_priv->td_cnt == urb_priv->length)
+		if (urb_priv->td_cnt == urb_priv->length) {
 			ret = 1;
+			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
+					== 0) {
+					if (xhci->quirks & XHCI_AMD_PLL_FIX)
+						usb_amd_quirk_pll_enable();
+				}
+			}
+		}
 	}
 
 	return ret;
@@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	struct urb_priv *urb_priv;
 	int idx;
 	int len = 0;
-	int skip_td = 0;
 	union xhci_trb *cur_trb;
 	struct xhci_segment *cur_seg;
+	struct usb_iso_packet_descriptor *frame;
 	u32 trb_comp_code;
+	bool skip_td = false;
 
 	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
 	trb_comp_code = GET_COMP_CODE(event->transfer_len);
 	urb_priv = td->urb->hcpriv;
 	idx = urb_priv->td_cnt;
+	frame = &td->urb->iso_frame_desc[idx];
 
-	if (ep->skip) {
-		/* The transfer is partly done */
-		*status = -EXDEV;
-		td->urb->iso_frame_desc[idx].status = -EXDEV;
-	} else {
-		/* handle completion code */
-		switch (trb_comp_code) {
-		case COMP_SUCCESS:
-			td->urb->iso_frame_desc[idx].status = 0;
-			xhci_dbg(xhci, "Successful isoc transfer!\n");
-			break;
-		case COMP_SHORT_TX:
-			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-				td->urb->iso_frame_desc[idx].status =
-					-EREMOTEIO;
-			else
-				td->urb->iso_frame_desc[idx].status = 0;
-			break;
-		case COMP_BW_OVER:
-			td->urb->iso_frame_desc[idx].status = -ECOMM;
-			skip_td = 1;
-			break;
-		case COMP_BUFF_OVER:
-		case COMP_BABBLE:
-			td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
-			skip_td = 1;
-			break;
-		case COMP_STALL:
-			td->urb->iso_frame_desc[idx].status = -EPROTO;
-			skip_td = 1;
-			break;
-		case COMP_STOP:
-		case COMP_STOP_INVAL:
-			break;
-		default:
-			td->urb->iso_frame_desc[idx].status = -1;
-			break;
-		}
-	}
-
-	/* calc actual length */
-	if (ep->skip) {
-		td->urb->iso_frame_desc[idx].actual_length = 0;
-		/* Update ring dequeue pointer */
-		while (ep_ring->dequeue != td->last_trb)
-			inc_deq(xhci, ep_ring, false);
-		inc_deq(xhci, ep_ring, false);
-		return finish_td(xhci, td, event_trb, event, ep, status, true);
+	/* handle completion code */
+	switch (trb_comp_code) {
+	case COMP_SUCCESS:
+		frame->status = 0;
+		xhci_dbg(xhci, "Successful isoc transfer!\n");
+		break;
+	case COMP_SHORT_TX:
+		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+				-EREMOTEIO : 0;
+		break;
+	case COMP_BW_OVER:
+		frame->status = -ECOMM;
+		skip_td = true;
+		break;
+	case COMP_BUFF_OVER:
+	case COMP_BABBLE:
+		frame->status = -EOVERFLOW;
+		skip_td = true;
+		break;
+	case COMP_STALL:
+		frame->status = -EPROTO;
+		skip_td = true;
+		break;
+	case COMP_STOP:
+	case COMP_STOP_INVAL:
+		break;
+	default:
+		frame->status = -1;
+		break;
 	}
 
-	if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
-		td->urb->iso_frame_desc[idx].actual_length =
-			td->urb->iso_frame_desc[idx].length;
-		td->urb->actual_length +=
-			td->urb->iso_frame_desc[idx].length;
+	if (trb_comp_code == COMP_SUCCESS || skip_td) {
+		frame->actual_length = frame->length;
+		td->urb->actual_length += frame->length;
 	} else {
 		for (cur_trb = ep_ring->dequeue,
 				cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
@@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 				TRB_LEN(event->transfer_len);
 
 		if (trb_comp_code != COMP_STOP_INVAL) {
-			td->urb->iso_frame_desc[idx].actual_length = len;
+			frame->actual_length = len;
 			td->urb->actual_length += len;
 		}
 	}
@@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	return finish_td(xhci, td, event_trb, event, ep, status, false);
 }
 
+static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+			struct xhci_transfer_event *event,
+			struct xhci_virt_ep *ep, int *status)
+{
+	struct xhci_ring *ep_ring;
+	struct urb_priv *urb_priv;
+	struct usb_iso_packet_descriptor *frame;
+	int idx;
+
+	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+	urb_priv = td->urb->hcpriv;
+	idx = urb_priv->td_cnt;
+	frame = &td->urb->iso_frame_desc[idx];
+
+	/* The transfer is partly done */
+	*status = -EXDEV;
+	frame->status = -EXDEV;
+
+	/* calc actual length */
+	frame->actual_length = 0;
+
+	/* Update ring dequeue pointer */
+	while (ep_ring->dequeue != td->last_trb)
+		inc_deq(xhci, ep_ring, false);
+	inc_deq(xhci, ep_ring, false);
+
+	return finish_td(xhci, td, NULL, event, ep, status, true);
+}
+
 /*
  * Process bulk and interrupt tds, update urb status and actual_length.
  */
@@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	}
 
 	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
 	/* Is this a TRB in the currently executing TD? */
 	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
 			td->last_trb, event_dma);
-	if (event_seg && ep->skip) {
+	if (!event_seg) {
+		if (!ep->skip ||
+		    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+			/* HC is busted, give up! */
+			xhci_err(xhci,
+				"ERROR Transfer event TRB DMA ptr not "
+				"part of current TD\n");
+			return -ESHUTDOWN;
+		}
+
+		ret = skip_isoc_td(xhci, td, event, ep, &status);
+		goto cleanup;
+	}
+
+	if (ep->skip) {
 		xhci_dbg(xhci, "Found td. Clear skip flag.\n");
 		ep->skip = false;
 	}
-	if (!event_seg &&
-	    (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
-		/* HC is busted, give up! */
-		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
-				"part of current TD\n");
-		return -ESHUTDOWN;
-	}
 
-	if (event_seg) {
-		event_trb = &event_seg->trbs[(event_dma -
-				event_seg->dma) / sizeof(*event_trb)];
-		/*
-		 * No-op TRB should not trigger interrupts.
-		 * If event_trb is a no-op TRB, it means the
-		 * corresponding TD has been cancelled. Just ignore
-		 * the TD.
-		 */
-		if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
-				== TRB_TYPE(TRB_TR_NOOP)) {
-			xhci_dbg(xhci, "event_trb is a no-op TRB. "
-					"Skip it\n");
-			goto cleanup;
-		}
-	}
+	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
+						sizeof(*event_trb)];
+	/*
+	 * No-op TRB should not trigger interrupts.
+	 * If event_trb is a no-op TRB, it means the
+	 * corresponding TD has been cancelled. Just ignore
+	 * the TD.
+	 */
+	if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+			== TRB_TYPE(TRB_TR_NOOP)) {
+		xhci_dbg(xhci,
+			"event_trb is a no-op TRB. Skip it\n");
+		goto cleanup;
+	}
 
 	/* Now update the urb's actual_length and give back to
@@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		}
 	}
 
+	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+		if (xhci->quirks & XHCI_AMD_PLL_FIX)
+			usb_amd_quirk_pll_disable();
+	}
+	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
+
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
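Taken together, the xhci-ring hunks keep a running count of in-flight isochronous URBs in bandwidth_isoc_reqs: the queuing hunk just above disables the AMD PLL when the count rises from zero, and the two giveback hunks re-enable it once the count drops back to zero. The pattern in isolation (a sketch with hypothetical hook names):

#include <stdio.h>

static int isoc_reqs;

static void quirk_pll_disable(void) { printf("PLL quirk: disable\n"); }
static void quirk_pll_enable(void)  { printf("PLL quirk: enable\n"); }

/* On queue: the first isoc request in flight turns the quirk on. */
static void isoc_queue(void)
{
        if (isoc_reqs == 0)
                quirk_pll_disable();
        isoc_reqs++;
}

/* On giveback: the last isoc request completing turns it back off. */
static void isoc_giveback(void)
{
        isoc_reqs--;
        if (isoc_reqs == 0)
                quirk_pll_enable();
}

int main(void)
{
        isoc_queue();     /* disable once */
        isoc_queue();     /* nested: no-op */
        isoc_giveback();
        isoc_giveback();  /* enable once */
        return 0;
}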
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 196e0181b2ed..81b976e45880 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd)
 	del_timer_sync(&xhci->event_ring_timer);
 #endif
 
+	if (xhci->quirks & XHCI_AMD_PLL_FIX)
+		usb_amd_dev_put();
+
 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
 	temp = xhci_readl(xhci, &xhci->op_regs->status);
 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
 	/* If restore operation fails, re-initialize the HC during resume */
 	if ((temp & STS_SRE) || hibernated) {
-		usb_root_hub_lost_power(hcd->self.root_hub);
+		/* Let the USB core know _both_ roothubs lost power. */
+		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
 
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
@@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
 	last_freed_endpoint = 1;
 	for (i = 1; i < 31; ++i) {
-		if (!virt_dev->eps[i].ring)
-			continue;
-		xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
-		last_freed_endpoint = i;
+		struct xhci_virt_ep *ep = &virt_dev->eps[i];
+
+		if (ep->ep_state & EP_HAS_STREAMS) {
+			xhci_free_stream_info(xhci, ep->stream_info);
+			ep->stream_info = NULL;
+			ep->ep_state &= ~EP_HAS_STREAMS;
+		}
+
+		if (ep->ring) {
+			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+			last_freed_endpoint = i;
+		}
 	}
 	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 07e263063e37..ba1be6b7cc6d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -30,6 +30,7 @@
 
 /* Code sharing between pci-quirks and xhci hcd */
 #include "xhci-ext-caps.h"
+#include "pci-quirks.h"
 
 /* xHCI PCI Configuration Registers */
 #define XHCI_SBRN_OFFSET (0x60)
@@ -232,7 +233,7 @@ struct xhci_op_regs {
  * notification type that matches a bit set in this bit field.
  */
 #define DEV_NOTE_MASK (0xffff)
-#define ENABLE_DEV_NOTE(x) (1 << x)
+#define ENABLE_DEV_NOTE(x) (1 << (x))
 /* Most of the device notification types should only be used for debug.
  * SW does need to pay attention to function wake notifications.
  */
@@ -348,6 +349,9 @@ struct xhci_op_regs {
 /* Initiate a warm port reset - complete when PORT_WRC is '1' */
 #define PORT_WR (1 << 31)
 
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
 /* Port Power Management Status and Control - port_power_base bitmasks */
 /* Inactivity timer value for transitions into U1, in microseconds.
  * Timeout can be up to 127us. 0xFF means an infinite timeout.
@@ -601,11 +605,11 @@ struct xhci_ep_ctx {
 #define EP_STATE_STOPPED 3
 #define EP_STATE_ERROR 4
 /* Mult - Max number of burtst within an interval, in EP companion desc. */
-#define EP_MULT(p) ((p & 0x3) << 8)
+#define EP_MULT(p) (((p) & 0x3) << 8)
 /* bits 10:14 are Max Primary Streams */
 /* bit 15 is Linear Stream Array */
 /* Interval - period between requests to an endpoint - 125u increments. */
-#define EP_INTERVAL(p) ((p & 0xff) << 16)
+#define EP_INTERVAL(p) (((p) & 0xff) << 16)
 #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
 #define EP_MAXPSTREAMS_MASK (0x1f << 10)
 #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
@@ -1276,6 +1280,7 @@ struct xhci_hcd {
 #define XHCI_LINK_TRB_QUIRK (1 << 0)
 #define XHCI_RESET_EP_QUIRK (1 << 1)
 #define XHCI_NEC_HOST (1 << 2)
+#define XHCI_AMD_PLL_FIX (1 << 3)
 	/* There are two roothubs to keep track of bus suspend info for */
 	struct xhci_bus_state bus_state[2];
 	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 4cbb7e4b368d..74073b363c30 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -14,7 +14,7 @@ config USB_MUSB_HDRC
 	select TWL4030_USB if MACH_OMAP_3430SDP
 	select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
 	select USB_OTG_UTILS
-	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+	bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
 	help
 	  Say Y here if your system has a dual role high speed USB
 	  controller based on the Mentor Graphics silicon IP.  Then
@@ -30,8 +30,8 @@ config USB_MUSB_HDRC
 
 	  If you do not know what this is, please say N.
 
-	  To compile this driver as a module, choose M here; the
-	  module will be called "musb-hdrc".
+#	  To compile this driver as a module, choose M here; the
+#	  module will be called "musb-hdrc".
 
 choice
 	prompt "Platform Glue Layer"
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 52312e8af213..8e2a1ff8a35a 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -21,6 +21,7 @@
 #include <asm/cacheflush.h>
 
 #include "musb_core.h"
+#include "musbhsdma.h"
 #include "blackfin.h"
 
 struct bfin_glue {
@@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
 	return -EIO;
 }
 
+static int bfin_musb_adjust_channel_params(struct dma_channel *channel,
+				u16 packet_sz, u8 *mode,
+				dma_addr_t *dma_addr, u32 *len)
+{
+	struct musb_dma_channel *musb_channel = channel->private_data;
+
+	/*
+	 * Anomaly 05000450 might cause data corruption when using DMA
+	 * MODE 1 transmits with short packet. So to work around this,
+	 * we truncate all MODE 1 transfers down to a multiple of the
+	 * max packet size, and then do the last short packet transfer
+	 * (if there is any) using MODE 0.
+	 */
+	if (ANOMALY_05000450) {
+		if (musb_channel->transmit && *mode == 1)
+			*len = *len - (*len % packet_sz);
+	}
+
+	return 0;
+}
+
 static void bfin_musb_reg_init(struct musb *musb)
 {
 	if (ANOMALY_05000346) {
@@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = {
 
 	.vbus_status	= bfin_musb_vbus_status,
 	.set_vbus	= bfin_musb_set_vbus,
+
+	.adjust_channel_params = bfin_musb_adjust_channel_params,
 };
 
 static u64 bfin_dmamask = DMA_BIT_MASK(32);
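The anomaly 05000450 workaround is pure arithmetic: round every DMA MODE 1 transmit down to a whole number of max-size packets and leave the short tail, if any, to MODE 0. Checked standalone (mode1_len is a hypothetical helper name):

#include <stdio.h>

/* Truncation used above: keep only whole max-size packets for MODE 1. */
static unsigned int mode1_len(unsigned int len, unsigned int packet_sz)
{
        return len - (len % packet_sz);
}

int main(void)
{
        /* 1000 bytes on a 64-byte endpoint: 960 bytes (15 full packets)
         * go via MODE 1, the remaining 40 via a MODE 0 transfer. */
        unsigned int len = 1000, packet_sz = 64;
        unsigned int dma = mode1_len(len, packet_sz);

        printf("MODE 1: %u bytes, MODE 0 tail: %u bytes\n", dma, len - dma);
        return 0;
}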
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index de55a3c3259a..ab434fbd8c35 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
 		length = min(n_bds * maxpacket, length);
 	}
 
-	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+	DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
 			tx->index,
 			maxpacket,
 			rndis ? "rndis" : "transparent",
 			n_bds,
-			addr, length);
+			(unsigned long long)addr, length);
 
 	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
 
@@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
 	length = min(n_bds * maxpacket, length);
 
 	DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
-		"dma 0x%x len %u %u/%u\n",
+		"dma 0x%llx len %u %u/%u\n",
 		rx->index, maxpacket,
 		onepacket
 			? (is_rndis ? "rndis" : "onepacket")
@@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
 		musb_readl(tibase,
 			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
 			& 0xffff,
-		addr, length, rx->channel.actual_len, rx->buf_len);
+		(unsigned long long)addr, length,
+		rx->channel.actual_len, rx->buf_len);
 
 	/* only queue one segment at a time, since the hardware prevents
 	 * correct queue shutdown after unexpected short packets
@@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
 		if (!completed && (bd->hw_options & CPPI_OWN_SET))
 			break;
 
-		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+		DBG(5, "C/RXBD %llx: nxt %08x buf %08x "
 			"off.len %08x opt.len %08x (%d)\n",
-			bd->dma, bd->hw_next, bd->hw_bufp,
+			(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
 			bd->hw_off_len, bd->hw_options,
 			rx->channel.actual_len);
 
@@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
 		musb_ep_select(cppi->mregs, rx->index + 1);
 		csr = musb_readw(regs, MUSB_RXCSR);
 		if (csr & MUSB_RXCSR_DMAENAB) {
-			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+			DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n",
 				rx->index,
 				rx->head, rx->tail,
 				rx->last_processed
-					? rx->last_processed->dma
+					? (unsigned long long)
+						rx->last_processed->dma
 					: 0,
 				completed ? ", completed" : "",
 				csr);
@@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1167 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); 1169 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
1168 rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); 1170 rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
1169 1171
1170 if (!tx && !rx) 1172 if (!tx && !rx) {
1173 if (cppi->irq)
1174 spin_unlock_irqrestore(&musb->lock, flags);
1171 return IRQ_NONE; 1175 return IRQ_NONE;
1176 }
1172 1177
1173 DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); 1178 DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
1174 1179
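
The hunk above fixes a lock leak: when cppi_interrupt() has taken musb->lock (it does so only when cppi->irq is set), the early IRQ_NONE return skipped the matching unlock. A runnable userspace analogue of the balanced early-return pattern, with a pthread mutex standing in for the spinlock and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int handle(int own_lock, int tx, int rx)
{
	if (own_lock)
		pthread_mutex_lock(&lock);

	if (!tx && !rx) {			/* nothing pending */
		if (own_lock)			/* the fix: release before IRQ_NONE */
			pthread_mutex_unlock(&lock);
		return 0;			/* IRQ_NONE */
	}

	/* ... service tx/rx channels here ... */

	if (own_lock)
		pthread_mutex_unlock(&lock);
	return 1;				/* IRQ_HANDLED */
}

int main(void)
{
	printf("%d %d\n", handle(1, 0, 0), handle(1, 1, 0));
	return 0;
}
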
@@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1199 */ 1204 */
1200 if (NULL == bd) { 1205 if (NULL == bd) {
1201 DBG(1, "null BD\n"); 1206 DBG(1, "null BD\n");
1202 tx_ram->tx_complete = 0; 1207 musb_writel(&tx_ram->tx_complete, 0, 0);
1203 continue; 1208 continue;
1204 } 1209 }
1205 1210
@@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel)
1452 * compare mode by writing 1 to the tx_complete register. 1457 * compare mode by writing 1 to the tx_complete register.
1453 */ 1458 */
1454 cppi_reset_tx(tx_ram, 1); 1459 cppi_reset_tx(tx_ram, 1);
1455 cppi_ch->head = 0; 1460 cppi_ch->head = NULL;
1456 musb_writel(&tx_ram->tx_complete, 0, 1); 1461 musb_writel(&tx_ram->tx_complete, 0, 1);
1457 cppi_dump_tx(5, cppi_ch, " (done teardown)"); 1462 cppi_dump_tx(5, cppi_ch, " (done teardown)");
1458 1463
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 630ae7f3cd4c..f10ff00ca09e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev)
1030 struct musb *musb = dev_to_musb(&pdev->dev); 1030 struct musb *musb = dev_to_musb(&pdev->dev);
1031 unsigned long flags; 1031 unsigned long flags;
1032 1032
1033 pm_runtime_get_sync(musb->controller);
1033 spin_lock_irqsave(&musb->lock, flags); 1034 spin_lock_irqsave(&musb->lock, flags);
1034 musb_platform_disable(musb); 1035 musb_platform_disable(musb);
1035 musb_generic_disable(musb); 1036 musb_generic_disable(musb);
@@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev)
1040 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 1041 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1041 musb_platform_exit(musb); 1042 musb_platform_exit(musb);
1042 1043
1044 pm_runtime_put(musb->controller);
1043 /* FIXME power down */ 1045 /* FIXME power down */
1044} 1046}
1045 1047
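
musb_shutdown() now brackets its register writes with a runtime-PM get/put so the hardware is guaranteed powered while DEVCTL is cleared. A small sketch of that get/put discipline, with a hypothetical refcount in place of the runtime-PM core:

#include <stdio.h>

static int resume_count;	/* hypothetical stand-in for the runtime-PM refcount */

static void pm_get(void) { if (resume_count++ == 0) printf("resume hw\n"); }
static void pm_put(void) { if (--resume_count == 0) printf("suspend hw\n"); }

static void shutdown(void)
{
	pm_get();	/* ensure clocks/power are on before touching registers */
	printf("disable controller, write DEVCTL = 0\n");
	pm_put();	/* allow the core to power the block down again */
}

int main(void)
{
	shutdown();
	return 0;
}
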
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 4bd9e2145ee4..0e053b587960 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -261,6 +261,7 @@ enum musb_g_ep0_state {
261 * @try_idle: tries to idle the IP 261 * @try_idle: tries to idle the IP
262 * @vbus_status: returns vbus status if possible 262 * @vbus_status: returns vbus status if possible
263 * @set_vbus: forces vbus status 263 * @set_vbus: forces vbus status
264 * @adjust_channel_params: pre-check for the standard dma channel_program func
264 */ 265 */
265struct musb_platform_ops { 266struct musb_platform_ops {
266 int (*init)(struct musb *musb); 267 int (*init)(struct musb *musb);
@@ -274,6 +275,10 @@ struct musb_platform_ops {
274 275
275 int (*vbus_status)(struct musb *musb); 276 int (*vbus_status)(struct musb *musb);
276 void (*set_vbus)(struct musb *musb, int on); 277 void (*set_vbus)(struct musb *musb, int on);
278
279 int (*adjust_channel_params)(struct dma_channel *channel,
280 u16 packet_sz, u8 *mode,
281 dma_addr_t *dma_addr, u32 *len);
277}; 282};
278 283
279/* 284/*
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 98519c5d8b5c..6dfbf9ffd7a6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
535 is_dma = 1; 535 is_dma = 1;
536 csr |= MUSB_TXCSR_P_WZC_BITS; 536 csr |= MUSB_TXCSR_P_WZC_BITS;
537 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 537 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
538 MUSB_TXCSR_TXPKTRDY); 538 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
539 musb_writew(epio, MUSB_TXCSR, csr); 539 musb_writew(epio, MUSB_TXCSR, csr);
540 /* Ensure writebuffer is empty. */ 540 /* Ensure writebuffer is empty. */
541 csr = musb_readw(epio, MUSB_TXCSR); 541 csr = musb_readw(epio, MUSB_TXCSR);
@@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1296 } 1296 }
1297 1297
1298 /* if the hardware doesn't have the request, easy ... */ 1298 /* if the hardware doesn't have the request, easy ... */
1299 if (musb_ep->req_list.next != &request->list || musb_ep->busy) 1299 if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1300 musb_g_giveback(musb_ep, request, -ECONNRESET); 1300 musb_g_giveback(musb_ep, request, -ECONNRESET);
1301 1301
1302 /* ... else abort the dma transfer ... */ 1302 /* ... else abort the dma transfer ... */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 0144a2d481fd..d281792db05c 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel,
169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
170 channel->status == MUSB_DMA_STATUS_BUSY); 170 channel->status == MUSB_DMA_STATUS_BUSY);
171 171
172 /* Let targets check/tweak the arguments */
173 if (musb->ops->adjust_channel_params) {
174 int ret = musb->ops->adjust_channel_params(channel,
175 packet_sz, &mode, &dma_addr, &len);
176 if (ret)
177 return ret;
178 }
179
172 /* 180 /*
173 * The DMA engine in RTL1.8 and above cannot handle 181 * The DMA engine in RTL1.8 and above cannot handle
174 * DMA addresses that are not aligned to a 4 byte boundary. 182 * DMA addresses that are not aligned to a 4 byte boundary.
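
With this hunk, dma_channel_program() gives the platform glue a veto: if the ops table provides adjust_channel_params, it may fix up or reject the parameters before the channel is programmed. A self-contained sketch of that optional-hook pattern — all names are illustrative, not the kernel API:

#include <stdio.h>
#include <stdint.h>

struct ops {
	int (*adjust)(uint16_t packet_sz, uint8_t *mode,
		      uint64_t *dma_addr, uint32_t *len);
};

static int glue_adjust(uint16_t packet_sz, uint8_t *mode,
		       uint64_t *dma_addr, uint32_t *len)
{
	(void)packet_sz; (void)mode;
	if (*dma_addr & 3)
		return -1;		/* reject unaligned buffers */
	*len &= ~3u;			/* or trim to a 4-byte multiple */
	return 0;
}

static int program_channel(const struct ops *ops, uint16_t psz,
			   uint8_t mode, uint64_t addr, uint32_t len)
{
	if (ops->adjust) {		/* hook is optional: NULL means "no tweak" */
		int ret = ops->adjust(psz, &mode, &addr, &len);

		if (ret)
			return ret;
	}
	printf("program: addr=0x%llx len=%u\n", (unsigned long long)addr, len);
	return 0;
}

int main(void)
{
	struct ops o = { .adjust = glue_adjust };

	return program_channel(&o, 512, 1, 0x1000, 1027);
}
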
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 25cb8b0003b1..57a27fa954b4 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb,
259 case USB_EVENT_VBUS: 259 case USB_EVENT_VBUS:
260 DBG(4, "VBUS Connect\n"); 260 DBG(4, "VBUS Connect\n");
261 261
262#ifdef CONFIG_USB_GADGET_MUSB_HDRC
262 if (musb->gadget_driver) 263 if (musb->gadget_driver)
263 pm_runtime_get_sync(musb->controller); 264 pm_runtime_get_sync(musb->controller);
264 265#endif
265 otg_init(musb->xceiv); 266 otg_init(musb->xceiv);
266 break; 267 break;
267 268
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index d6384e4aeef9..f7e04bf34a13 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev)
93 } 93 }
94 94
95 musb->dev.parent = &pdev->dev; 95 musb->dev.parent = &pdev->dev;
96 musb->dev.dma_mask = pdev->dev.dma_mask;
97 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
96 98
97 glue->dev = &pdev->dev; 99 glue->dev = &pdev->dev;
98 glue->musb = musb; 100 glue->musb = musb;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a973c7a29d6e..4de6ef0ae52a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
151 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 151 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
152 */ 152 */
153static struct usb_device_id id_table_combined [] = { 153static struct usb_device_id id_table_combined [] = {
154 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 156 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 157 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 158 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
@@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = {
525 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, 527 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
526 { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, 528 { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
527 { USB_DEVICE(OCT_VID, OCT_US101_PID) }, 529 { USB_DEVICE(OCT_VID, OCT_US101_PID) },
530 { USB_DEVICE(OCT_VID, OCT_DK201_PID) },
528 { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), 531 { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
529 .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, 532 .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
530 { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), 533 { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
@@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = {
787 { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), 790 { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
788 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 791 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
789 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, 792 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
793 { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
794 { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
790 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, 795 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
791 { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, 796 { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
792 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, 797 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c543e55bafba..efffc23723bd 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -300,6 +300,8 @@
300 * Hameg HO820 and HO870 interface (using VID 0x0403) 300 * Hameg HO820 and HO870 interface (using VID 0x0403)
301 */ 301 */
302#define HAMEG_HO820_PID 0xed74 302#define HAMEG_HO820_PID 0xed74
303#define HAMEG_HO730_PID 0xed73
304#define HAMEG_HO720_PID 0xed72
303#define HAMEG_HO870_PID 0xed71 305#define HAMEG_HO870_PID 0xed71
304 306
305/* 307/*
@@ -572,6 +574,7 @@
572/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ 574/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
573/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ 575/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
574/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */ 576/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
577#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */
575#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ 578#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
576 579
577/* 580/*
@@ -1141,3 +1144,12 @@
1141#define QIHARDWARE_VID 0x20B7 1144#define QIHARDWARE_VID 0x20B7
1142#define MILKYMISTONE_JTAGSERIAL_PID 0x0713 1145#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
1143 1146
1147/*
1148 * CTI GmbH RS485 Converter http://www.cti-lean.com/
1149 */
1150/* USB-485-Mini */
1151#define FTDI_CTI_MINI_PID 0xF608
1152/* USB-Nano-485 */
1153#define FTDI_CTI_NANO_PID 0xF60B
1154
1155
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 75c7f456eed5..d77ff0435896 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb);
407/* ONDA MT825UP HSDPA 14.2 modem */ 407/* ONDA MT825UP HSDPA 14.2 modem */
408#define ONDA_MT825UP 0x000b 408#define ONDA_MT825UP 0x000b
409 409
410/* Samsung products */
411#define SAMSUNG_VENDOR_ID 0x04e8
412#define SAMSUNG_PRODUCT_GT_B3730 0x6889
413
410/* some devices interfaces need special handling due to a number of reasons */ 414/* some devices interfaces need special handling due to a number of reasons */
411enum option_blacklist_reason { 415enum option_blacklist_reason {
412 OPTION_BLACKLIST_NONE = 0, 416 OPTION_BLACKLIST_NONE = 0,
@@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = {
968 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 972 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
969 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 973 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
970 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ 974 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
975 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
971 { } /* Terminating entry */ 976 { } /* Terminating entry */
972}; 977};
973MODULE_DEVICE_TABLE(usb, option_ids); 978MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 8858201eb1d3..54a9dab1f33b 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
111 ifnum = intf->desc.bInterfaceNumber; 111 ifnum = intf->desc.bInterfaceNumber;
112 dbg("This Interface = %d", ifnum); 112 dbg("This Interface = %d", ifnum);
113 113
114 data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), 114 data = kzalloc(sizeof(struct usb_wwan_intf_private),
115 GFP_KERNEL); 115 GFP_KERNEL);
116 if (!data) 116 if (!data)
117 return -ENOMEM; 117 return -ENOMEM;
@@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
134 usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { 134 usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
135 dbg("QDL port found"); 135 dbg("QDL port found");
136 136
137 if (serial->interface->num_altsetting == 1) 137 if (serial->interface->num_altsetting == 1) {
138 return 0; 138 retval = 0; /* Success */
139 break;
140 }
139 141
140 retval = usb_set_interface(serial->dev, ifnum, 1); 142 retval = usb_set_interface(serial->dev, ifnum, 1);
141 if (retval < 0) { 143 if (retval < 0) {
@@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
145 retval = -ENODEV; 147 retval = -ENODEV;
146 kfree(data); 148 kfree(data);
147 } 149 }
148 return retval;
149 } 150 }
150 break; 151 break;
151 152
@@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
166 "Could not set interface, error %d\n", 167 "Could not set interface, error %d\n",
167 retval); 168 retval);
168 retval = -ENODEV; 169 retval = -ENODEV;
170 kfree(data);
169 } 171 }
170 } else if (ifnum == 2) { 172 } else if (ifnum == 2) {
171 dbg("Modem port found"); 173 dbg("Modem port found");
@@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
177 retval = -ENODEV; 179 retval = -ENODEV;
178 kfree(data); 180 kfree(data);
179 } 181 }
180 return retval;
181 } else if (ifnum==3) { 182 } else if (ifnum==3) {
182 /* 183 /*
183 * NMEA (serial line 9600 8N1) 184 * NMEA (serial line 9600 8N1)
@@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
191 "Could not set interface, error %d\n", 192 "Could not set interface, error %d\n",
192 retval); 193 retval);
193 retval = -ENODEV; 194 retval = -ENODEV;
195 kfree(data);
194 } 196 }
195 } 197 }
196 break; 198 break;
@@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
199 dev_err(&serial->dev->dev, 201 dev_err(&serial->dev->dev,
200 "unknown number of interfaces: %d\n", nintf); 202 "unknown number of interfaces: %d\n", nintf);
201 kfree(data); 203 kfree(data);
202 return -ENODEV; 204 retval = -ENODEV;
203 } 205 }
204 206
207 /* Set serial->private if not returning -ENODEV */
208 if (retval != -ENODEV)
209 usb_set_serial_data(serial, data);
205 return retval; 210 return retval;
206} 211}
207 212
213static void qc_release(struct usb_serial *serial)
214{
215 struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
216
217 dbg("%s", __func__);
218
219 /* Call usb_wwan release & free the private data allocated in qcprobe */
220 usb_wwan_release(serial);
221 usb_set_serial_data(serial, NULL);
222 kfree(priv);
223}
224
208static struct usb_serial_driver qcdevice = { 225static struct usb_serial_driver qcdevice = {
209 .driver = { 226 .driver = {
210 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
@@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = {
222 .chars_in_buffer = usb_wwan_chars_in_buffer, 239 .chars_in_buffer = usb_wwan_chars_in_buffer,
223 .attach = usb_wwan_startup, 240 .attach = usb_wwan_startup,
224 .disconnect = usb_wwan_disconnect, 241 .disconnect = usb_wwan_disconnect,
225 .release = usb_wwan_release, 242 .release = qc_release,
226#ifdef CONFIG_PM 243#ifdef CONFIG_PM
227 .suspend = usb_wwan_suspend, 244 .suspend = usb_wwan_suspend,
228 .resume = usb_wwan_resume, 245 .resume = usb_wwan_resume,
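
The qcserial changes above fix the private-data lifetime: qcprobe() no longer publishes the allocation via serial->private before the function can still fail, every error path frees it, and the new qc_release() — installed in place of plain usb_wwan_release — retrieves and frees what probe stored. A compact userspace sketch of that ownership scheme; struct dev and its helpers are hypothetical stand-ins for usb_serial and usb_{set,get}_serial_data:

#include <stdio.h>
#include <stdlib.h>

struct dev { void *priv; };

static int probe(struct dev *d, int nintf)
{
	int *data = calloc(1, sizeof(*data));

	if (!data)
		return -1;
	if (nintf < 1 || nintf > 4) {	/* every error path frees data ... */
		free(data);
		return -1;
	}
	d->priv = data;			/* ... only success publishes it */
	return 0;
}

static void release(struct dev *d)
{
	free(d->priv);			/* undo exactly what probe did */
	d->priv = NULL;
}

int main(void)
{
	struct dev d = { 0 };

	if (probe(&d, 2) == 0)
		release(&d);
	return 0;
}
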
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index a2e5b5100ab4..0f4e8c942f9e 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1648,7 +1648,9 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
1648 1648
1649 switch (val) { 1649 switch (val) {
1650 case CPUFREQ_PRECHANGE: 1650 case CPUFREQ_PRECHANGE:
1651 if (!fbi->overlay[0].usage && !fbi->overlay[1].usage) 1651#ifdef CONFIG_FB_PXA_OVERLAY
1652 if (!(fbi->overlay[0].usage || fbi->overlay[1].usage))
1653#endif
1652 set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); 1654 set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE);
1653 break; 1655 break;
1654 1656
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 0ee594569dcc..85b67ffa2a43 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -286,11 +286,9 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
286 286
287struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) 287struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
288{ 288{
289 int err, flags; 289 int err;
290 struct p9_fid *fid; 290 struct p9_fid *fid;
291 struct v9fs_session_info *v9ses;
292 291
293 v9ses = v9fs_dentry2v9ses(dentry);
294 fid = v9fs_fid_clone_with_uid(dentry, 0); 292 fid = v9fs_fid_clone_with_uid(dentry, 0);
295 if (IS_ERR(fid)) 293 if (IS_ERR(fid))
296 goto error_out; 294 goto error_out;
@@ -299,17 +297,8 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
299 * dirty pages. We always request the open fid in read-write 297 * dirty pages. We always request the open fid in read-write
300 * mode so that a partial page write which results in a page 298 * mode so that a partial page write which results in a page
301 * read can work. 299 * read can work.
302 *
303 * we don't have a tsyncfs operation for older version
304 * of protocol. So make sure the write back fid is
305 * opened in O_SYNC mode.
306 */ 300 */
307 if (!v9fs_proto_dotl(v9ses)) 301 err = p9_client_open(fid, O_RDWR);
308 flags = O_RDWR | O_SYNC;
309 else
310 flags = O_RDWR;
311
312 err = p9_client_open(fid, flags);
313 if (err < 0) { 302 if (err < 0) {
314 p9_client_clunk(fid); 303 p9_client_clunk(fid);
315 fid = ERR_PTR(err); 304 fid = ERR_PTR(err);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 9665c2b840e6..e5ebedfc5ed8 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -116,7 +116,6 @@ struct v9fs_session_info {
116 struct list_head slist; /* list of sessions registered with v9fs */ 116 struct list_head slist; /* list of sessions registered with v9fs */
117 struct backing_dev_info bdi; 117 struct backing_dev_info bdi;
118 struct rw_semaphore rename_sem; 118 struct rw_semaphore rename_sem;
119 struct p9_fid *root_fid; /* Used for file system sync */
120}; 119};
121 120
122/* cache_validity flags */ 121/* cache_validity flags */
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index b6a3b9f7fe4d..e022890c6f40 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
126 retval = v9fs_refresh_inode_dotl(fid, inode); 126 retval = v9fs_refresh_inode_dotl(fid, inode);
127 else 127 else
128 retval = v9fs_refresh_inode(fid, inode); 128 retval = v9fs_refresh_inode(fid, inode);
129 if (retval <= 0) 129 if (retval == -ENOENT)
130 return 0;
131 if (retval < 0)
130 return retval; 132 return retval;
131 } 133 }
132out_valid: 134out_valid:
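
The revalidate fix distinguishes "the server says the file is gone" from genuine errors: -ENOENT now maps to returning 0 so the VFS invalidates the dentry and redoes the lookup, while other errors still propagate. A tiny sketch of that convention — revalidate() here is illustrative, not the 9p function:

#include <stdio.h>
#include <errno.h>

static int revalidate(int refresh_ret)
{
	if (refresh_ret == -ENOENT)
		return 0;		/* invalid: force a fresh lookup */
	if (refresh_ret < 0)
		return refresh_ret;	/* real error: pass it up */
	return 1;			/* still valid */
}

int main(void)
{
	printf("%d %d %d\n", revalidate(-ENOENT), revalidate(-EIO), revalidate(0));
	return 0;
}
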
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index ffbb113d5f33..82a7c38ddad0 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -811,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
811 fid = v9fs_fid_lookup(dentry); 811 fid = v9fs_fid_lookup(dentry);
812 if (IS_ERR(fid)) { 812 if (IS_ERR(fid)) {
813 __putname(link); 813 __putname(link);
814 link = ERR_PTR(PTR_ERR(fid)); 814 link = ERR_CAST(fid);
815 goto ndset; 815 goto ndset;
816 } 816 }
817 retval = p9_client_readlink(fid, &target); 817 retval = p9_client_readlink(fid, &target);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index f3eed3383e4f..feef6cdc1fd2 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
154 retval = PTR_ERR(inode); 154 retval = PTR_ERR(inode);
155 goto release_sb; 155 goto release_sb;
156 } 156 }
157
157 root = d_alloc_root(inode); 158 root = d_alloc_root(inode);
158 if (!root) { 159 if (!root) {
159 iput(inode); 160 iput(inode);
@@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
185 p9stat_free(st); 186 p9stat_free(st);
186 kfree(st); 187 kfree(st);
187 } 188 }
188 v9fs_fid_add(root, fid);
189 retval = v9fs_get_acl(inode, fid); 189 retval = v9fs_get_acl(inode, fid);
190 if (retval) 190 if (retval)
191 goto release_sb; 191 goto release_sb;
192 /* 192 v9fs_fid_add(root, fid);
193 * Add the root fid to session info. This is used
194 * for file system sync. We want a cloned fid here
195 * so that we can do a sync_filesystem after a
196 * shrink_dcache_for_umount
197 */
198 v9ses->root_fid = v9fs_fid_clone(root);
199 if (IS_ERR(v9ses->root_fid)) {
200 retval = PTR_ERR(v9ses->root_fid);
201 goto release_sb;
202 }
203 193
204 P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); 194 P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
205 return dget(sb->s_root); 195 return dget(sb->s_root);
@@ -210,11 +200,15 @@ close_session:
210 v9fs_session_close(v9ses); 200 v9fs_session_close(v9ses);
211 kfree(v9ses); 201 kfree(v9ses);
212 return ERR_PTR(retval); 202 return ERR_PTR(retval);
203
213release_sb: 204release_sb:
214 /* 205 /*
215 * we will do the session_close and root dentry 206 * we will do the session_close and root dentry release
216 * release in the below call. 207 * in the below call. But we need to clunk fid, because we haven't
208 * attached the fid to dentry so it won't get clunked
209 * automatically.
217 */ 210 */
211 p9_client_clunk(fid);
218 deactivate_locked_super(sb); 212 deactivate_locked_super(sb);
219 return ERR_PTR(retval); 213 return ERR_PTR(retval);
220} 214}
@@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s)
232 P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); 226 P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
233 227
234 kill_anon_super(s); 228 kill_anon_super(s);
235 p9_client_clunk(v9ses->root_fid); 229
236 v9fs_session_cancel(v9ses); 230 v9fs_session_cancel(v9ses);
237 v9fs_session_close(v9ses); 231 v9fs_session_close(v9ses);
238 kfree(v9ses); 232 kfree(v9ses);
@@ -285,14 +279,6 @@ done:
285 return res; 279 return res;
286} 280}
287 281
288static int v9fs_sync_fs(struct super_block *sb, int wait)
289{
290 struct v9fs_session_info *v9ses = sb->s_fs_info;
291
292 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb);
293 return p9_client_sync_fs(v9ses->root_fid);
294}
295
296static int v9fs_drop_inode(struct inode *inode) 282static int v9fs_drop_inode(struct inode *inode)
297{ 283{
298 struct v9fs_session_info *v9ses; 284 struct v9fs_session_info *v9ses;
@@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode)
307 return 1; 293 return 1;
308} 294}
309 295
296static int v9fs_write_inode(struct inode *inode,
297 struct writeback_control *wbc)
298{
299 int ret;
300 struct p9_wstat wstat;
301 struct v9fs_inode *v9inode;
302 /*
303 * send an fsync request to the server irrespective of
304 * wbc->sync_mode.
305 */
306 P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
307 v9inode = V9FS_I(inode);
308 if (!v9inode->writeback_fid)
309 return 0;
310 v9fs_blank_wstat(&wstat);
311
312 ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
313 if (ret < 0) {
314 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
315 return ret;
316 }
317 return 0;
318}
319
320static int v9fs_write_inode_dotl(struct inode *inode,
321 struct writeback_control *wbc)
322{
323 int ret;
324 struct v9fs_inode *v9inode;
325 /*
326 * send an fsync request to the server irrespective of
327 * wbc->sync_mode.
328 */
329 P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
330 v9inode = V9FS_I(inode);
331 if (!v9inode->writeback_fid)
332 return 0;
333 ret = p9_client_fsync(v9inode->writeback_fid, 0);
334 if (ret < 0) {
335 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
336 return ret;
337 }
338 return 0;
339}
340
310static const struct super_operations v9fs_super_ops = { 341static const struct super_operations v9fs_super_ops = {
311 .alloc_inode = v9fs_alloc_inode, 342 .alloc_inode = v9fs_alloc_inode,
312 .destroy_inode = v9fs_destroy_inode, 343 .destroy_inode = v9fs_destroy_inode,
@@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = {
314 .evict_inode = v9fs_evict_inode, 345 .evict_inode = v9fs_evict_inode,
315 .show_options = generic_show_options, 346 .show_options = generic_show_options,
316 .umount_begin = v9fs_umount_begin, 347 .umount_begin = v9fs_umount_begin,
348 .write_inode = v9fs_write_inode,
317}; 349};
318 350
319static const struct super_operations v9fs_super_ops_dotl = { 351static const struct super_operations v9fs_super_ops_dotl = {
320 .alloc_inode = v9fs_alloc_inode, 352 .alloc_inode = v9fs_alloc_inode,
321 .destroy_inode = v9fs_destroy_inode, 353 .destroy_inode = v9fs_destroy_inode,
322 .sync_fs = v9fs_sync_fs,
323 .statfs = v9fs_statfs, 354 .statfs = v9fs_statfs,
324 .drop_inode = v9fs_drop_inode, 355 .drop_inode = v9fs_drop_inode,
325 .evict_inode = v9fs_evict_inode, 356 .evict_inode = v9fs_evict_inode,
326 .show_options = generic_show_options, 357 .show_options = generic_show_options,
327 .umount_begin = v9fs_umount_begin, 358 .umount_begin = v9fs_umount_begin,
359 .write_inode = v9fs_write_inode_dotl,
328}; 360};
329 361
330struct file_system_type v9fs_fs_type = { 362struct file_system_type v9fs_fs_type = {
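
The 9p rework above replaces the root-fid sync_fs with per-inode ->write_inode methods: flush the writeback fid to the server, and on failure re-mark the inode dirty so writeback retries rather than silently dropping the update. A minimal userspace sketch of that retry-on-failure shape; all helpers here are hypothetical:

#include <stdio.h>
#include <errno.h>

static int dirty;			/* stand-in for the inode dirty bit */

static int server_flush(int fail)	/* pretend p9_client_fsync/wstat */
{
	return fail ? -EIO : 0;
}

static int write_inode(int have_writeback_fid, int fail)
{
	int ret;

	if (!have_writeback_fid)	/* never written back: nothing to flush */
		return 0;
	ret = server_flush(fail);
	if (ret < 0) {
		dirty = 1;		/* re-dirty so writeback retries later */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("%d dirty=%d\n", write_inode(1, 1), dirty);
	return 0;
}
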
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f34078d702d3..303983fabfd6 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
941 current->mm->start_stack = bprm->p; 941 current->mm->start_stack = bprm->p;
942 942
943#ifdef arch_randomize_brk 943#ifdef arch_randomize_brk
944 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) 944 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
945 current->mm->brk = current->mm->start_brk = 945 current->mm->brk = current->mm->start_brk =
946 arch_randomize_brk(current->mm); 946 arch_randomize_brk(current->mm);
947#ifdef CONFIG_COMPAT_BRK
948 current->brk_randomized = 1;
949#endif
950 }
947#endif 951#endif
948 952
949 if (current->personality & MMAP_PAGE_ZERO) { 953 if (current->personality & MMAP_PAGE_ZERO) {
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index de34bfad9ec3..5d505aaa72fb 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
178 178
179 if (value) { 179 if (value) {
180 acl = posix_acl_from_xattr(value, size); 180 acl = posix_acl_from_xattr(value, size);
181 if (acl == NULL) { 181 if (acl) {
182 value = NULL; 182 ret = posix_acl_valid(acl);
183 size = 0; 183 if (ret)
184 goto out;
184 } else if (IS_ERR(acl)) { 185 } else if (IS_ERR(acl)) {
185 return PTR_ERR(acl); 186 return PTR_ERR(acl);
186 } 187 }
187 } 188 }
188 189
189 ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); 190 ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
190 191out:
191 posix_acl_release(acl); 192 posix_acl_release(acl);
192 193
193 return ret; 194 return ret;
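
The btrfs ACL fix tightens the set path: a successfully parsed, non-NULL ACL must pass posix_acl_valid() before being applied, while NULL keeps its meaning of "delete the ACL". A sketch of that parse-then-validate order; parse/valid are stand-ins for the posix_acl helpers:

#include <stdio.h>
#include <errno.h>

struct acl { int ok; };

static struct acl *parse(int want)	/* stands in for posix_acl_from_xattr */
{
	static struct acl a;

	if (!want)
		return NULL;		/* no value: caller deletes the ACL */
	a.ok = want > 0;
	return &a;
}

static int valid(const struct acl *a)	/* stands in for posix_acl_valid */
{
	return a->ok ? 0 : -EINVAL;
}

static int set_acl_xattr(int want)
{
	struct acl *a = parse(want);
	int ret = 0;

	if (a) {
		ret = valid(a);		/* the fix: reject malformed ACLs */
		if (ret)
			return ret;
	}
	/* ... apply a (NULL means remove) and release it ... */
	return ret;
}

int main(void)
{
	printf("%d %d\n", set_acl_xattr(1), set_acl_xattr(-1));
	return 0;
}
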
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3458b5725540..2e61fe1b6b8c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -740,8 +740,10 @@ struct btrfs_space_info {
740 */ 740 */
741 unsigned long reservation_progress; 741 unsigned long reservation_progress;
742 742
743 int full; /* indicates that we cannot allocate any more 743 int full:1; /* indicates that we cannot allocate any more
744 chunks for this space */ 744 chunks for this space */
745 int chunk_alloc:1; /* set if we are allocating a chunk */
746
745 int force_alloc; /* set if we need to force a chunk alloc for 747 int force_alloc; /* set if we need to force a chunk alloc for
746 this space */ 748 this space */
747 749
@@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
2576int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, 2578int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
2577 struct inode *inode, u64 start, u64 end); 2579 struct inode *inode, u64 start, u64 end);
2578int btrfs_release_file(struct inode *inode, struct file *file); 2580int btrfs_release_file(struct inode *inode, struct file *file);
2581void btrfs_drop_pages(struct page **pages, size_t num_pages);
2582int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
2583 struct page **pages, size_t num_pages,
2584 loff_t pos, size_t write_bytes,
2585 struct extent_state **cached);
2579 2586
2580/* tree-defrag.c */ 2587/* tree-defrag.c */
2581int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, 2588int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8f1d44ba332f..68c84c8c24bd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3057,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3057 btrfs_destroy_pinned_extent(root, 3057 btrfs_destroy_pinned_extent(root,
3058 root->fs_info->pinned_extents); 3058 root->fs_info->pinned_extents);
3059 3059
3060 t->use_count = 0; 3060 atomic_set(&t->use_count, 0);
3061 list_del_init(&t->list); 3061 list_del_init(&t->list);
3062 memset(t, 0, sizeof(*t)); 3062 memset(t, 0, sizeof(*t));
3063 kmem_cache_free(btrfs_transaction_cachep, t); 3063 kmem_cache_free(btrfs_transaction_cachep, t);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f619c3cb13b7..31f33ba56fe8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -33,6 +33,25 @@
33#include "locking.h" 33#include "locking.h"
34#include "free-space-cache.h" 34#include "free-space-cache.h"
35 35
36/* control flags for do_chunk_alloc's force field
37 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
38 * if we really need one.
39 *
40 * CHUNK_ALLOC_FORCE means it must try to allocate one
41 *
42 * CHUNK_ALLOC_LIMITED means to only try and allocate one
43 * if we have very few chunks already allocated. This is
44 * used as part of the clustering code to help make sure
45 * we have a good pool of storage to cluster in, without
46 * filling the FS with empty chunks
47 *
48 */
49enum {
50 CHUNK_ALLOC_NO_FORCE = 0,
51 CHUNK_ALLOC_FORCE = 1,
52 CHUNK_ALLOC_LIMITED = 2,
53};
54
36static int update_block_group(struct btrfs_trans_handle *trans, 55static int update_block_group(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root, 56 struct btrfs_root *root,
38 u64 bytenr, u64 num_bytes, int alloc); 57 u64 bytenr, u64 num_bytes, int alloc);
@@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3019 found->bytes_readonly = 0; 3038 found->bytes_readonly = 0;
3020 found->bytes_may_use = 0; 3039 found->bytes_may_use = 0;
3021 found->full = 0; 3040 found->full = 0;
3022 found->force_alloc = 0; 3041 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3042 found->chunk_alloc = 0;
3023 *space_info = found; 3043 *space_info = found;
3024 list_add_rcu(&found->list, &info->space_info); 3044 list_add_rcu(&found->list, &info->space_info);
3025 atomic_set(&found->caching_threads, 0); 3045 atomic_set(&found->caching_threads, 0);
@@ -3150,7 +3170,7 @@ again:
3150 if (!data_sinfo->full && alloc_chunk) { 3170 if (!data_sinfo->full && alloc_chunk) {
3151 u64 alloc_target; 3171 u64 alloc_target;
3152 3172
3153 data_sinfo->force_alloc = 1; 3173 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3154 spin_unlock(&data_sinfo->lock); 3174 spin_unlock(&data_sinfo->lock);
3155alloc: 3175alloc:
3156 alloc_target = btrfs_get_alloc_profile(root, 1); 3176 alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3160,7 +3180,8 @@ alloc:
3160 3180
3161 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 3181 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3162 bytes + 2 * 1024 * 1024, 3182 bytes + 2 * 1024 * 1024,
3163 alloc_target, 0); 3183 alloc_target,
3184 CHUNK_ALLOC_NO_FORCE);
3164 btrfs_end_transaction(trans, root); 3185 btrfs_end_transaction(trans, root);
3165 if (ret < 0) { 3186 if (ret < 0) {
3166 if (ret != -ENOSPC) 3187 if (ret != -ENOSPC)
@@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
3239 rcu_read_lock(); 3260 rcu_read_lock();
3240 list_for_each_entry_rcu(found, head, list) { 3261 list_for_each_entry_rcu(found, head, list) {
3241 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3262 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3242 found->force_alloc = 1; 3263 found->force_alloc = CHUNK_ALLOC_FORCE;
3243 } 3264 }
3244 rcu_read_unlock(); 3265 rcu_read_unlock();
3245} 3266}
3246 3267
3247static int should_alloc_chunk(struct btrfs_root *root, 3268static int should_alloc_chunk(struct btrfs_root *root,
3248 struct btrfs_space_info *sinfo, u64 alloc_bytes) 3269 struct btrfs_space_info *sinfo, u64 alloc_bytes,
3270 int force)
3249{ 3271{
3250 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; 3272 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3273 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3251 u64 thresh; 3274 u64 thresh;
3252 3275
3253 if (sinfo->bytes_used + sinfo->bytes_reserved + 3276 if (force == CHUNK_ALLOC_FORCE)
3254 alloc_bytes + 256 * 1024 * 1024 < num_bytes) 3277 return 1;
3278
3279 /*
3280 * in limited mode, we want to have some free space up to
3281 * about 1% of the FS size.
3282 */
3283 if (force == CHUNK_ALLOC_LIMITED) {
3284 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3285 thresh = max_t(u64, 64 * 1024 * 1024,
3286 div_factor_fine(thresh, 1));
3287
3288 if (num_bytes - num_allocated < thresh)
3289 return 1;
3290 }
3291
3292 /*
3293 * we have two similar checks here, one based on percentage
3294 * and one based on a hard number of 256MB. The idea
3295 * is that if we have a good amount of free
3296 * room, don't allocate a chunk. A good amount means
3297 * less than 80% utilization of the chunks we have allocated,
3298 * or more than 256MB free
3299 */
3300 if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3255 return 0; 3301 return 0;
3256 3302
3257 if (sinfo->bytes_used + sinfo->bytes_reserved + 3303 if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
3258 alloc_bytes < div_factor(num_bytes, 8))
3259 return 0; 3304 return 0;
3260 3305
3261 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); 3306 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3307
3308 /* 256MB or 5% of the FS */
3262 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); 3309 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3263 3310
3264 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) 3311 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3265 return 0; 3312 return 0;
3266
3267 return 1; 3313 return 1;
3268} 3314}
3269 3315
@@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3273{ 3319{
3274 struct btrfs_space_info *space_info; 3320 struct btrfs_space_info *space_info;
3275 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3321 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3322 int wait_for_alloc = 0;
3276 int ret = 0; 3323 int ret = 0;
3277 3324
3278 mutex_lock(&fs_info->chunk_mutex);
3279
3280 flags = btrfs_reduce_alloc_profile(extent_root, flags); 3325 flags = btrfs_reduce_alloc_profile(extent_root, flags);
3281 3326
3282 space_info = __find_space_info(extent_root->fs_info, flags); 3327 space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3287 } 3332 }
3288 BUG_ON(!space_info); 3333 BUG_ON(!space_info);
3289 3334
3335again:
3290 spin_lock(&space_info->lock); 3336 spin_lock(&space_info->lock);
3291 if (space_info->force_alloc) 3337 if (space_info->force_alloc)
3292 force = 1; 3338 force = space_info->force_alloc;
3293 if (space_info->full) { 3339 if (space_info->full) {
3294 spin_unlock(&space_info->lock); 3340 spin_unlock(&space_info->lock);
3295 goto out; 3341 return 0;
3296 } 3342 }
3297 3343
3298 if (!force && !should_alloc_chunk(extent_root, space_info, 3344 if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3299 alloc_bytes)) {
3300 spin_unlock(&space_info->lock); 3345 spin_unlock(&space_info->lock);
3301 goto out; 3346 return 0;
3347 } else if (space_info->chunk_alloc) {
3348 wait_for_alloc = 1;
3349 } else {
3350 space_info->chunk_alloc = 1;
3302 } 3351 }
3352
3303 spin_unlock(&space_info->lock); 3353 spin_unlock(&space_info->lock);
3304 3354
3355 mutex_lock(&fs_info->chunk_mutex);
3356
3357 /*
3358 * The chunk_mutex is held throughout the entirety of a chunk
3359 * allocation, so once we've acquired the chunk_mutex we know that the
3360 * other allocator is done and we need to recheck and see if we should
3361 * allocate.
3362 */
3363 if (wait_for_alloc) {
3364 mutex_unlock(&fs_info->chunk_mutex);
3365 wait_for_alloc = 0;
3366 goto again;
3367 }
3368
3305 /* 3369 /*
3306 * If we have mixed data/metadata chunks we want to make sure we keep 3370 * If we have mixed data/metadata chunks we want to make sure we keep
3307 * allocating mixed chunks instead of individual chunks. 3371 * allocating mixed chunks instead of individual chunks.
@@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3327 space_info->full = 1; 3391 space_info->full = 1;
3328 else 3392 else
3329 ret = 1; 3393 ret = 1;
3330 space_info->force_alloc = 0; 3394
3395 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3396 space_info->chunk_alloc = 0;
3331 spin_unlock(&space_info->lock); 3397 spin_unlock(&space_info->lock);
3332out:
3333 mutex_unlock(&extent_root->fs_info->chunk_mutex); 3398 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3334 return ret; 3399 return ret;
3335} 3400}
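
do_chunk_alloc() now performs a "wait for the other allocator, then recheck" dance: a flag under the spinlock marks an allocation in flight; late arrivals take the (long-held) chunk_mutex only to wait for it, then loop back and re-evaluate. A userspace sketch of that control flow, with a second pthread mutex standing in for the spinlock and all names illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t spin = PTHREAD_MUTEX_INITIALIZER;	/* "spinlock" */
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static int chunk_alloc, full;

static int do_chunk_alloc(void)
{
	int wait_for_alloc = 0;
again:
	pthread_mutex_lock(&spin);
	if (full) {
		pthread_mutex_unlock(&spin);
		return 0;
	}
	if (chunk_alloc)
		wait_for_alloc = 1;	/* someone else is mid-allocation */
	else
		chunk_alloc = 1;	/* we claim the allocation */
	pthread_mutex_unlock(&spin);

	pthread_mutex_lock(&chunk_mutex);
	if (wait_for_alloc) {		/* other side just finished: recheck */
		pthread_mutex_unlock(&chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	/* ... allocate the chunk ... */
	pthread_mutex_lock(&spin);
	chunk_alloc = 0;
	full = 1;			/* pretend the FS filled up */
	pthread_mutex_unlock(&spin);
	pthread_mutex_unlock(&chunk_mutex);
	return 1;
}

int main(void)
{
	printf("%d %d\n", do_chunk_alloc(), do_chunk_alloc());
	return 0;
}
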
@@ -5303,11 +5368,13 @@ loop:
5303 5368
5304 if (allowed_chunk_alloc) { 5369 if (allowed_chunk_alloc) {
5305 ret = do_chunk_alloc(trans, root, num_bytes + 5370 ret = do_chunk_alloc(trans, root, num_bytes +
5306 2 * 1024 * 1024, data, 1); 5371 2 * 1024 * 1024, data,
5372 CHUNK_ALLOC_LIMITED);
5307 allowed_chunk_alloc = 0; 5373 allowed_chunk_alloc = 0;
5308 done_chunk_alloc = 1; 5374 done_chunk_alloc = 1;
5309 } else if (!done_chunk_alloc) { 5375 } else if (!done_chunk_alloc &&
5310 space_info->force_alloc = 1; 5376 space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
5377 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5311 } 5378 }
5312 5379
5313 if (loop < LOOP_NO_EMPTY_SIZE) { 5380 if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5393,7 +5460,8 @@ again:
5393 */ 5460 */
5394 if (empty_size || root->ref_cows) 5461 if (empty_size || root->ref_cows)
5395 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 5462 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5396 num_bytes + 2 * 1024 * 1024, data, 0); 5463 num_bytes + 2 * 1024 * 1024, data,
5464 CHUNK_ALLOC_NO_FORCE);
5397 5465
5398 WARN_ON(num_bytes < root->sectorsize); 5466 WARN_ON(num_bytes < root->sectorsize);
5399 ret = find_free_extent(trans, root, num_bytes, empty_size, 5467 ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5405,7 +5473,7 @@ again:
5405 num_bytes = num_bytes & ~(root->sectorsize - 1); 5473 num_bytes = num_bytes & ~(root->sectorsize - 1);
5406 num_bytes = max(num_bytes, min_alloc_size); 5474 num_bytes = max(num_bytes, min_alloc_size);
5407 do_chunk_alloc(trans, root->fs_info->extent_root, 5475 do_chunk_alloc(trans, root->fs_info->extent_root,
5408 num_bytes, data, 1); 5476 num_bytes, data, CHUNK_ALLOC_FORCE);
5409 goto again; 5477 goto again;
5410 } 5478 }
5411 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { 5479 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -8109,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
8109 8177
8110 alloc_flags = update_block_group_flags(root, cache->flags); 8178 alloc_flags = update_block_group_flags(root, cache->flags);
8111 if (alloc_flags != cache->flags) 8179 if (alloc_flags != cache->flags)
8112 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); 8180 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8181 CHUNK_ALLOC_FORCE);
8113 8182
8114 ret = set_block_group_ro(cache); 8183 ret = set_block_group_ro(cache);
8115 if (!ret) 8184 if (!ret)
8116 goto out; 8185 goto out;
8117 alloc_flags = get_alloc_profile(root, cache->space_info->flags); 8186 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8118 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); 8187 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8188 CHUNK_ALLOC_FORCE);
8119 if (ret < 0) 8189 if (ret < 0)
8120 goto out; 8190 goto out;
8121 ret = set_block_group_ro(cache); 8191 ret = set_block_group_ro(cache);
@@ -8128,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8128 struct btrfs_root *root, u64 type) 8198 struct btrfs_root *root, u64 type)
8129{ 8199{
8130 u64 alloc_flags = get_alloc_profile(root, type); 8200 u64 alloc_flags = get_alloc_profile(root, type);
8131 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); 8201 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8202 CHUNK_ALLOC_FORCE);
8132} 8203}
8133 8204
8134/* 8205/*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 20ddb28602a8..315138605088 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
690 } 690 }
691} 691}
692 692
693static void uncache_state(struct extent_state **cached_ptr)
694{
695 if (cached_ptr && (*cached_ptr)) {
696 struct extent_state *state = *cached_ptr;
697 *cached_ptr = NULL;
698 free_extent_state(state);
699 }
700}
701
693/* 702/*
694 * set some bits on a range in the tree. This may require allocations or 703 * set some bits on a range in the tree. This may require allocations or
695 * sleeping, so the gfp mask is used to indicate what is allowed. 704 * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
940} 949}
941 950
942int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 951int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
943 gfp_t mask) 952 struct extent_state **cached_state, gfp_t mask)
944{ 953{
945 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL, 954 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
946 NULL, mask); 955 NULL, cached_state, mask);
947} 956}
948 957
949static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 958static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1012 mask); 1021 mask);
1013} 1022}
1014 1023
1015int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, 1024int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1016 gfp_t mask)
1017{ 1025{
1018 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, 1026 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1019 mask); 1027 mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1735 1743
1736 do { 1744 do {
1737 struct page *page = bvec->bv_page; 1745 struct page *page = bvec->bv_page;
1746 struct extent_state *cached = NULL;
1747 struct extent_state *state;
1748
1738 tree = &BTRFS_I(page->mapping->host)->io_tree; 1749 tree = &BTRFS_I(page->mapping->host)->io_tree;
1739 1750
1740 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 1751 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1749 if (++bvec <= bvec_end) 1760 if (++bvec <= bvec_end)
1750 prefetchw(&bvec->bv_page->flags); 1761 prefetchw(&bvec->bv_page->flags);
1751 1762
1763 spin_lock(&tree->lock);
1764 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
1765 if (state && state->start == start) {
1766 /*
1767 * take a reference on the state, unlock will drop
1768 * the ref
1769 */
1770 cache_state(state, &cached);
1771 }
1772 spin_unlock(&tree->lock);
1773
1752 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 1774 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1753 ret = tree->ops->readpage_end_io_hook(page, start, end, 1775 ret = tree->ops->readpage_end_io_hook(page, start, end,
1754 NULL); 1776 state);
1755 if (ret) 1777 if (ret)
1756 uptodate = 0; 1778 uptodate = 0;
1757 } 1779 }
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1764 test_bit(BIO_UPTODATE, &bio->bi_flags); 1786 test_bit(BIO_UPTODATE, &bio->bi_flags);
1765 if (err) 1787 if (err)
1766 uptodate = 0; 1788 uptodate = 0;
1789 uncache_state(&cached);
1767 continue; 1790 continue;
1768 } 1791 }
1769 } 1792 }
1770 1793
1771 if (uptodate) { 1794 if (uptodate) {
1772 set_extent_uptodate(tree, start, end, 1795 set_extent_uptodate(tree, start, end, &cached,
1773 GFP_ATOMIC); 1796 GFP_ATOMIC);
1774 } 1797 }
1775 unlock_extent(tree, start, end, GFP_ATOMIC); 1798 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1776 1799
1777 if (whole_page) { 1800 if (whole_page) {
1778 if (uptodate) { 1801 if (uptodate) {
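
The readpage end_io changes look the extent state up once under the tree lock, take a reference on it ("cache" it), and hand that reference to unlock_extent_cached() so the unlock path skips a second tree search. A userspace sketch of the cache-a-reference pattern — refcounting only, no tree, all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct state { int refs; };		/* stand-in for extent_state */

static struct state *lookup(void)	/* pretend tree search */
{
	struct state *s = malloc(sizeof(*s));

	if (!s)
		abort();
	s->refs = 1;
	return s;
}

static void cache_state(struct state *s, struct state **cached)
{
	if (cached && !*cached) {
		s->refs++;		/* the reference travels with the caller */
		*cached = s;
	}
}

static void put_state(struct state *s)
{
	if (--s->refs == 0)
		free(s);
}

static void unlock_cached(struct state **cached)
{
	if (*cached) {
		put_state(*cached);	/* drop ref: no second lookup needed */
		*cached = NULL;
	} else {
		put_state(lookup());	/* slow path: search again */
	}
}

int main(void)
{
	struct state *cached = NULL;
	struct state *s = lookup();

	cache_state(s, &cached);	/* end_io: remember the state */
	put_state(s);			/* drop the original lookup reference */
	unlock_cached(&cached);		/* unlock reuses the cached reference */
	return 0;
}
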
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
1811 1834
1812 do { 1835 do {
1813 struct page *page = bvec->bv_page; 1836 struct page *page = bvec->bv_page;
1837 struct extent_state *cached = NULL;
1814 tree = &BTRFS_I(page->mapping->host)->io_tree; 1838 tree = &BTRFS_I(page->mapping->host)->io_tree;
1815 1839
1816 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 1840 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
1821 prefetchw(&bvec->bv_page->flags); 1845 prefetchw(&bvec->bv_page->flags);
1822 1846
1823 if (uptodate) { 1847 if (uptodate) {
1824 set_extent_uptodate(tree, start, end, GFP_ATOMIC); 1848 set_extent_uptodate(tree, start, end, &cached,
1849 GFP_ATOMIC);
1825 } else { 1850 } else {
1826 ClearPageUptodate(page); 1851 ClearPageUptodate(page);
1827 SetPageError(page); 1852 SetPageError(page);
1828 } 1853 }
1829 1854
1830 unlock_extent(tree, start, end, GFP_ATOMIC); 1855 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1831 1856
1832 } while (bvec >= bio->bi_io_vec); 1857 } while (bvec >= bio->bi_io_vec);
1833 1858
@@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2016 while (cur <= end) { 2041 while (cur <= end) {
2017 if (cur >= last_byte) { 2042 if (cur >= last_byte) {
2018 char *userpage; 2043 char *userpage;
2044 struct extent_state *cached = NULL;
2045
2019 iosize = PAGE_CACHE_SIZE - page_offset; 2046 iosize = PAGE_CACHE_SIZE - page_offset;
2020 userpage = kmap_atomic(page, KM_USER0); 2047 userpage = kmap_atomic(page, KM_USER0);
2021 memset(userpage + page_offset, 0, iosize); 2048 memset(userpage + page_offset, 0, iosize);
2022 flush_dcache_page(page); 2049 flush_dcache_page(page);
2023 kunmap_atomic(userpage, KM_USER0); 2050 kunmap_atomic(userpage, KM_USER0);
2024 set_extent_uptodate(tree, cur, cur + iosize - 1, 2051 set_extent_uptodate(tree, cur, cur + iosize - 1,
2025 GFP_NOFS); 2052 &cached, GFP_NOFS);
2026 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2053 unlock_extent_cached(tree, cur, cur + iosize - 1,
2054 &cached, GFP_NOFS);
2027 break; 2055 break;
2028 } 2056 }
2029 em = get_extent(inode, page, page_offset, cur, 2057 em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2063 /* we've found a hole, just zero and go on */ 2091 /* we've found a hole, just zero and go on */
2064 if (block_start == EXTENT_MAP_HOLE) { 2092 if (block_start == EXTENT_MAP_HOLE) {
2065 char *userpage; 2093 char *userpage;
2094 struct extent_state *cached = NULL;
2095
2066 userpage = kmap_atomic(page, KM_USER0); 2096 userpage = kmap_atomic(page, KM_USER0);
2067 memset(userpage + page_offset, 0, iosize); 2097 memset(userpage + page_offset, 0, iosize);
2068 flush_dcache_page(page); 2098 flush_dcache_page(page);
2069 kunmap_atomic(userpage, KM_USER0); 2099 kunmap_atomic(userpage, KM_USER0);
2070 2100
2071 set_extent_uptodate(tree, cur, cur + iosize - 1, 2101 set_extent_uptodate(tree, cur, cur + iosize - 1,
2072 GFP_NOFS); 2102 &cached, GFP_NOFS);
2073 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2103 unlock_extent_cached(tree, cur, cur + iosize - 1,
2104 &cached, GFP_NOFS);
2074 cur = cur + iosize; 2105 cur = cur + iosize;
2075 page_offset += iosize; 2106 page_offset += iosize;
2076 continue; 2107 continue;
@@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
2789 iocount++; 2820 iocount++;
2790 block_start = block_start + iosize; 2821 block_start = block_start + iosize;
2791 } else { 2822 } else {
2792 set_extent_uptodate(tree, block_start, cur_end, 2823 struct extent_state *cached = NULL;
2824
2825 set_extent_uptodate(tree, block_start, cur_end, &cached,
2793 GFP_NOFS); 2826 GFP_NOFS);
2794 unlock_extent(tree, block_start, cur_end, GFP_NOFS); 2827 unlock_extent_cached(tree, block_start, cur_end,
2828 &cached, GFP_NOFS);
2795 block_start = cur_end + 1; 2829 block_start = cur_end + 1;
2796 } 2830 }
2797 page_offset = block_start & (PAGE_CACHE_SIZE - 1); 2831 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3457 num_pages = num_extent_pages(eb->start, eb->len); 3491 num_pages = num_extent_pages(eb->start, eb->len);
3458 3492
3459 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 3493 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3460 GFP_NOFS); 3494 NULL, GFP_NOFS);
3461 for (i = 0; i < num_pages; i++) { 3495 for (i = 0; i < num_pages; i++) {
3462 page = extent_buffer_page(eb, i); 3496 page = extent_buffer_page(eb, i);
3463 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || 3497 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
3885 kunmap_atomic(dst_kaddr, KM_USER0); 3919 kunmap_atomic(dst_kaddr, KM_USER0);
3886} 3920}
3887 3921
3922static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3923{
3924 unsigned long distance = (src > dst) ? src - dst : dst - src;
3925 return distance < len;
3926}
3927
3888static void copy_pages(struct page *dst_page, struct page *src_page, 3928static void copy_pages(struct page *dst_page, struct page *src_page,
3889 unsigned long dst_off, unsigned long src_off, 3929 unsigned long dst_off, unsigned long src_off,
3890 unsigned long len) 3930 unsigned long len)
@@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
3892 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); 3932 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3893 char *src_kaddr; 3933 char *src_kaddr;
3894 3934
3895 if (dst_page != src_page) 3935 if (dst_page != src_page) {
3896 src_kaddr = kmap_atomic(src_page, KM_USER1); 3936 src_kaddr = kmap_atomic(src_page, KM_USER1);
3897 else 3937 } else {
3898 src_kaddr = dst_kaddr; 3938 src_kaddr = dst_kaddr;
3939 BUG_ON(areas_overlap(src_off, dst_off, len));
3940 }
3899 3941
3900 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 3942 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3901 kunmap_atomic(dst_kaddr, KM_USER0); 3943 kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3970 "len %lu len %lu\n", dst_offset, len, dst->len); 4012 "len %lu len %lu\n", dst_offset, len, dst->len);
3971 BUG_ON(1); 4013 BUG_ON(1);
3972 } 4014 }
3973 if (dst_offset < src_offset) { 4015 if (!areas_overlap(src_offset, dst_offset, len)) {
3974 memcpy_extent_buffer(dst, dst_offset, src_offset, len); 4016 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3975 return; 4017 return;
3976 } 4018 }
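
The areas_overlap() helper added above encodes a simple fact: two ranges of length len overlap exactly when their start offsets are closer than len, and memmove_extent_buffer() now uses it so the fast memcpy path is taken whenever it is safe, not just when dst < src. A runnable userspace copy of the test and its use:

#include <stdio.h>
#include <string.h>

static int areas_overlap(unsigned long src, unsigned long dst,
			 unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;

	return distance < len;
}

int main(void)
{
	char buf[32] = "0123456789abcdef";

	if (!areas_overlap(0, 8, 8))
		memcpy(buf + 8, buf, 8);	/* disjoint: memcpy is fine */
	else
		memmove(buf + 8, buf, 8);	/* overlapping: must memmove */
	printf("%s\n", buf);
	return 0;
}
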
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index f62c5442835d..af2d7179c372 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
208 int bits, int exclusive_bits, u64 *failed_start, 208 int bits, int exclusive_bits, u64 *failed_start,
209 struct extent_state **cached_state, gfp_t mask); 209 struct extent_state **cached_state, gfp_t mask);
210int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 210int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
211 gfp_t mask); 211 struct extent_state **cached_state, gfp_t mask);
212int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 212int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
213 gfp_t mask); 213 gfp_t mask);
214int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 214int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e621ea54a3fd..75899a01dded 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
104/* 104/*
105 * unlocks pages after btrfs_file_write is done with them 105 * unlocks pages after btrfs_file_write is done with them
106 */ 106 */
107static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) 107void btrfs_drop_pages(struct page **pages, size_t num_pages)
108{ 108{
109 size_t i; 109 size_t i;
110 for (i = 0; i < num_pages; i++) { 110 for (i = 0; i < num_pages; i++) {
@@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
127 * this also makes the decision about creating an inline extent vs 127 * this also makes the decision about creating an inline extent vs
128 * doing real data extents, marking pages dirty and delalloc as required. 128 * doing real data extents, marking pages dirty and delalloc as required.
129 */ 129 */
130static noinline int dirty_and_release_pages(struct btrfs_root *root, 130int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
131 struct file *file, 131 struct page **pages, size_t num_pages,
132 struct page **pages, 132 loff_t pos, size_t write_bytes,
133 size_t num_pages, 133 struct extent_state **cached)
134 loff_t pos,
135 size_t write_bytes)
136{ 134{
137 int err = 0; 135 int err = 0;
138 int i; 136 int i;
139 struct inode *inode = fdentry(file)->d_inode;
140 u64 num_bytes; 137 u64 num_bytes;
141 u64 start_pos; 138 u64 start_pos;
142 u64 end_of_last_block; 139 u64 end_of_last_block;
@@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root,
149 146
150 end_of_last_block = start_pos + num_bytes - 1; 147 end_of_last_block = start_pos + num_bytes - 1;
151 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, 148 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
152 NULL); 149 cached);
153 if (err) 150 if (err)
154 return err; 151 return err;
155 152
@@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
992 } 989 }
993 990
994 if (copied > 0) { 991 if (copied > 0) {
995 ret = dirty_and_release_pages(root, file, pages, 992 ret = btrfs_dirty_pages(root, inode, pages,
996 dirty_pages, pos, 993 dirty_pages, pos, copied,
997 copied); 994 NULL);
998 if (ret) { 995 if (ret) {
999 btrfs_delalloc_release_space(inode, 996 btrfs_delalloc_release_space(inode,
1000 dirty_pages << PAGE_CACHE_SHIFT); 997 dirty_pages << PAGE_CACHE_SHIFT);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index f561c953205b..11d2e9cea09e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
508 struct inode *inode; 508 struct inode *inode;
509 struct rb_node *node; 509 struct rb_node *node;
510 struct list_head *pos, *n; 510 struct list_head *pos, *n;
511 struct page **pages;
511 struct page *page; 512 struct page *page;
512 struct extent_state *cached_state = NULL; 513 struct extent_state *cached_state = NULL;
513 struct btrfs_free_cluster *cluster = NULL; 514 struct btrfs_free_cluster *cluster = NULL;
@@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
517 u64 start, end, len; 518 u64 start, end, len;
518 u64 bytes = 0; 519 u64 bytes = 0;
519 u32 *crc, *checksums; 520 u32 *crc, *checksums;
520 pgoff_t index = 0, last_index = 0;
521 unsigned long first_page_offset; 521 unsigned long first_page_offset;
522 int num_checksums; 522 int index = 0, num_pages = 0;
523 int entries = 0; 523 int entries = 0;
524 int bitmaps = 0; 524 int bitmaps = 0;
525 int ret = 0; 525 int ret = 0;
526 bool next_page = false; 526 bool next_page = false;
527 bool out_of_space = false;
527 528
528 root = root->fs_info->tree_root; 529 root = root->fs_info->tree_root;
529 530
@@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root,
551 return 0; 552 return 0;
552 } 553 }
553 554
554 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 555 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
556 PAGE_CACHE_SHIFT;
555 filemap_write_and_wait(inode->i_mapping); 557 filemap_write_and_wait(inode->i_mapping);
556 btrfs_wait_ordered_range(inode, inode->i_size & 558 btrfs_wait_ordered_range(inode, inode->i_size &
557 ~(root->sectorsize - 1), (u64)-1); 559 ~(root->sectorsize - 1), (u64)-1);
558 560
559 /* We need a checksum per page. */ 561 /* We need a checksum per page. */
560 num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; 562 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
561 crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
562 if (!crc) { 563 if (!crc) {
563 iput(inode); 564 iput(inode);
564 return 0; 565 return 0;
565 } 566 }
566 567
568 pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
569 if (!pages) {
570 kfree(crc);
571 iput(inode);
572 return 0;
573 }
574
567 /* Since the first page has all of our checksums and our generation we 575 /* Since the first page has all of our checksums and our generation we
568 * need to calculate the offset into the page that we can start writing 576 * need to calculate the offset into the page that we can start writing
569 * our entries. 577 * our entries.
570 */ 578 */
571 first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); 579 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
572 580
573 /* Get the cluster for this block_group if it exists */ 581 /* Get the cluster for this block_group if it exists */
574 if (!list_empty(&block_group->cluster_list)) 582 if (!list_empty(&block_group->cluster_list))
@@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
590 * after find_get_page at this point. Just putting this here so people 598 * after find_get_page at this point. Just putting this here so people
591 * know and don't freak out. 599 * know and don't freak out.
592 */ 600 */
593 while (index <= last_index) { 601 while (index < num_pages) {
594 page = grab_cache_page(inode->i_mapping, index); 602 page = grab_cache_page(inode->i_mapping, index);
595 if (!page) { 603 if (!page) {
596 pgoff_t i = 0; 604 int i;
597 605
598 while (i < index) { 606 for (i = 0; i < num_pages; i++) {
599 page = find_get_page(inode->i_mapping, i); 607 unlock_page(pages[i]);
600 unlock_page(page); 608 page_cache_release(pages[i]);
601 page_cache_release(page);
602 page_cache_release(page);
603 i++;
604 } 609 }
605 goto out_free; 610 goto out_free;
606 } 611 }
612 pages[index] = page;
607 index++; 613 index++;
608 } 614 }
609 615
@@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
631 offset = start_offset; 637 offset = start_offset;
632 } 638 }
633 639
634 page = find_get_page(inode->i_mapping, index); 640 if (index >= num_pages) {
641 out_of_space = true;
642 break;
643 }
644
645 page = pages[index];
635 646
636 addr = kmap(page); 647 addr = kmap(page);
637 entry = addr + start_offset; 648 entry = addr + start_offset;
@@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
708 719
709 bytes += PAGE_CACHE_SIZE; 720 bytes += PAGE_CACHE_SIZE;
710 721
711 ClearPageChecked(page);
712 set_page_extent_mapped(page);
713 SetPageUptodate(page);
714 set_page_dirty(page);
715
716 /*
717 * We need to release our reference we got for grab_cache_page,
718 * except for the first page which will hold our checksums, we
719 * do that below.
720 */
721 if (index != 0) {
722 unlock_page(page);
723 page_cache_release(page);
724 }
725
726 page_cache_release(page);
727
728 index++; 722 index++;
729 } while (node || next_page); 723 } while (node || next_page);
730 724
@@ -734,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
734 struct btrfs_free_space *entry = 728 struct btrfs_free_space *entry =
735 list_entry(pos, struct btrfs_free_space, list); 729 list_entry(pos, struct btrfs_free_space, list);
736 730
737 page = find_get_page(inode->i_mapping, index); 731 if (index >= num_pages) {
732 out_of_space = true;
733 break;
734 }
735 page = pages[index];
738 736
739 addr = kmap(page); 737 addr = kmap(page);
740 memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); 738 memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
@@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root,
745 crc++; 743 crc++;
746 bytes += PAGE_CACHE_SIZE; 744 bytes += PAGE_CACHE_SIZE;
747 745
748 ClearPageChecked(page);
749 set_page_extent_mapped(page);
750 SetPageUptodate(page);
751 set_page_dirty(page);
752 unlock_page(page);
753 page_cache_release(page);
754 page_cache_release(page);
755 list_del_init(&entry->list); 746 list_del_init(&entry->list);
756 index++; 747 index++;
757 } 748 }
758 749
750 if (out_of_space) {
751 btrfs_drop_pages(pages, num_pages);
752 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
753 i_size_read(inode) - 1, &cached_state,
754 GFP_NOFS);
755 ret = 0;
756 goto out_free;
757 }
758
759 /* Zero out the rest of the pages just to make sure */ 759 /* Zero out the rest of the pages just to make sure */
760 while (index <= last_index) { 760 while (index < num_pages) {
761 void *addr; 761 void *addr;
762 762
763 page = find_get_page(inode->i_mapping, index); 763 page = pages[index];
764
765 addr = kmap(page); 764 addr = kmap(page);
766 memset(addr, 0, PAGE_CACHE_SIZE); 765 memset(addr, 0, PAGE_CACHE_SIZE);
767 kunmap(page); 766 kunmap(page);
768 ClearPageChecked(page);
769 set_page_extent_mapped(page);
770 SetPageUptodate(page);
771 set_page_dirty(page);
772 unlock_page(page);
773 page_cache_release(page);
774 page_cache_release(page);
775 bytes += PAGE_CACHE_SIZE; 767 bytes += PAGE_CACHE_SIZE;
776 index++; 768 index++;
777 } 769 }
778 770
779 btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
780
781 /* Write the checksums and trans id to the first page */ 771 /* Write the checksums and trans id to the first page */
782 { 772 {
783 void *addr; 773 void *addr;
784 u64 *gen; 774 u64 *gen;
785 775
786 page = find_get_page(inode->i_mapping, 0); 776 page = pages[0];
787 777
788 addr = kmap(page); 778 addr = kmap(page);
789 memcpy(addr, checksums, sizeof(u32) * num_checksums); 779 memcpy(addr, checksums, sizeof(u32) * num_pages);
790 gen = addr + (sizeof(u32) * num_checksums); 780 gen = addr + (sizeof(u32) * num_pages);
791 *gen = trans->transid; 781 *gen = trans->transid;
792 kunmap(page); 782 kunmap(page);
793 ClearPageChecked(page);
794 set_page_extent_mapped(page);
795 SetPageUptodate(page);
796 set_page_dirty(page);
797 unlock_page(page);
798 page_cache_release(page);
799 page_cache_release(page);
800 } 783 }
801 BTRFS_I(inode)->generation = trans->transid;
802 784
785 ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
786 bytes, &cached_state);
787 btrfs_drop_pages(pages, num_pages);
803 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, 788 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
804 i_size_read(inode) - 1, &cached_state, GFP_NOFS); 789 i_size_read(inode) - 1, &cached_state, GFP_NOFS);
805 790
791 if (ret) {
792 ret = 0;
793 goto out_free;
794 }
795
796 BTRFS_I(inode)->generation = trans->transid;
797
806 filemap_write_and_wait(inode->i_mapping); 798 filemap_write_and_wait(inode->i_mapping);
807 799
808 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 800 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -853,6 +845,7 @@ out_free:
853 BTRFS_I(inode)->generation = 0; 845 BTRFS_I(inode)->generation = 0;
854 } 846 }
855 kfree(checksums); 847 kfree(checksums);
848 kfree(pages);
856 btrfs_update_inode(trans, root, inode); 849 btrfs_update_inode(trans, root, inode);
857 iput(inode); 850 iput(inode);
858 return ret; 851 return ret;
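
Two pieces of arithmetic carry this free-space-cache change: num_pages now rounds up instead of truncating (the old `i_size / PAGE_CACHE_SIZE` undercounted for sizes that are not page aligned), and page 0 reserves one u32 checksum per page plus a u64 generation before entries begin. A plain C sketch of that layout math, assuming 4K pages for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	uint64_t i_size = 10000;	/* example inode size */
	/* round up: 10000 bytes -> 3 pages, never truncating a tail */
	unsigned long num_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long first_page_offset =
		sizeof(uint32_t) * num_pages + sizeof(uint64_t);

	printf("pages=%lu header=%lu bytes\n",
	       num_pages, first_page_offset);	/* pages=3 header=20 */
	return 0;
}
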
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5cc64ab9c485..fcd66b6a8086 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1770,9 +1770,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1770 add_pending_csums(trans, inode, ordered_extent->file_offset, 1770 add_pending_csums(trans, inode, ordered_extent->file_offset,
1771 &ordered_extent->list); 1771 &ordered_extent->list);
1772 1772
1773 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1773 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1774 ret = btrfs_update_inode(trans, root, inode); 1774 if (!ret) {
1775 BUG_ON(ret); 1775 ret = btrfs_update_inode(trans, root, inode);
1776 BUG_ON(ret);
1777 }
1778 ret = 0;
1776out: 1779out:
1777 if (nolock) { 1780 if (nolock) {
1778 if (trans) 1781 if (trans)
@@ -2590,6 +2593,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2590 struct btrfs_inode_item *item, 2593 struct btrfs_inode_item *item,
2591 struct inode *inode) 2594 struct inode *inode)
2592{ 2595{
2596 if (!leaf->map_token)
2597 map_private_extent_buffer(leaf, (unsigned long)item,
2598 sizeof(struct btrfs_inode_item),
2599 &leaf->map_token, &leaf->kaddr,
2600 &leaf->map_start, &leaf->map_len,
2601 KM_USER1);
2602
2593 btrfs_set_inode_uid(leaf, item, inode->i_uid); 2603 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2594 btrfs_set_inode_gid(leaf, item, inode->i_gid); 2604 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2595 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); 2605 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2618,6 +2628,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2618 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2628 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2619 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2629 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2620 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); 2630 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2631
2632 if (leaf->map_token) {
2633 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2634 leaf->map_token = NULL;
2635 }
2621} 2636}
2622 2637
2623/* 2638/*
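
The fill_inode_item() hunks map the item's region of the leaf once up front (map_private_extent_buffer) and unmap once at the end, rather than letting each btrfs_set_inode_*() call map and unmap per field. A loose userspace analog of acquiring once around a batch of small writes, with a mutex standing in for the kmap token management (illustrative names only, not kernel APIs):

#include <pthread.h>
#include <stdio.h>

struct item { long uid, gid, size; };
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static void fill_item(struct item *it)
{
	pthread_mutex_lock(&map_lock);	/* "map" once ... */
	it->uid = 1000;			/* ... then do all the small */
	it->gid = 1000;			/* field writes back to back */
	it->size = 4096;
	pthread_mutex_unlock(&map_lock);/* ... and "unmap" once */
}

int main(void)
{
	struct item it;

	fill_item(&it);
	printf("%ld %ld %ld\n", it.uid, it.gid, it.size);
	return 0;
}
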
@@ -4207,10 +4222,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4207 struct btrfs_key found_key; 4222 struct btrfs_key found_key;
4208 struct btrfs_path *path; 4223 struct btrfs_path *path;
4209 int ret; 4224 int ret;
4210 u32 nritems;
4211 struct extent_buffer *leaf; 4225 struct extent_buffer *leaf;
4212 int slot; 4226 int slot;
4213 int advance;
4214 unsigned char d_type; 4227 unsigned char d_type;
4215 int over = 0; 4228 int over = 0;
4216 u32 di_cur; 4229 u32 di_cur;
@@ -4253,27 +4266,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4253 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4266 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4254 if (ret < 0) 4267 if (ret < 0)
4255 goto err; 4268 goto err;
4256 advance = 0;
4257 4269
4258 while (1) { 4270 while (1) {
4259 leaf = path->nodes[0]; 4271 leaf = path->nodes[0];
4260 nritems = btrfs_header_nritems(leaf);
4261 slot = path->slots[0]; 4272 slot = path->slots[0];
4262 if (advance || slot >= nritems) { 4273 if (slot >= btrfs_header_nritems(leaf)) {
4263 if (slot >= nritems - 1) { 4274 ret = btrfs_next_leaf(root, path);
4264 ret = btrfs_next_leaf(root, path); 4275 if (ret < 0)
4265 if (ret) 4276 goto err;
4266 break; 4277 else if (ret > 0)
4267 leaf = path->nodes[0]; 4278 break;
4268 nritems = btrfs_header_nritems(leaf); 4279 continue;
4269 slot = path->slots[0];
4270 } else {
4271 slot++;
4272 path->slots[0]++;
4273 }
4274 } 4280 }
4275 4281
4276 advance = 1;
4277 item = btrfs_item_nr(leaf, slot); 4282 item = btrfs_item_nr(leaf, slot);
4278 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4283 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4279 4284
@@ -4282,7 +4287,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4282 if (btrfs_key_type(&found_key) != key_type) 4287 if (btrfs_key_type(&found_key) != key_type)
4283 break; 4288 break;
4284 if (found_key.offset < filp->f_pos) 4289 if (found_key.offset < filp->f_pos)
4285 continue; 4290 goto next;
4286 4291
4287 filp->f_pos = found_key.offset; 4292 filp->f_pos = found_key.offset;
4288 4293
@@ -4335,6 +4340,8 @@ skip:
4335 di_cur += di_len; 4340 di_cur += di_len;
4336 di = (struct btrfs_dir_item *)((char *)di + di_len); 4341 di = (struct btrfs_dir_item *)((char *)di + di_len);
4337 } 4342 }
4343next:
4344 path->slots[0]++;
4338 } 4345 }
4339 4346
4340 /* Reached end of directory/root. Bump pos past the last item. */ 4347 /* Reached end of directory/root. Bump pos past the last item. */
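
The readdir rewrite above drops the `advance` flag in favor of the canonical leaf walk: test the slot against nritems at the top, refill via btrfs_next_leaf() when the leaf is exhausted, and advance at a single `next:` label at the bottom. A generic, runnable version of that loop shape, with next_leaf() as a stand-in returning <0 on error and >0 at the end:

#include <stdio.h>

#define LEAF_MAX 3

static int leaf[LEAF_MAX], nritems, leaves_left = 2;

static int next_leaf(void)
{
	int i;

	if (leaves_left-- <= 0)
		return 1;		/* no more leaves */
	nritems = LEAF_MAX;
	for (i = 0; i < LEAF_MAX; i++)
		leaf[i] = (2 - leaves_left) * 10 + i;
	return 0;
}

int main(void)
{
	int slot = 0, ret;

	nritems = 0;			/* force an initial refill */
	while (1) {
		if (slot >= nritems) {
			ret = next_leaf();
			if (ret < 0)
				return 1;	/* hard error */
			else if (ret > 0)
				break;		/* walked off the end */
			slot = 0;
			continue;
		}
		if (leaf[slot] % 2)		/* filter, like f_pos */
			goto next;
		printf("%d\n", leaf[slot]);
next:
		slot++;				/* one advance point */
	}
	return 0;
}
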
@@ -4527,14 +4534,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4527 BUG_ON(!path); 4534 BUG_ON(!path);
4528 4535
4529 inode = new_inode(root->fs_info->sb); 4536 inode = new_inode(root->fs_info->sb);
4530 if (!inode) 4537 if (!inode) {
4538 btrfs_free_path(path);
4531 return ERR_PTR(-ENOMEM); 4539 return ERR_PTR(-ENOMEM);
4540 }
4532 4541
4533 if (dir) { 4542 if (dir) {
4534 trace_btrfs_inode_request(dir); 4543 trace_btrfs_inode_request(dir);
4535 4544
4536 ret = btrfs_set_inode_index(dir, index); 4545 ret = btrfs_set_inode_index(dir, index);
4537 if (ret) { 4546 if (ret) {
4547 btrfs_free_path(path);
4538 iput(inode); 4548 iput(inode);
4539 return ERR_PTR(ret); 4549 return ERR_PTR(ret);
4540 } 4550 }
@@ -4834,9 +4844,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4834 if (inode->i_nlink == ~0U) 4844 if (inode->i_nlink == ~0U)
4835 return -EMLINK; 4845 return -EMLINK;
4836 4846
4837 btrfs_inc_nlink(inode);
4838 inode->i_ctime = CURRENT_TIME;
4839
4840 err = btrfs_set_inode_index(dir, &index); 4847 err = btrfs_set_inode_index(dir, &index);
4841 if (err) 4848 if (err)
4842 goto fail; 4849 goto fail;
@@ -4852,6 +4859,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4852 goto fail; 4859 goto fail;
4853 } 4860 }
4854 4861
4862 btrfs_inc_nlink(inode);
4863 inode->i_ctime = CURRENT_TIME;
4864
4855 btrfs_set_trans_block_group(trans, dir); 4865 btrfs_set_trans_block_group(trans, dir);
4856 ihold(inode); 4866 ihold(inode);
4857 4867
@@ -5221,7 +5231,7 @@ again:
5221 btrfs_mark_buffer_dirty(leaf); 5231 btrfs_mark_buffer_dirty(leaf);
5222 } 5232 }
5223 set_extent_uptodate(io_tree, em->start, 5233 set_extent_uptodate(io_tree, em->start,
5224 extent_map_end(em) - 1, GFP_NOFS); 5234 extent_map_end(em) - 1, NULL, GFP_NOFS);
5225 goto insert; 5235 goto insert;
5226 } else { 5236 } else {
5227 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); 5237 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
@@ -5428,17 +5438,30 @@ out:
5428} 5438}
5429 5439
5430static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 5440static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5441 struct extent_map *em,
5431 u64 start, u64 len) 5442 u64 start, u64 len)
5432{ 5443{
5433 struct btrfs_root *root = BTRFS_I(inode)->root; 5444 struct btrfs_root *root = BTRFS_I(inode)->root;
5434 struct btrfs_trans_handle *trans; 5445 struct btrfs_trans_handle *trans;
5435 struct extent_map *em;
5436 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5446 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5437 struct btrfs_key ins; 5447 struct btrfs_key ins;
5438 u64 alloc_hint; 5448 u64 alloc_hint;
5439 int ret; 5449 int ret;
5450 bool insert = false;
5440 5451
5441 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 5452 /*
5453 * Ok if the extent map we looked up is a hole and is for the exact
5454 * range we want, there is no reason to allocate a new one, however if
5455 * it is not right then we need to free this one and drop the cache for
5456 * our range.
5457 */
5458 if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
5459 em->len != len) {
5460 free_extent_map(em);
5461 em = NULL;
5462 insert = true;
5463 btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5464 }
5442 5465
5443 trans = btrfs_join_transaction(root, 0); 5466 trans = btrfs_join_transaction(root, 0);
5444 if (IS_ERR(trans)) 5467 if (IS_ERR(trans))
@@ -5454,10 +5477,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5454 goto out; 5477 goto out;
5455 } 5478 }
5456 5479
5457 em = alloc_extent_map(GFP_NOFS);
5458 if (!em) { 5480 if (!em) {
5459 em = ERR_PTR(-ENOMEM); 5481 em = alloc_extent_map(GFP_NOFS);
5460 goto out; 5482 if (!em) {
5483 em = ERR_PTR(-ENOMEM);
5484 goto out;
5485 }
5461 } 5486 }
5462 5487
5463 em->start = start; 5488 em->start = start;
@@ -5467,9 +5492,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5467 em->block_start = ins.objectid; 5492 em->block_start = ins.objectid;
5468 em->block_len = ins.offset; 5493 em->block_len = ins.offset;
5469 em->bdev = root->fs_info->fs_devices->latest_bdev; 5494 em->bdev = root->fs_info->fs_devices->latest_bdev;
5495
5496 /*
5497 * We need to do this because if we're using the original em we searched
5498 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
5499 */
5500 em->flags = 0;
5470 set_bit(EXTENT_FLAG_PINNED, &em->flags); 5501 set_bit(EXTENT_FLAG_PINNED, &em->flags);
5471 5502
5472 while (1) { 5503 while (insert) {
5473 write_lock(&em_tree->lock); 5504 write_lock(&em_tree->lock);
5474 ret = add_extent_mapping(em_tree, em); 5505 ret = add_extent_mapping(em_tree, em);
5475 write_unlock(&em_tree->lock); 5506 write_unlock(&em_tree->lock);
@@ -5687,8 +5718,7 @@ must_cow:
5687 * it above 5718 * it above
5688 */ 5719 */
5689 len = bh_result->b_size; 5720 len = bh_result->b_size;
5690 free_extent_map(em); 5721 em = btrfs_new_extent_direct(inode, em, start, len);
5691 em = btrfs_new_extent_direct(inode, start, len);
5692 if (IS_ERR(em)) 5722 if (IS_ERR(em))
5693 return PTR_ERR(em); 5723 return PTR_ERR(em);
5694 len = min(len, em->len - (start - em->start)); 5724 len = min(len, em->len - (start - em->start));
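
btrfs_new_extent_direct() now reuses the extent map the caller looked up when it is exactly the hole being filled, frees and replaces it otherwise, and clears flags either way since a reused map may still carry EXTENT_FLAG_VACANCY. A hedged sketch of that reuse-or-replace decision with illustrative types (not the kernel's extent_map):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct map { long start, len; unsigned flags; };

static struct map *get_map(struct map *em, long start, long len,
			   bool *insert)
{
	*insert = false;
	if (!em || em->start != start || em->len != len) {
		free(em);			/* wrong shape: replace */
		em = calloc(1, sizeof(*em));
		if (!em)
			return NULL;
		em->start = start;
		em->len = len;
		*insert = true;		/* a new map must be inserted */
	}
	em->flags = 0;	/* stale flags on a reused map would be wrong */
	return em;
}

int main(void)
{
	bool insert;
	struct map *em = calloc(1, sizeof(*em));

	if (!em)
		return 1;
	em->len = 4096;
	em = get_map(em, 0, 4096, &insert);	/* exact match: reused */
	printf("insert=%d\n", insert);		/* insert=0 */
	em = get_map(em, 0, 8192, &insert);	/* mismatch: replaced */
	printf("insert=%d\n", insert);		/* insert=1 */
	free(em);
	return 0;
}
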
@@ -5851,8 +5881,10 @@ again:
5851 } 5881 }
5852 5882
5853 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5883 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5854 btrfs_ordered_update_i_size(inode, 0, ordered); 5884 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5855 btrfs_update_inode(trans, root, inode); 5885 if (!ret)
5886 btrfs_update_inode(trans, root, inode);
5887 ret = 0;
5856out_unlock: 5888out_unlock:
5857 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5889 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5858 ordered->file_offset + ordered->len - 1, 5890 ordered->file_offset + ordered->len - 1,
@@ -5938,7 +5970,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
5938 5970
5939static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 5971static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5940 int rw, u64 file_offset, int skip_sum, 5972 int rw, u64 file_offset, int skip_sum,
5941 u32 *csums) 5973 u32 *csums, int async_submit)
5942{ 5974{
5943 int write = rw & REQ_WRITE; 5975 int write = rw & REQ_WRITE;
5944 struct btrfs_root *root = BTRFS_I(inode)->root; 5976 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5949,13 +5981,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5949 if (ret) 5981 if (ret)
5950 goto err; 5982 goto err;
5951 5983
5952 if (write && !skip_sum) { 5984 if (skip_sum)
5985 goto map;
5986
5987 if (write && async_submit) {
5953 ret = btrfs_wq_submit_bio(root->fs_info, 5988 ret = btrfs_wq_submit_bio(root->fs_info,
5954 inode, rw, bio, 0, 0, 5989 inode, rw, bio, 0, 0,
5955 file_offset, 5990 file_offset,
5956 __btrfs_submit_bio_start_direct_io, 5991 __btrfs_submit_bio_start_direct_io,
5957 __btrfs_submit_bio_done); 5992 __btrfs_submit_bio_done);
5958 goto err; 5993 goto err;
5994 } else if (write) {
5995 /*
5996 * If we aren't doing async submit, calculate the csum of the
5997 * bio now.
5998 */
5999 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6000 if (ret)
6001 goto err;
5959 } else if (!skip_sum) { 6002 } else if (!skip_sum) {
5960 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, 6003 ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
5961 file_offset, csums); 6004 file_offset, csums);
@@ -5963,7 +6006,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5963 goto err; 6006 goto err;
5964 } 6007 }
5965 6008
5966 ret = btrfs_map_bio(root, rw, bio, 0, 1); 6009map:
6010 ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
5967err: 6011err:
5968 bio_put(bio); 6012 bio_put(bio);
5969 return ret; 6013 return ret;
@@ -5985,15 +6029,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
5985 int nr_pages = 0; 6029 int nr_pages = 0;
5986 u32 *csums = dip->csums; 6030 u32 *csums = dip->csums;
5987 int ret = 0; 6031 int ret = 0;
6032 int async_submit = 0;
5988 int write = rw & REQ_WRITE; 6033 int write = rw & REQ_WRITE;
5989 6034
5990 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
5991 if (!bio)
5992 return -ENOMEM;
5993 bio->bi_private = dip;
5994 bio->bi_end_io = btrfs_end_dio_bio;
5995 atomic_inc(&dip->pending_bios);
5996
5997 map_length = orig_bio->bi_size; 6035 map_length = orig_bio->bi_size;
5998 ret = btrfs_map_block(map_tree, READ, start_sector << 9, 6036 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
5999 &map_length, NULL, 0); 6037 &map_length, NULL, 0);
@@ -6002,6 +6040,19 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6002 return -EIO; 6040 return -EIO;
6003 } 6041 }
6004 6042
6043 if (map_length >= orig_bio->bi_size) {
6044 bio = orig_bio;
6045 goto submit;
6046 }
6047
6048 async_submit = 1;
6049 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
6050 if (!bio)
6051 return -ENOMEM;
6052 bio->bi_private = dip;
6053 bio->bi_end_io = btrfs_end_dio_bio;
6054 atomic_inc(&dip->pending_bios);
6055
6005 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { 6056 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6006 if (unlikely(map_length < submit_len + bvec->bv_len || 6057 if (unlikely(map_length < submit_len + bvec->bv_len ||
6007 bio_add_page(bio, bvec->bv_page, bvec->bv_len, 6058 bio_add_page(bio, bvec->bv_page, bvec->bv_len,
@@ -6015,7 +6066,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6015 atomic_inc(&dip->pending_bios); 6066 atomic_inc(&dip->pending_bios);
6016 ret = __btrfs_submit_dio_bio(bio, inode, rw, 6067 ret = __btrfs_submit_dio_bio(bio, inode, rw,
6017 file_offset, skip_sum, 6068 file_offset, skip_sum,
6018 csums); 6069 csums, async_submit);
6019 if (ret) { 6070 if (ret) {
6020 bio_put(bio); 6071 bio_put(bio);
6021 atomic_dec(&dip->pending_bios); 6072 atomic_dec(&dip->pending_bios);
@@ -6052,8 +6103,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6052 } 6103 }
6053 } 6104 }
6054 6105
6106submit:
6055 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 6107 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6056 csums); 6108 csums, async_submit);
6057 if (!ret) 6109 if (!ret)
6058 return 0; 6110 return 0;
6059 6111
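
The direct-IO hunks add a fast path: when btrfs_map_block() says the whole original bio fits in one mapped extent (map_length >= bi_size), it is submitted as-is with synchronous checksumming; only when the bio must be split does async_submit get set and the clone-and-split machinery run. A compressed sketch of that decision, with stand-in types rather than real block-layer structures:

#include <stdio.h>

struct bio { unsigned long size; };

static int map_block(unsigned long start, unsigned long *map_length)
{
	(void)start;
	*map_length = 65536;	/* pretend one stripe covers 64K */
	return 0;
}

static void submit(struct bio *bio, int async)
{
	printf("submit %lu bytes, async=%d\n", bio->size, async);
}

static int submit_hook(struct bio *orig)
{
	unsigned long map_length;
	int async_submit = 0;

	if (map_block(0, &map_length) < 0)
		return -1;
	if (map_length >= orig->size) {
		submit(orig, async_submit);	/* fast path: no split */
		return 0;
	}
	async_submit = 1;
	/* ... clone and split orig into per-stripe bios here ... */
	submit(orig, async_submit);
	return 0;
}

int main(void)
{
	struct bio small = { 4096 }, big = { 1 << 20 };

	submit_hook(&small);	/* async=0, submitted whole */
	submit_hook(&big);	/* async=1, would be split */
	return 0;
}
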
@@ -6148,6 +6200,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
6148 unsigned long nr_segs) 6200 unsigned long nr_segs)
6149{ 6201{
6150 int seg; 6202 int seg;
6203 int i;
6151 size_t size; 6204 size_t size;
6152 unsigned long addr; 6205 unsigned long addr;
6153 unsigned blocksize_mask = root->sectorsize - 1; 6206 unsigned blocksize_mask = root->sectorsize - 1;
@@ -6162,8 +6215,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
6162 addr = (unsigned long)iov[seg].iov_base; 6215 addr = (unsigned long)iov[seg].iov_base;
6163 size = iov[seg].iov_len; 6216 size = iov[seg].iov_len;
6164 end += size; 6217 end += size;
6165 if ((addr & blocksize_mask) || (size & blocksize_mask)) 6218 if ((addr & blocksize_mask) || (size & blocksize_mask))
6166 goto out; 6219 goto out;
6220
6221 /* If this is a write we don't need to check anymore */
6222 if (rw & WRITE)
6223 continue;
6224
6225 /*
6226 * Check to make sure we don't have duplicate iov_base's in this
6227 * iovec, if so return EINVAL, otherwise we'll get csum errors
6228 * when reading back.
6229 */
6230 for (i = seg + 1; i < nr_segs; i++) {
6231 if (iov[seg].iov_base == iov[i].iov_base)
6232 goto out;
6233 }
6167 } 6234 }
6168 retval = 0; 6235 retval = 0;
6169out: 6236out:
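
The check_direct_IO() change rejects read iovecs that repeat an iov_base, since two segments landing in the same user page would later trip checksum verification; the scan is a simple O(n^2) pairwise comparison, which is fine for the short vectors seen in practice. The same check as a standalone, runnable function:

#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>

static int check_dup_bases(const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long seg, i;

	for (seg = 0; seg < nr_segs; seg++)
		for (i = seg + 1; i < nr_segs; i++)
			if (iov[seg].iov_base == iov[i].iov_base)
				return -EINVAL;	/* duplicate base */
	return 0;
}

int main(void)
{
	char buf[8];
	struct iovec bad[2] = {
		{ buf, 4 }, { buf, 4 }		/* same base twice */
	};

	printf("%d\n", check_dup_bases(bad, 2));	/* -22 */
	return 0;
}
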
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index cfc264fefdb0..ffb48d6c5433 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
2287 struct btrfs_ioctl_space_info space; 2287 struct btrfs_ioctl_space_info space;
2288 struct btrfs_ioctl_space_info *dest; 2288 struct btrfs_ioctl_space_info *dest;
2289 struct btrfs_ioctl_space_info *dest_orig; 2289 struct btrfs_ioctl_space_info *dest_orig;
2290 struct btrfs_ioctl_space_info *user_dest; 2290 struct btrfs_ioctl_space_info __user *user_dest;
2291 struct btrfs_space_info *info; 2291 struct btrfs_space_info *info;
2292 u64 types[] = {BTRFS_BLOCK_GROUP_DATA, 2292 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
2293 BTRFS_BLOCK_GROUP_SYSTEM, 2293 BTRFS_BLOCK_GROUP_SYSTEM,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 58e7de9cc90c..0ac712efcdf2 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -159,7 +159,7 @@ enum {
159 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 159 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
160 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 160 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
161 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 161 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
162 Opt_enospc_debug, Opt_err, 162 Opt_enospc_debug, Opt_subvolrootid, Opt_err,
163}; 163};
164 164
165static match_table_t tokens = { 165static match_table_t tokens = {
@@ -189,6 +189,7 @@ static match_table_t tokens = {
189 {Opt_clear_cache, "clear_cache"}, 189 {Opt_clear_cache, "clear_cache"},
190 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, 190 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
191 {Opt_enospc_debug, "enospc_debug"}, 191 {Opt_enospc_debug, "enospc_debug"},
192 {Opt_subvolrootid, "subvolrootid=%d"},
192 {Opt_err, NULL}, 193 {Opt_err, NULL},
193}; 194};
194 195
@@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
232 break; 233 break;
233 case Opt_subvol: 234 case Opt_subvol:
234 case Opt_subvolid: 235 case Opt_subvolid:
236 case Opt_subvolrootid:
235 case Opt_device: 237 case Opt_device:
236 /* 238 /*
237 * These are parsed by btrfs_parse_early_options 239 * These are parsed by btrfs_parse_early_options
@@ -388,7 +390,7 @@ out:
388 */ 390 */
389static int btrfs_parse_early_options(const char *options, fmode_t flags, 391static int btrfs_parse_early_options(const char *options, fmode_t flags,
390 void *holder, char **subvol_name, u64 *subvol_objectid, 392 void *holder, char **subvol_name, u64 *subvol_objectid,
391 struct btrfs_fs_devices **fs_devices) 393 u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
392{ 394{
393 substring_t args[MAX_OPT_ARGS]; 395 substring_t args[MAX_OPT_ARGS];
394 char *opts, *orig, *p; 396 char *opts, *orig, *p;
@@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
429 *subvol_objectid = intarg; 431 *subvol_objectid = intarg;
430 } 432 }
431 break; 433 break;
434 case Opt_subvolrootid:
435 intarg = 0;
436 error = match_int(&args[0], &intarg);
437 if (!error) {
438 /* we want the original fs_tree */
439 if (!intarg)
440 *subvol_rootid =
441 BTRFS_FS_TREE_OBJECTID;
442 else
443 *subvol_rootid = intarg;
444 }
445 break;
432 case Opt_device: 446 case Opt_device:
433 error = btrfs_scan_one_device(match_strdup(&args[0]), 447 error = btrfs_scan_one_device(match_strdup(&args[0]),
434 flags, holder, fs_devices); 448 flags, holder, fs_devices);
@@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
736 fmode_t mode = FMODE_READ; 750 fmode_t mode = FMODE_READ;
737 char *subvol_name = NULL; 751 char *subvol_name = NULL;
738 u64 subvol_objectid = 0; 752 u64 subvol_objectid = 0;
753 u64 subvol_rootid = 0;
739 int error = 0; 754 int error = 0;
740 755
741 if (!(flags & MS_RDONLY)) 756 if (!(flags & MS_RDONLY))
@@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
743 758
744 error = btrfs_parse_early_options(data, mode, fs_type, 759 error = btrfs_parse_early_options(data, mode, fs_type,
745 &subvol_name, &subvol_objectid, 760 &subvol_name, &subvol_objectid,
746 &fs_devices); 761 &subvol_rootid, &fs_devices);
747 if (error) 762 if (error)
748 return ERR_PTR(error); 763 return ERR_PTR(error);
749 764
@@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
807 s->s_flags |= MS_ACTIVE; 822 s->s_flags |= MS_ACTIVE;
808 } 823 }
809 824
810 root = get_default_root(s, subvol_objectid);
811 if (IS_ERR(root)) {
812 error = PTR_ERR(root);
813 deactivate_locked_super(s);
814 goto error_free_subvol_name;
815 }
816 /* if they gave us a subvolume name bind mount into that */ 825 /* if they gave us a subvolume name bind mount into that */
817 if (strcmp(subvol_name, ".")) { 826 if (strcmp(subvol_name, ".")) {
818 struct dentry *new_root; 827 struct dentry *new_root;
828
829 root = get_default_root(s, subvol_rootid);
830 if (IS_ERR(root)) {
831 error = PTR_ERR(root);
832 deactivate_locked_super(s);
833 goto error_free_subvol_name;
834 }
835
819 mutex_lock(&root->d_inode->i_mutex); 836 mutex_lock(&root->d_inode->i_mutex);
820 new_root = lookup_one_len(subvol_name, root, 837 new_root = lookup_one_len(subvol_name, root,
821 strlen(subvol_name)); 838 strlen(subvol_name));
@@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
836 } 853 }
837 dput(root); 854 dput(root);
838 root = new_root; 855 root = new_root;
856 } else {
857 root = get_default_root(s, subvol_objectid);
858 if (IS_ERR(root)) {
859 error = PTR_ERR(root);
860 deactivate_locked_super(s);
861 goto error_free_subvol_name;
862 }
839 } 863 }
840 864
841 kfree(subvol_name); 865 kfree(subvol_name);
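
The new subvolrootid= early option maps an explicit 0 onto BTRFS_FS_TREE_OBJECTID, so "0" selects the original fs tree rather than objectid zero. A minimal strtoull-based parse of that same convention; the 5 below is the value I understand BTRFS_FS_TREE_OBJECTID to have, stated here as an assumption:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FS_TREE_OBJECTID 5ULL	/* assumed BTRFS_FS_TREE_OBJECTID */

static int parse_subvolrootid(const char *opt, uint64_t *rootid)
{
	const char *pfx = "subvolrootid=";
	unsigned long long v;
	char *end;

	if (strncmp(opt, pfx, strlen(pfx)) != 0)
		return -1;
	v = strtoull(opt + strlen(pfx), &end, 10);
	if (*end != '\0')
		return -1;
	*rootid = v ? v : FS_TREE_OBJECTID;	/* 0 -> original fs tree */
	return 0;
}

int main(void)
{
	uint64_t id;

	if (!parse_subvolrootid("subvolrootid=0", &id))
		printf("%llu\n", (unsigned long long)id);	/* 5 */
	if (!parse_subvolrootid("subvolrootid=257", &id))
		printf("%llu\n", (unsigned long long)id);	/* 257 */
	return 0;
}
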
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5b158da7e0bb..c571734d5e5a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -32,10 +32,8 @@
32 32
33static noinline void put_transaction(struct btrfs_transaction *transaction) 33static noinline void put_transaction(struct btrfs_transaction *transaction)
34{ 34{
35 WARN_ON(transaction->use_count == 0); 35 WARN_ON(atomic_read(&transaction->use_count) == 0);
36 transaction->use_count--; 36 if (atomic_dec_and_test(&transaction->use_count)) {
37 if (transaction->use_count == 0) {
38 list_del_init(&transaction->list);
39 memset(transaction, 0, sizeof(*transaction)); 37 memset(transaction, 0, sizeof(*transaction));
40 kmem_cache_free(btrfs_transaction_cachep, transaction); 38 kmem_cache_free(btrfs_transaction_cachep, transaction);
41 } 39 }
@@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root)
60 if (!cur_trans) 58 if (!cur_trans)
61 return -ENOMEM; 59 return -ENOMEM;
62 root->fs_info->generation++; 60 root->fs_info->generation++;
63 cur_trans->num_writers = 1; 61 atomic_set(&cur_trans->num_writers, 1);
64 cur_trans->num_joined = 0; 62 cur_trans->num_joined = 0;
65 cur_trans->transid = root->fs_info->generation; 63 cur_trans->transid = root->fs_info->generation;
66 init_waitqueue_head(&cur_trans->writer_wait); 64 init_waitqueue_head(&cur_trans->writer_wait);
67 init_waitqueue_head(&cur_trans->commit_wait); 65 init_waitqueue_head(&cur_trans->commit_wait);
68 cur_trans->in_commit = 0; 66 cur_trans->in_commit = 0;
69 cur_trans->blocked = 0; 67 cur_trans->blocked = 0;
70 cur_trans->use_count = 1; 68 atomic_set(&cur_trans->use_count, 1);
71 cur_trans->commit_done = 0; 69 cur_trans->commit_done = 0;
72 cur_trans->start_time = get_seconds(); 70 cur_trans->start_time = get_seconds();
73 71
@@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root)
88 root->fs_info->running_transaction = cur_trans; 86 root->fs_info->running_transaction = cur_trans;
89 spin_unlock(&root->fs_info->new_trans_lock); 87 spin_unlock(&root->fs_info->new_trans_lock);
90 } else { 88 } else {
91 cur_trans->num_writers++; 89 atomic_inc(&cur_trans->num_writers);
92 cur_trans->num_joined++; 90 cur_trans->num_joined++;
93 } 91 }
94 92
@@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root)
145 cur_trans = root->fs_info->running_transaction; 143 cur_trans = root->fs_info->running_transaction;
146 if (cur_trans && cur_trans->blocked) { 144 if (cur_trans && cur_trans->blocked) {
147 DEFINE_WAIT(wait); 145 DEFINE_WAIT(wait);
148 cur_trans->use_count++; 146 atomic_inc(&cur_trans->use_count);
149 while (1) { 147 while (1) {
150 prepare_to_wait(&root->fs_info->transaction_wait, &wait, 148 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
151 TASK_UNINTERRUPTIBLE); 149 TASK_UNINTERRUPTIBLE);
@@ -181,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
181{ 179{
182 struct btrfs_trans_handle *h; 180 struct btrfs_trans_handle *h;
183 struct btrfs_transaction *cur_trans; 181 struct btrfs_transaction *cur_trans;
182 int retries = 0;
184 int ret; 183 int ret;
185 184
186 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 185 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -204,7 +203,7 @@ again:
204 } 203 }
205 204
206 cur_trans = root->fs_info->running_transaction; 205 cur_trans = root->fs_info->running_transaction;
207 cur_trans->use_count++; 206 atomic_inc(&cur_trans->use_count);
208 if (type != TRANS_JOIN_NOLOCK) 207 if (type != TRANS_JOIN_NOLOCK)
209 mutex_unlock(&root->fs_info->trans_mutex); 208 mutex_unlock(&root->fs_info->trans_mutex);
210 209
@@ -224,10 +223,18 @@ again:
224 223
225 if (num_items > 0) { 224 if (num_items > 0) {
226 ret = btrfs_trans_reserve_metadata(h, root, num_items); 225 ret = btrfs_trans_reserve_metadata(h, root, num_items);
227 if (ret == -EAGAIN) { 226 if (ret == -EAGAIN && !retries) {
227 retries++;
228 btrfs_commit_transaction(h, root); 228 btrfs_commit_transaction(h, root);
229 goto again; 229 goto again;
230 } else if (ret == -EAGAIN) {
231 /*
232 * We have already retried and got EAGAIN, so really we
233 * don't have space, so set ret to -ENOSPC.
234 */
235 ret = -ENOSPC;
230 } 236 }
237
231 if (ret < 0) { 238 if (ret < 0) {
232 btrfs_end_transaction(h, root); 239 btrfs_end_transaction(h, root);
233 return ERR_PTR(ret); 240 return ERR_PTR(ret);
@@ -327,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
327 goto out_unlock; /* nothing committing|committed */ 334 goto out_unlock; /* nothing committing|committed */
328 } 335 }
329 336
330 cur_trans->use_count++; 337 atomic_inc(&cur_trans->use_count);
331 mutex_unlock(&root->fs_info->trans_mutex); 338 mutex_unlock(&root->fs_info->trans_mutex);
332 339
333 wait_for_commit(root, cur_trans); 340 wait_for_commit(root, cur_trans);
@@ -457,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
457 wake_up_process(info->transaction_kthread); 464 wake_up_process(info->transaction_kthread);
458 } 465 }
459 466
460 if (lock)
461 mutex_lock(&info->trans_mutex);
462 WARN_ON(cur_trans != info->running_transaction); 467 WARN_ON(cur_trans != info->running_transaction);
463 WARN_ON(cur_trans->num_writers < 1); 468 WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
464 cur_trans->num_writers--; 469 atomic_dec(&cur_trans->num_writers);
465 470
466 smp_mb(); 471 smp_mb();
467 if (waitqueue_active(&cur_trans->writer_wait)) 472 if (waitqueue_active(&cur_trans->writer_wait))
468 wake_up(&cur_trans->writer_wait); 473 wake_up(&cur_trans->writer_wait);
469 put_transaction(cur_trans); 474 put_transaction(cur_trans);
470 if (lock)
471 mutex_unlock(&info->trans_mutex);
472 475
473 if (current->journal_info == trans) 476 if (current->journal_info == trans)
474 current->journal_info = NULL; 477 current->journal_info = NULL;
@@ -1178,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1178 /* take transaction reference */ 1181 /* take transaction reference */
1179 mutex_lock(&root->fs_info->trans_mutex); 1182 mutex_lock(&root->fs_info->trans_mutex);
1180 cur_trans = trans->transaction; 1183 cur_trans = trans->transaction;
1181 cur_trans->use_count++; 1184 atomic_inc(&cur_trans->use_count);
1182 mutex_unlock(&root->fs_info->trans_mutex); 1185 mutex_unlock(&root->fs_info->trans_mutex);
1183 1186
1184 btrfs_end_transaction(trans, root); 1187 btrfs_end_transaction(trans, root);
@@ -1237,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1237 1240
1238 mutex_lock(&root->fs_info->trans_mutex); 1241 mutex_lock(&root->fs_info->trans_mutex);
1239 if (cur_trans->in_commit) { 1242 if (cur_trans->in_commit) {
1240 cur_trans->use_count++; 1243 atomic_inc(&cur_trans->use_count);
1241 mutex_unlock(&root->fs_info->trans_mutex); 1244 mutex_unlock(&root->fs_info->trans_mutex);
1242 btrfs_end_transaction(trans, root); 1245 btrfs_end_transaction(trans, root);
1243 1246
@@ -1259,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1259 prev_trans = list_entry(cur_trans->list.prev, 1262 prev_trans = list_entry(cur_trans->list.prev,
1260 struct btrfs_transaction, list); 1263 struct btrfs_transaction, list);
1261 if (!prev_trans->commit_done) { 1264 if (!prev_trans->commit_done) {
1262 prev_trans->use_count++; 1265 atomic_inc(&prev_trans->use_count);
1263 mutex_unlock(&root->fs_info->trans_mutex); 1266 mutex_unlock(&root->fs_info->trans_mutex);
1264 1267
1265 wait_for_commit(root, prev_trans); 1268 wait_for_commit(root, prev_trans);
@@ -1300,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1300 TASK_UNINTERRUPTIBLE); 1303 TASK_UNINTERRUPTIBLE);
1301 1304
1302 smp_mb(); 1305 smp_mb();
1303 if (cur_trans->num_writers > 1) 1306 if (atomic_read(&cur_trans->num_writers) > 1)
1304 schedule_timeout(MAX_SCHEDULE_TIMEOUT); 1307 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1305 else if (should_grow) 1308 else if (should_grow)
1306 schedule_timeout(1); 1309 schedule_timeout(1);
1307 1310
1308 mutex_lock(&root->fs_info->trans_mutex); 1311 mutex_lock(&root->fs_info->trans_mutex);
1309 finish_wait(&cur_trans->writer_wait, &wait); 1312 finish_wait(&cur_trans->writer_wait, &wait);
1310 } while (cur_trans->num_writers > 1 || 1313 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1311 (should_grow && cur_trans->num_joined != joined)); 1314 (should_grow && cur_trans->num_joined != joined));
1312 1315
1313 ret = create_pending_snapshots(trans, root->fs_info); 1316 ret = create_pending_snapshots(trans, root->fs_info);
@@ -1394,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1394 1397
1395 wake_up(&cur_trans->commit_wait); 1398 wake_up(&cur_trans->commit_wait);
1396 1399
1400 list_del_init(&cur_trans->list);
1397 put_transaction(cur_trans); 1401 put_transaction(cur_trans);
1398 put_transaction(cur_trans); 1402 put_transaction(cur_trans);
1399 1403
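
Converting use_count and num_writers to atomic_t is what lets __btrfs_end_transaction() drop the trans_mutex lock/unlock pair around the writer decrement: the counters now carry their own synchronization, and the last put frees. The equivalent get/put pattern in portable C11 atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct trans {
	atomic_int use_count;
};

static struct trans *trans_new(void)
{
	struct trans *t = malloc(sizeof(*t));

	if (t)
		atomic_init(&t->use_count, 1);
	return t;
}

static void trans_get(struct trans *t)
{
	atomic_fetch_add(&t->use_count, 1);
}

static void trans_put(struct trans *t)
{
	/* fetch_sub returns the old value: old == 1 means we were last,
	 * the same dec-and-test shape as put_transaction() above */
	if (atomic_fetch_sub(&t->use_count, 1) == 1) {
		printf("freeing transaction\n");
		free(t);
	}
}

int main(void)
{
	struct trans *t = trans_new();

	if (!t)
		return 1;
	trans_get(t);	/* a second reference, e.g. a waiter */
	trans_put(t);	/* waiter done */
	trans_put(t);	/* last reference: frees */
	return 0;
}
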
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 229a594cacd5..e441acc6c584 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -27,11 +27,11 @@ struct btrfs_transaction {
27 * total writers in this transaction, it must be zero before the 27 * total writers in this transaction, it must be zero before the
28 * transaction can end 28 * transaction can end
29 */ 29 */
30 unsigned long num_writers; 30 atomic_t num_writers;
31 31
32 unsigned long num_joined; 32 unsigned long num_joined;
33 int in_commit; 33 int in_commit;
34 int use_count; 34 atomic_t use_count;
35 int commit_done; 35 int commit_done;
36 int blocked; 36 int blocked;
37 struct list_head list; 37 struct list_head list;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a5303b871b13..cfd660550ded 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
180 struct btrfs_path *path; 180 struct btrfs_path *path;
181 struct extent_buffer *leaf; 181 struct extent_buffer *leaf;
182 struct btrfs_dir_item *di; 182 struct btrfs_dir_item *di;
183 int ret = 0, slot, advance; 183 int ret = 0, slot;
184 size_t total_size = 0, size_left = size; 184 size_t total_size = 0, size_left = size;
185 unsigned long name_ptr; 185 unsigned long name_ptr;
186 size_t name_len; 186 size_t name_len;
187 u32 nritems;
188 187
189 /* 188 /*
190 * ok we want all objects associated with this id. 189 * ok we want all objects associated with this id.
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
204 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 203 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
205 if (ret < 0) 204 if (ret < 0)
206 goto err; 205 goto err;
207 advance = 0; 206
208 while (1) { 207 while (1) {
209 leaf = path->nodes[0]; 208 leaf = path->nodes[0];
210 nritems = btrfs_header_nritems(leaf);
211 slot = path->slots[0]; 209 slot = path->slots[0];
212 210
213 /* this is where we start walking through the path */ 211 /* this is where we start walking through the path */
214 if (advance || slot >= nritems) { 212 if (slot >= btrfs_header_nritems(leaf)) {
215 /* 213 /*
216 * if we've reached the last slot in this leaf we need 214 * if we've reached the last slot in this leaf we need
217 * to go to the next leaf and reset everything 215 * to go to the next leaf and reset everything
218 */ 216 */
219 if (slot >= nritems-1) { 217 ret = btrfs_next_leaf(root, path);
220 ret = btrfs_next_leaf(root, path); 218 if (ret < 0)
221 if (ret) 219 goto err;
222 break; 220 else if (ret > 0)
223 leaf = path->nodes[0]; 221 break;
224 nritems = btrfs_header_nritems(leaf); 222 continue;
225 slot = path->slots[0];
226 } else {
227 /*
228 * just walking through the slots on this leaf
229 */
230 slot++;
231 path->slots[0]++;
232 }
233 } 223 }
234 advance = 1;
235 224
236 btrfs_item_key_to_cpu(leaf, &found_key, slot); 225 btrfs_item_key_to_cpu(leaf, &found_key, slot);
237 226
@@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
250 239
251 /* we are just looking for how big our buffer needs to be */ 240 /* we are just looking for how big our buffer needs to be */
252 if (!size) 241 if (!size)
253 continue; 242 goto next;
254 243
255 if (!buffer || (name_len + 1) > size_left) { 244 if (!buffer || (name_len + 1) > size_left) {
256 ret = -ERANGE; 245 ret = -ERANGE;
@@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
263 252
264 size_left -= name_len + 1; 253 size_left -= name_len + 1;
265 buffer += name_len + 1; 254 buffer += name_len + 1;
255next:
256 path->slots[0]++;
266 } 257 }
267 ret = total_size; 258 ret = total_size;
268 259
diff --git a/fs/dcache.c b/fs/dcache.c
index ad25c4cec7d5..129a35730994 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2131,7 +2131,7 @@ EXPORT_SYMBOL(d_rehash);
2131 */ 2131 */
2132void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2132void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2133{ 2133{
2134 BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); 2134 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2135 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2135 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2136 2136
2137 spin_lock(&dentry->d_lock); 2137 spin_lock(&dentry->d_lock);
diff --git a/fs/fhandle.c b/fs/fhandle.c
index bf93ad2bee07..6b088641f5bf 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -7,6 +7,7 @@
7#include <linux/exportfs.h> 7#include <linux/exportfs.h>
8#include <linux/fs_struct.h> 8#include <linux/fs_struct.h>
9#include <linux/fsnotify.h> 9#include <linux/fsnotify.h>
10#include <linux/personality.h>
10#include <asm/uaccess.h> 11#include <asm/uaccess.h>
11#include "internal.h" 12#include "internal.h"
12 13
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 751d6b255a12..0845f84f2a5f 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -110,14 +110,13 @@ int unregister_filesystem(struct file_system_type * fs)
110 *tmp = fs->next; 110 *tmp = fs->next;
111 fs->next = NULL; 111 fs->next = NULL;
112 write_unlock(&file_systems_lock); 112 write_unlock(&file_systems_lock);
113 synchronize_rcu();
113 return 0; 114 return 0;
114 } 115 }
115 tmp = &(*tmp)->next; 116 tmp = &(*tmp)->next;
116 } 117 }
117 write_unlock(&file_systems_lock); 118 write_unlock(&file_systems_lock);
118 119
119 synchronize_rcu();
120
121 return -EINVAL; 120 return -EINVAL;
122} 121}
123 122
diff --git a/fs/namei.c b/fs/namei.c
index e6cd6113872c..54fc993e3027 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -697,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd)
697 do { 697 do {
698 seq = read_seqcount_begin(&fs->seq); 698 seq = read_seqcount_begin(&fs->seq);
699 nd->root = fs->root; 699 nd->root = fs->root;
700 nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
700 } while (read_seqcount_retry(&fs->seq, seq)); 701 } while (read_seqcount_retry(&fs->seq, seq));
701 } 702 }
702} 703}
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index b10e3540d5b7..ce4f62440425 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
1299 1299
1300 BUG_ON (!data || !frags); 1300 BUG_ON (!data || !frags);
1301 1301
1302 if (size < 2 * VBLK_SIZE_HEAD) {
 1303 ldm_error("Value of size is too small.");
1304 return false;
1305 }
1306
1302 group = get_unaligned_be32(data + 0x08); 1307 group = get_unaligned_be32(data + 0x08);
1303 rec = get_unaligned_be16(data + 0x0C); 1308 rec = get_unaligned_be16(data + 0x0C);
1304 num = get_unaligned_be16(data + 0x0E); 1309 num = get_unaligned_be16(data + 0x0E);
@@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
1306 ldm_error ("A VBLK claims to have %d parts.", num); 1311 ldm_error ("A VBLK claims to have %d parts.", num);
1307 return false; 1312 return false;
1308 } 1313 }
1314 if (rec >= num) {
1315 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
1316 return false;
1317 }
1309 1318
1310 list_for_each (item, frags) { 1319 list_for_each (item, frags) {
1311 f = list_entry (item, struct frag, list); 1320 f = list_entry (item, struct frag, list);
@@ -1334,10 +1343,9 @@ found:
1334 1343
1335 f->map |= (1 << rec); 1344 f->map |= (1 << rec);
1336 1345
1337 if (num > 0) { 1346 data += VBLK_SIZE_HEAD;
1338 data += VBLK_SIZE_HEAD; 1347 size -= VBLK_SIZE_HEAD;
1339 size -= VBLK_SIZE_HEAD; 1348
1340 }
1341 memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); 1349 memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
1342 1350
1343 return true; 1351 return true;
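
The ldm_frag_add() hunks add two sanity checks before the now-unconditional header strip: the fragment must be at least two headers long, and rec must index inside num; without them, the memcpy offset arithmetic on a crafted partition table can read or write out of bounds. The guards restated as a standalone validator (the 16 and the 1..4 parts range mirror values used in this code, stated here as assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VBLK_SIZE_HEAD 16	/* assumed on-disk header size */

static bool frag_ok(int size, uint16_t rec, uint16_t num)
{
	if (size < 2 * VBLK_SIZE_HEAD)
		return false;	/* too small to strip a header from */
	if (num == 0 || num > 4)
		return false;	/* implausible parts count */
	if (rec >= num)
		return false;	/* would index past the group */
	return true;
}

int main(void)
{
	printf("%d\n", frag_ok(512, 0, 4));	/* 1 */
	printf("%d\n", frag_ok(16, 0, 4));	/* 0: short fragment */
	printf("%d\n", frag_ok(512, 4, 4));	/* 0: rec >= num */
	return 0;
}
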
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dd6628d3ba42..dfa532730e55 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3124,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
3124/* for the /proc/ directory itself, after non-process stuff has been done */ 3124/* for the /proc/ directory itself, after non-process stuff has been done */
3125int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) 3125int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
3126{ 3126{
3127 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; 3127 unsigned int nr;
3128 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); 3128 struct task_struct *reaper;
3129 struct tgid_iter iter; 3129 struct tgid_iter iter;
3130 struct pid_namespace *ns; 3130 struct pid_namespace *ns;
3131 3131
3132 if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
3133 goto out_no_task;
3134 nr = filp->f_pos - FIRST_PROCESS_ENTRY;
3135
3136 reaper = get_proc_task(filp->f_path.dentry->d_inode);
3132 if (!reaper) 3137 if (!reaper)
3133 goto out_no_task; 3138 goto out_no_task;
3134 3139
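
The proc_pid_readdir() fix reorders the math: the old code narrowed the 64-bit f_pos into an unsigned nr before checking any bound, so a huge offset could wrap into a plausible cursor; the new code rejects out-of-range positions first. A sketch of that validate-then-narrow shape, with illustrative constants rather than procfs's real ones:

#include <stdint.h>
#include <stdio.h>

#define FIRST_ENTRY 256		/* stand-in for FIRST_PROCESS_ENTRY */
#define MAX_LIMIT   32768	/* stand-in for PID_MAX_LIMIT */

static int readdir_pos_ok(int64_t f_pos, unsigned int *nr)
{
	if (f_pos >= (int64_t)MAX_LIMIT + FIRST_ENTRY)
		return 0;	/* past the end: nothing to do */
	*nr = f_pos - FIRST_ENTRY;	/* safe to narrow now */
	return 1;
}

int main(void)
{
	unsigned int nr = 0;

	printf("%d\n", readdir_pos_ok(300, &nr));	/* 1, nr = 44 */
	printf("%d\n", readdir_pos_ok(1LL << 40, &nr));	/* 0 */
	return 0;
}
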
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 9eead2c796b7..fbb0b478a346 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
112 SetPageDirty(page); 112 SetPageDirty(page);
113 113
114 unlock_page(page); 114 unlock_page(page);
115 put_page(page);
115 } 116 }
116 117
117 return 0; 118 return 0;
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 919f0de29d8f..e6493cac193d 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -23,6 +23,12 @@
23#ifndef __UBIFS_DEBUG_H__ 23#ifndef __UBIFS_DEBUG_H__
24#define __UBIFS_DEBUG_H__ 24#define __UBIFS_DEBUG_H__
25 25
26/* Checking helper functions */
27typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
28 struct ubifs_zbranch *zbr, void *priv);
29typedef int (*dbg_znode_callback)(struct ubifs_info *c,
30 struct ubifs_znode *znode, void *priv);
31
26#ifdef CONFIG_UBIFS_FS_DEBUG 32#ifdef CONFIG_UBIFS_FS_DEBUG
27 33
28/** 34/**
@@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c);
270void dbg_dump_index(struct ubifs_info *c); 276void dbg_dump_index(struct ubifs_info *c);
271void dbg_dump_lpt_lebs(const struct ubifs_info *c); 277void dbg_dump_lpt_lebs(const struct ubifs_info *c);
272 278
273/* Checking helper functions */
274typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
275 struct ubifs_zbranch *zbr, void *priv);
276typedef int (*dbg_znode_callback)(struct ubifs_info *c,
277 struct ubifs_znode *znode, void *priv);
278int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, 279int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
279 dbg_znode_callback znode_cb, void *priv); 280 dbg_znode_callback znode_cb, void *priv);
280 281
@@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
295int dbg_check_filesystem(struct ubifs_info *c); 296int dbg_check_filesystem(struct ubifs_info *c);
296void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, 297void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
297 int add_pos); 298 int add_pos);
298int dbg_check_lprops(struct ubifs_info *c);
299int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, 299int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
300 int row, int col); 300 int row, int col);
301int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, 301int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
@@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
401#define DBGKEY(key) ((char *)(key)) 401#define DBGKEY(key) ((char *)(key))
402#define DBGKEY1(key) ((char *)(key)) 402#define DBGKEY1(key) ((char *)(key))
403 403
404#define ubifs_debugging_init(c) 0 404static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; }
405#define ubifs_debugging_exit(c) ({}) 405static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; }
406 406static inline const char *dbg_ntype(int type) { return ""; }
407#define dbg_ntype(type) "" 407static inline const char *dbg_cstate(int cmt_state) { return ""; }
408#define dbg_cstate(cmt_state) "" 408static inline const char *dbg_jhead(int jhead) { return ""; }
409#define dbg_jhead(jhead) "" 409static inline const char *
410#define dbg_get_key_dump(c, key) ({}) 410dbg_get_key_dump(const struct ubifs_info *c,
411#define dbg_dump_inode(c, inode) ({}) 411 const union ubifs_key *key) { return ""; }
412#define dbg_dump_node(c, node) ({}) 412static inline void dbg_dump_inode(const struct ubifs_info *c,
413#define dbg_dump_lpt_node(c, node, lnum, offs) ({}) 413 const struct inode *inode) { return; }
414#define dbg_dump_budget_req(req) ({}) 414static inline void dbg_dump_node(const struct ubifs_info *c,
415#define dbg_dump_lstats(lst) ({}) 415 const void *node) { return; }
416#define dbg_dump_budg(c) ({}) 416static inline void dbg_dump_lpt_node(const struct ubifs_info *c,
417#define dbg_dump_lprop(c, lp) ({}) 417 void *node, int lnum,
418#define dbg_dump_lprops(c) ({}) 418 int offs) { return; }
419#define dbg_dump_lpt_info(c) ({}) 419static inline void
420#define dbg_dump_leb(c, lnum) ({}) 420dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; }
421#define dbg_dump_znode(c, znode) ({}) 421static inline void
422#define dbg_dump_heap(c, heap, cat) ({}) 422dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; }
423#define dbg_dump_pnode(c, pnode, parent, iip) ({}) 423static inline void dbg_dump_budg(struct ubifs_info *c) { return; }
424#define dbg_dump_tnc(c) ({}) 424static inline void dbg_dump_lprop(const struct ubifs_info *c,
425#define dbg_dump_index(c) ({}) 425 const struct ubifs_lprops *lp) { return; }
426#define dbg_dump_lpt_lebs(c) ({}) 426static inline void dbg_dump_lprops(struct ubifs_info *c) { return; }
427 427static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; }
428#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 428static inline void dbg_dump_leb(const struct ubifs_info *c,
429#define dbg_old_index_check_init(c, zroot) 0 429 int lnum) { return; }
430#define dbg_save_space_info(c) ({}) 430static inline void
431#define dbg_check_space_info(c) 0 431dbg_dump_znode(const struct ubifs_info *c,
432#define dbg_check_old_index(c, zroot) 0 432 const struct ubifs_znode *znode) { return; }
433#define dbg_check_cats(c) 0 433static inline void dbg_dump_heap(struct ubifs_info *c,
434#define dbg_check_ltab(c) 0 434 struct ubifs_lpt_heap *heap,
435#define dbg_chk_lpt_free_spc(c) 0 435 int cat) { return; }
436#define dbg_chk_lpt_sz(c, action, len) 0 436static inline void dbg_dump_pnode(struct ubifs_info *c,
437#define dbg_check_synced_i_size(inode) 0 437 struct ubifs_pnode *pnode,
438#define dbg_check_dir_size(c, dir) 0 438 struct ubifs_nnode *parent,
439#define dbg_check_tnc(c, x) 0 439 int iip) { return; }
440#define dbg_check_idx_size(c, idx_size) 0 440static inline void dbg_dump_tnc(struct ubifs_info *c) { return; }
441#define dbg_check_filesystem(c) 0 441static inline void dbg_dump_index(struct ubifs_info *c) { return; }
442#define dbg_check_heap(c, heap, cat, add_pos) ({}) 442static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; }
443#define dbg_check_lprops(c) 0 443
444#define dbg_check_lpt_nodes(c, cnode, row, col) 0 444static inline int dbg_walk_index(struct ubifs_info *c,
445#define dbg_check_inode_size(c, inode, size) 0 445 dbg_leaf_callback leaf_cb,
446#define dbg_check_data_nodes_order(c, head) 0 446 dbg_znode_callback znode_cb,
447#define dbg_check_nondata_nodes_order(c, head) 0 447 void *priv) { return 0; }
448#define dbg_force_in_the_gaps_enabled 0 448static inline void dbg_save_space_info(struct ubifs_info *c) { return; }
449#define dbg_force_in_the_gaps() 0 449static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; }
450#define dbg_failure_mode 0 450static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; }
451 451static inline int
452#define dbg_debugfs_init() 0 452dbg_old_index_check_init(struct ubifs_info *c,
453#define dbg_debugfs_exit() 453 struct ubifs_zbranch *zroot) { return 0; }
454#define dbg_debugfs_init_fs(c) 0 454static inline int
455#define dbg_debugfs_exit_fs(c) 0 455dbg_check_old_index(struct ubifs_info *c,
456 struct ubifs_zbranch *zroot) { return 0; }
457static inline int dbg_check_cats(struct ubifs_info *c) { return 0; }
458static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; }
459static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; }
460static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
461 int action, int len) { return 0; }
462static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; }
463static inline int dbg_check_dir_size(struct ubifs_info *c,
464 const struct inode *dir) { return 0; }
465static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; }
466static inline int dbg_check_idx_size(struct ubifs_info *c,
467 long long idx_size) { return 0; }
468static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; }
469static inline void dbg_check_heap(struct ubifs_info *c,
470 struct ubifs_lpt_heap *heap,
471 int cat, int add_pos) { return; }
472static inline int dbg_check_lpt_nodes(struct ubifs_info *c,
473 struct ubifs_cnode *cnode, int row, int col) { return 0; }
474static inline int dbg_check_inode_size(struct ubifs_info *c,
475 const struct inode *inode,
476 loff_t size) { return 0; }
477static inline int
478dbg_check_data_nodes_order(struct ubifs_info *c,
479 struct list_head *head) { return 0; }
480static inline int
481dbg_check_nondata_nodes_order(struct ubifs_info *c,
482 struct list_head *head) { return 0; }
483
484static inline int dbg_force_in_the_gaps(void) { return 0; }
485#define dbg_force_in_the_gaps_enabled 0
486#define dbg_failure_mode 0
487
488static inline int dbg_debugfs_init(void) { return 0; }
489static inline void dbg_debugfs_exit(void) { return; }
490static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; }
491static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; }
456 492
457#endif /* !CONFIG_UBIFS_FS_DEBUG */ 493#endif /* !CONFIG_UBIFS_FS_DEBUG */
458#endif /* !__UBIFS_DEBUG_H__ */ 494#endif /* !__UBIFS_DEBUG_H__ */
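The UBIFS hunk above swaps empty-macro debug stubs for typed static inlines. A minimal userspace sketch (the names below are invented, not from the patch) of why that conversion matters: the macro form never parses or type-checks its arguments, while the inline form does, at zero cost in non-debug builds.

/* build with: gcc -O2 -o stubs stubs.c (GNU C, for the ({}) extension) */
#include <stdio.h>

struct ctx { int id; };

/* old style: an empty statement expression; arguments are discarded
 * unevaluated, so even a bogus argument type compiles silently */
#define dbg_dump_macro(c) ({})

/* new style: a typed no-op; callers are type-checked and the compiler
 * still inlines it away, so the non-debug build pays nothing */
static inline void dbg_dump_inline(const struct ctx *c) { (void)c; }

int main(void)
{
	struct ctx c = { .id = 1 };

	dbg_dump_macro("not a struct ctx *");	/* accepted: nothing is checked */
	dbg_dump_inline(&c);			/* a string argument here would not compile */
	puts("both stubs compiled to no code");
	return 0;
}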
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 28be1e6a65e8..b286db79c686 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync)
1312 1312
1313 dbg_gen("syncing inode %lu", inode->i_ino); 1313 dbg_gen("syncing inode %lu", inode->i_ino);
1314 1314
1315 if (inode->i_sb->s_flags & MS_RDONLY)
1316 return 0;
1317
1315 /* 1318 /*
1316 * VFS has already synchronized dirty pages for this inode. Synchronize 1319 * VFS has already synchronized dirty pages for this inode. Synchronize
1317 * the inode unless this is a 'datasync()' call. 1320 * the inode unless this is a 'datasync()' call.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32176cc8e715..cbbfd98ad4a3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -697,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q);
697extern void blk_stop_queue(struct request_queue *q); 697extern void blk_stop_queue(struct request_queue *q);
698extern void blk_sync_queue(struct request_queue *q); 698extern void blk_sync_queue(struct request_queue *q);
699extern void __blk_stop_queue(struct request_queue *q); 699extern void __blk_stop_queue(struct request_queue *q);
700extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); 700extern void __blk_run_queue(struct request_queue *q);
701extern void blk_run_queue(struct request_queue *); 701extern void blk_run_queue(struct request_queue *);
702extern int blk_rq_map_user(struct request_queue *, struct request *, 702extern int blk_rq_map_user(struct request_queue *, struct request *,
703 struct rq_map_data *, void __user *, unsigned long, 703 struct rq_map_data *, void __user *, unsigned long,
@@ -857,26 +857,39 @@ extern void blk_put_queue(struct request_queue *);
857struct blk_plug { 857struct blk_plug {
858 unsigned long magic; 858 unsigned long magic;
859 struct list_head list; 859 struct list_head list;
860 struct list_head cb_list;
860 unsigned int should_sort; 861 unsigned int should_sort;
861}; 862};
863struct blk_plug_cb {
864 struct list_head list;
865 void (*callback)(struct blk_plug_cb *);
866};
862 867
863extern void blk_start_plug(struct blk_plug *); 868extern void blk_start_plug(struct blk_plug *);
864extern void blk_finish_plug(struct blk_plug *); 869extern void blk_finish_plug(struct blk_plug *);
865extern void __blk_flush_plug(struct task_struct *, struct blk_plug *); 870extern void blk_flush_plug_list(struct blk_plug *, bool);
866 871
867static inline void blk_flush_plug(struct task_struct *tsk) 872static inline void blk_flush_plug(struct task_struct *tsk)
868{ 873{
869 struct blk_plug *plug = tsk->plug; 874 struct blk_plug *plug = tsk->plug;
870 875
871 if (unlikely(plug)) 876 if (plug)
872 __blk_flush_plug(tsk, plug); 877 blk_flush_plug_list(plug, false);
878}
879
880static inline void blk_schedule_flush_plug(struct task_struct *tsk)
881{
882 struct blk_plug *plug = tsk->plug;
883
884 if (plug)
885 blk_flush_plug_list(plug, true);
873} 886}
874 887
875static inline bool blk_needs_flush_plug(struct task_struct *tsk) 888static inline bool blk_needs_flush_plug(struct task_struct *tsk)
876{ 889{
877 struct blk_plug *plug = tsk->plug; 890 struct blk_plug *plug = tsk->plug;
878 891
879 return plug && !list_empty(&plug->list); 892 return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
880} 893}
881 894
882/* 895/*
@@ -1314,6 +1327,11 @@ static inline void blk_flush_plug(struct task_struct *task)
1314{ 1327{
1315} 1328}
1316 1329
1330static inline void blk_schedule_flush_plug(struct task_struct *task)
1331{
1332}
1333
1334
1317static inline bool blk_needs_flush_plug(struct task_struct *tsk) 1335static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1318{ 1336{
1319 return false; 1337 return false;
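The new cb_list hook above lets code that batches work under a plug have a callback run when the plug is flushed. A hedged kernel-style sketch of a consumer; every name below is invented for illustration, and the lifetime rule (callback frees its own object) is an assumption the header does not spell out:

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct my_plug_cb {
	struct blk_plug_cb cb;		/* list + callback, as declared above */
	unsigned int pending;
};

/* invoked from blk_flush_plug_list() when the task unplugs */
static void my_unplug(struct blk_plug_cb *cb)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	pr_info("plug flushed with %u deferred items\n", mcb->pending);
	kfree(mcb);			/* assumption: the callback owns the object */
}

static void my_defer_until_unplug(void)
{
	struct blk_plug *plug = current->plug;
	struct my_plug_cb *mcb;

	if (!plug)
		return;			/* no plug active: do the work directly instead */

	mcb = kmalloc(sizeof(*mcb), GFP_ATOMIC);
	if (!mcb)
		return;
	mcb->cb.callback = my_unplug;
	mcb->pending = 1;
	list_add(&mcb->cb.list, &plug->cb_list);
}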
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e2768834f397..32a4423710f5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -197,7 +197,6 @@ struct dm_target {
197struct dm_target_callbacks { 197struct dm_target_callbacks {
198 struct list_head list; 198 struct list_head list;
199 int (*congested_fn) (struct dm_target_callbacks *, int); 199 int (*congested_fn) (struct dm_target_callbacks *, int);
200 void (*unplug_fn)(struct dm_target_callbacks *);
201}; 200};
202 201
203int dm_register_target(struct target_type *t); 202int dm_register_target(struct target_type *t);
diff --git a/include/linux/input.h b/include/linux/input.h
index f3a7794a18c4..771d6d85667d 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -167,6 +167,7 @@ struct input_keymap_entry {
167#define SYN_REPORT 0 167#define SYN_REPORT 0
168#define SYN_CONFIG 1 168#define SYN_CONFIG 1
169#define SYN_MT_REPORT 2 169#define SYN_MT_REPORT 2
170#define SYN_DROPPED 3
170 171
171/* 172/*
172 * Keys and buttons 173 * Keys and buttons
@@ -553,8 +554,8 @@ struct input_keymap_entry {
553#define KEY_DVD 0x185 /* Media Select DVD */ 554#define KEY_DVD 0x185 /* Media Select DVD */
554#define KEY_AUX 0x186 555#define KEY_AUX 0x186
555#define KEY_MP3 0x187 556#define KEY_MP3 0x187
556#define KEY_AUDIO 0x188 557#define KEY_AUDIO 0x188 /* AL Audio Browser */
557#define KEY_VIDEO 0x189 558#define KEY_VIDEO 0x189 /* AL Movie Browser */
558#define KEY_DIRECTORY 0x18a 559#define KEY_DIRECTORY 0x18a
559#define KEY_LIST 0x18b 560#define KEY_LIST 0x18b
560#define KEY_MEMO 0x18c /* Media Select Messages */ 561#define KEY_MEMO 0x18c /* Media Select Messages */
@@ -603,8 +604,9 @@ struct input_keymap_entry {
603#define KEY_FRAMEFORWARD 0x1b5 604#define KEY_FRAMEFORWARD 0x1b5
604#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ 605#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */
605#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ 606#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */
606#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ 607#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
607#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ 608#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
609#define KEY_IMAGES 0x1ba /* AL Image Browser */
608 610
609#define KEY_DEL_EOL 0x1c0 611#define KEY_DEL_EOL 0x1c0
610#define KEY_DEL_EOS 0x1c1 612#define KEY_DEL_EOS 0x1c1
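SYN_DROPPED tells evdev clients that the kernel's event buffer overflowed and events were lost. A minimal userspace reader honoring it; the device path is an assumption, and a real client would re-query full device state with the EVIOCG* ioctls after the next SYN_REPORT:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int resyncing = 0;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
		if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
			/* buffer overran: everything up to the next
			 * SYN_REPORT is a partial packet, discard it */
			resyncing = 1;
			continue;
		}
		if (ev.type == EV_SYN && ev.code == SYN_REPORT && resyncing) {
			resyncing = 0;	/* re-read full device state here */
			continue;
		}
		if (!resyncing)
			printf("type %u code %u value %d\n",
			       ev.type, ev.code, ev.value);
	}
	close(fd);
	return 0;
}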
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index b3ac06a4435d..318bb82325a6 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -48,6 +48,12 @@ static inline void input_mt_slot(struct input_dev *dev, int slot)
48 input_event(dev, EV_ABS, ABS_MT_SLOT, slot); 48 input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
49} 49}
50 50
51static inline bool input_is_mt_axis(int axis)
52{
53 return axis == ABS_MT_SLOT ||
54 (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST);
55}
56
51void input_mt_report_slot_state(struct input_dev *dev, 57void input_mt_report_slot_state(struct input_dev *dev,
52 unsigned int tool_type, bool active); 58 unsigned int tool_type, bool active);
53 59
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5a5ce7055839..5e9840f50980 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -216,7 +216,7 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
216 return ; 216 return ;
217} 217}
218 218
219static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page) 219static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
220{ 220{
221 return ; 221 return ;
222} 222}
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 31afb7ecbe1f..cdced84261d7 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr);
117 */ 117 */
118extern struct pid *find_get_pid(int nr); 118extern struct pid *find_get_pid(int nr);
119extern struct pid *find_ge_pid(int nr, struct pid_namespace *); 119extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
120int next_pidmap(struct pid_namespace *pid_ns, int last); 120int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
121 121
122extern struct pid *alloc_pid(struct pid_namespace *ns); 122extern struct pid *alloc_pid(struct pid_namespace *ns);
123extern void free_pid(struct pid *pid); 123extern void free_pid(struct pid *pid);
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 4e37a7cfa726..4d50611112ba 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -396,7 +396,7 @@ union rio_pw_msg {
396}; 396};
397 397
398/* Architecture and hardware-specific functions */ 398/* Architecture and hardware-specific functions */
399extern void rio_register_mport(struct rio_mport *); 399extern int rio_register_mport(struct rio_mport *);
400extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); 400extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
401extern void rio_close_inb_mbox(struct rio_mport *, int); 401extern void rio_close_inb_mbox(struct rio_mport *, int);
402extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); 402extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 7410d3365e2a..0cee0152aca9 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -35,6 +35,7 @@
35#define RIO_DID_IDTCPS6Q 0x035f 35#define RIO_DID_IDTCPS6Q 0x035f
36#define RIO_DID_IDTCPS10Q 0x035e 36#define RIO_DID_IDTCPS10Q 0x035e
37#define RIO_DID_IDTCPS1848 0x0374 37#define RIO_DID_IDTCPS1848 0x0374
38#define RIO_DID_IDTCPS1432 0x0375
38#define RIO_DID_IDTCPS1616 0x0379 39#define RIO_DID_IDTCPS1616 0x0379
39#define RIO_DID_IDTVPS1616 0x0377 40#define RIO_DID_IDTVPS1616 0x0377
40#define RIO_DID_IDTSPS1616 0x0378 41#define RIO_DID_IDTSPS1616 0x0378
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 2ca7e8a78060..877ece45426f 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc,
228 struct rtc_wkalrm *alrm); 228 struct rtc_wkalrm *alrm);
229extern int rtc_set_alarm(struct rtc_device *rtc, 229extern int rtc_set_alarm(struct rtc_device *rtc,
230 struct rtc_wkalrm *alrm); 230 struct rtc_wkalrm *alrm);
231extern int rtc_initialize_alarm(struct rtc_device *rtc,
232 struct rtc_wkalrm *alrm);
231extern void rtc_update_irq(struct rtc_device *rtc, 233extern void rtc_update_irq(struct rtc_device *rtc,
232 unsigned long num, unsigned long events); 234 unsigned long num, unsigned long events);
233 235
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d9ca3aa511ff..171ba24b08a7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1247,6 +1247,9 @@ struct task_struct {
1247#endif 1247#endif
1248 1248
1249 struct mm_struct *mm, *active_mm; 1249 struct mm_struct *mm, *active_mm;
1250#ifdef CONFIG_COMPAT_BRK
1251 unsigned brk_randomized:1;
1252#endif
1250#if defined(SPLIT_RSS_COUNTING) 1253#if defined(SPLIT_RSS_COUNTING)
1251 struct task_rss_stat rss_stat; 1254 struct task_rss_stat rss_stat;
1252#endif 1255#endif
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 461c0119664f..2b3831b58aa4 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
58 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ 58 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
59 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ 59 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
60 UNEVICTABLE_MLOCKFREED, 60 UNEVICTABLE_MLOCKFREED,
61#ifdef CONFIG_TRANSPARENT_HUGEPAGE
62 THP_FAULT_ALLOC,
63 THP_FAULT_FALLBACK,
64 THP_COLLAPSE_ALLOC,
65 THP_COLLAPSE_ALLOC_FAILED,
66 THP_SPLIT,
67#endif
61 NR_VM_EVENT_ITEMS 68 NR_VM_EVENT_ITEMS
62}; 69};
63 70
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index cdf2e8ac4309..d2df55b0c213 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -139,8 +139,6 @@ do { \
139 */ 139 */
140 140
141enum p9_msg_t { 141enum p9_msg_t {
142 P9_TSYNCFS = 0,
143 P9_RSYNCFS,
144 P9_TLERROR = 6, 142 P9_TLERROR = 6,
145 P9_RLERROR, 143 P9_RLERROR,
146 P9_TSTATFS = 8, 144 P9_TSTATFS = 8,
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 85c1413f054d..051a99f79769 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -218,8 +218,8 @@ void p9_client_disconnect(struct p9_client *clnt);
218void p9_client_begin_disconnect(struct p9_client *clnt); 218void p9_client_begin_disconnect(struct p9_client *clnt);
219struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, 219struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
220 char *uname, u32 n_uname, char *aname); 220 char *uname, u32 n_uname, char *aname);
221struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, 221struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
222 int clone); 222 char **wnames, int clone);
223int p9_client_open(struct p9_fid *fid, int mode); 223int p9_client_open(struct p9_fid *fid, int mode);
224int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, 224int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
225 char *extension); 225 char *extension);
@@ -230,7 +230,6 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
230 gid_t gid, struct p9_qid *qid); 230 gid_t gid, struct p9_qid *qid);
231int p9_client_clunk(struct p9_fid *fid); 231int p9_client_clunk(struct p9_fid *fid);
232int p9_client_fsync(struct p9_fid *fid, int datasync); 232int p9_client_fsync(struct p9_fid *fid, int datasync);
233int p9_client_sync_fs(struct p9_fid *fid);
234int p9_client_remove(struct p9_fid *fid); 233int p9_client_remove(struct p9_fid *fid);
235int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, 234int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
236 u64 offset, u32 count); 235 u64 offset, u32 count);
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 78f18adb49c8..bf366547da25 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
401 401
402DECLARE_EVENT_CLASS(block_unplug, 402DECLARE_EVENT_CLASS(block_unplug,
403 403
404 TP_PROTO(struct request_queue *q), 404 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
405 405
406 TP_ARGS(q), 406 TP_ARGS(q, depth, explicit),
407 407
408 TP_STRUCT__entry( 408 TP_STRUCT__entry(
409 __field( int, nr_rq ) 409 __field( int, nr_rq )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
411 ), 411 ),
412 412
413 TP_fast_assign( 413 TP_fast_assign(
414 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; 414 __entry->nr_rq = depth;
415 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 415 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
416 ), 416 ),
417 417
@@ -419,31 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
419); 419);
420 420
421/** 421/**
422 * block_unplug_timer - timed release of operations requests in queue to device driver 422 * block_unplug - release of operations requests in request queue
423 * @q: request queue to unplug
424 *
425 * Unplug the request queue @q because a timer expired and allow block
426 * operation requests to be sent to the device driver.
427 */
428DEFINE_EVENT(block_unplug, block_unplug_timer,
429
430 TP_PROTO(struct request_queue *q),
431
432 TP_ARGS(q)
433);
434
435/**
436 * block_unplug_io - release of operations requests in request queue
437 * @q: request queue to unplug 423 * @q: request queue to unplug
424 * @depth: number of requests just added to the queue
425 * @explicit: whether this was an explicit unplug, or one from schedule()
438 * 426 *
439 * Unplug request queue @q because device driver is scheduled to work 427 * Unplug request queue @q because device driver is scheduled to work
440 * on elements in the request queue. 428 * on elements in the request queue.
441 */ 429 */
442DEFINE_EVENT(block_unplug, block_unplug_io, 430DEFINE_EVENT(block_unplug, block_unplug,
443 431
444 TP_PROTO(struct request_queue *q), 432 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
445 433
446 TP_ARGS(q) 434 TP_ARGS(q, depth, explicit)
447); 435);
448 436
449/** 437/**
diff --git a/kernel/futex.c b/kernel/futex.c
index dfb924ffe65b..fe28dc282eae 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
1886 restart->futex.val = val; 1886 restart->futex.val = val;
1887 restart->futex.time = abs_time->tv64; 1887 restart->futex.time = abs_time->tv64;
1888 restart->futex.bitset = bitset; 1888 restart->futex.bitset = bitset;
1889 restart->futex.flags = flags; 1889 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1890 1890
1891 ret = -ERESTART_RESTARTBLOCK; 1891 ret = -ERESTART_RESTARTBLOCK;
1892 1892
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 27960f114efd..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
364 } 364 }
365 365
366 if (mode & PERF_CGROUP_SWIN) { 366 if (mode & PERF_CGROUP_SWIN) {
367 WARN_ON_ONCE(cpuctx->cgrp);
367 /* set cgrp before ctxsw in to 368 /* set cgrp before ctxsw in to
368 * allow event_filter_match() to not 369 * allow event_filter_match() to not
369 * have to pass task around 370 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2423 if (!ctx || !ctx->nr_events) 2424 if (!ctx || !ctx->nr_events)
2424 goto out; 2425 goto out;
2425 2426
2427 /*
2428 * We must ctxsw out cgroup events to avoid conflict
2429 * when invoking perf_task_event_sched_in() later on
2430 * in this function. Otherwise we end up trying to
2431 * ctxswin cgroup events which are already scheduled
2432 * in.
2433 */
2434 perf_cgroup_sched_out(current);
2426 task_ctx_sched_out(ctx, EVENT_ALL); 2435 task_ctx_sched_out(ctx, EVENT_ALL);
2427 2436
2428 raw_spin_lock(&ctx->lock); 2437 raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2447 2456
2448 raw_spin_unlock(&ctx->lock); 2457 raw_spin_unlock(&ctx->lock);
2449 2458
2459 /*
2460 * Also calls ctxswin for cgroup events, if any:
2461 */
2450 perf_event_context_sched_in(ctx, ctx->task); 2462 perf_event_context_sched_in(ctx, ctx->task);
2451out: 2463out:
2452 local_irq_restore(flags); 2464 local_irq_restore(flags);
diff --git a/kernel/pid.c b/kernel/pid.c
index 02f221274265..57a8346a270e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
217 return -1; 217 return -1;
218} 218}
219 219
220int next_pidmap(struct pid_namespace *pid_ns, int last) 220int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
221{ 221{
222 int offset; 222 int offset;
223 struct pidmap *map, *end; 223 struct pidmap *map, *end;
224 224
225 if (last >= PID_MAX_LIMIT)
226 return -1;
227
225 offset = (last + 1) & BITS_PER_PAGE_MASK; 228 offset = (last + 1) & BITS_PER_PAGE_MASK;
226 map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; 229 map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
227 end = &pid_ns->pidmap[PIDMAP_ENTRIES]; 230 end = &pid_ns->pidmap[PIDMAP_ENTRIES];
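The signature change matters because "last" comes from a /proc readdir offset that userspace controls. With a signed int, a huge offset made last + 1 overflow and index pidmap[] far out of bounds; the unsigned type plus the PID_MAX_LIMIT guard close that off. A userspace model of the bound check (PID_MAX_LIMIT below is the value used on typical 64-bit builds):

#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE	4096
#define BITS_PER_PAGE	(PAGE_SIZE * 8)
#define PID_MAX_LIMIT	(4 * 1024 * 1024)

static int pidmap_index(unsigned int last)
{
	if (last >= PID_MAX_LIMIT)	/* the guard added above */
		return -1;		/* no further pids: stop iterating */
	return (int)((last + 1) / BITS_PER_PAGE);
}

int main(void)
{
	/* with a signed "last", INT_MAX + 1 was undefined overflow and the
	 * resulting index ran far past the pidmap array; now it is rejected */
	printf("last=100     -> pidmap index %d\n", pidmap_index(100));
	printf("last=INT_MAX -> pidmap index %d\n", pidmap_index(INT_MAX));
	return 0;
}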
diff --git a/kernel/sched.c b/kernel/sched.c
index 27d3e73a2af6..30a29ad46eb1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4218,7 +4218,7 @@ need_resched:
4218 */ 4218 */
4219 if (blk_needs_flush_plug(prev)) { 4219 if (blk_needs_flush_plug(prev)) {
4220 raw_spin_unlock(&rq->lock); 4220 raw_spin_unlock(&rq->lock);
4221 blk_flush_plug(prev); 4221 blk_schedule_flush_plug(prev);
4222 raw_spin_lock(&rq->lock); 4222 raw_spin_lock(&rq->lock);
4223 } 4223 }
4224 } 4224 }
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7aa40f8e182d..6957aa298dfa 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
850 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); 850 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
851} 851}
852 852
853static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q) 853static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
854 unsigned int depth, bool explicit)
854{ 855{
855 struct blk_trace *bt = q->blk_trace; 856 struct blk_trace *bt = q->blk_trace;
856 857
857 if (bt) { 858 if (bt) {
858 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; 859 __be64 rpdu = cpu_to_be64(depth);
859 __be64 rpdu = cpu_to_be64(pdu); 860 u32 what;
860 861
861 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, 862 if (explicit)
862 sizeof(rpdu), &rpdu); 863 what = BLK_TA_UNPLUG_IO;
863 } 864 else
864} 865 what = BLK_TA_UNPLUG_TIMER;
865
866static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
867{
868 struct blk_trace *bt = q->blk_trace;
869
870 if (bt) {
871 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
872 __be64 rpdu = cpu_to_be64(pdu);
873 866
874 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, 867 __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
875 sizeof(rpdu), &rpdu);
876 } 868 }
877} 869}
878 870
@@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void)
1015 WARN_ON(ret); 1007 WARN_ON(ret);
1016 ret = register_trace_block_plug(blk_add_trace_plug, NULL); 1008 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1017 WARN_ON(ret); 1009 WARN_ON(ret);
1018 ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); 1010 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1019 WARN_ON(ret);
1020 ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
1021 WARN_ON(ret); 1011 WARN_ON(ret);
1022 ret = register_trace_block_split(blk_add_trace_split, NULL); 1012 ret = register_trace_block_split(blk_add_trace_split, NULL);
1023 WARN_ON(ret); 1013 WARN_ON(ret);
@@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void)
1032 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1022 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1033 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1023 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1034 unregister_trace_block_split(blk_add_trace_split, NULL); 1024 unregister_trace_block_split(blk_add_trace_split, NULL);
1035 unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); 1025 unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1036 unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
1037 unregister_trace_block_plug(blk_add_trace_plug, NULL); 1026 unregister_trace_block_plug(blk_add_trace_plug, NULL);
1038 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); 1027 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1039 unregister_trace_block_getrq(blk_add_trace_getrq, NULL); 1028 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 05672e819f8c..a235f3cc471c 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -49,12 +49,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
49 val = *s - '0'; 49 val = *s - '0';
50 else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') 50 else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
51 val = _tolower(*s) - 'a' + 10; 51 val = _tolower(*s) - 'a' + 10;
52 else if (*s == '\n') { 52 else if (*s == '\n' && *(s + 1) == '\0')
53 if (*(s + 1) == '\0') 53 break;
54 break; 54 else
55 else
56 return -EINVAL;
57 } else
58 return -EINVAL; 55 return -EINVAL;
59 56
60 if (val >= base) 57 if (val >= base)
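The kstrtox rewrite keeps the behavior and only flattens the nested branch: exactly one trailing '\n' is tolerated, and any other non-numeric character is -EINVAL. A userspace restatement of just that tail rule:

#include <stdio.h>

/* accept digits with at most one '\n', and only as the final character */
static int tail_ok(const char *s)
{
	for (; *s; s++) {
		if (*s >= '0' && *s <= '9')
			continue;
		if (*s == '\n' && *(s + 1) == '\0')
			break;		/* lone trailing newline: fine */
		return 0;		/* kernel side: -EINVAL */
	}
	return 1;
}

int main(void)
{
	printf("\"123\\n\" -> %d\n", tail_ok("123\n"));	/* 1 */
	printf("\"12\\n3\" -> %d\n", tail_ok("12\n3"));	/* 0 */
	printf("\"123\"   -> %d\n", tail_ok("123"));	/* 1 */
	return 0;
}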
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index 325c2f9ecebd..d55769d63cb8 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -315,12 +315,12 @@ static void __init test_kstrtou64_ok(void)
315 {"65537", 10, 65537}, 315 {"65537", 10, 65537},
316 {"2147483646", 10, 2147483646}, 316 {"2147483646", 10, 2147483646},
317 {"2147483647", 10, 2147483647}, 317 {"2147483647", 10, 2147483647},
318 {"2147483648", 10, 2147483648}, 318 {"2147483648", 10, 2147483648ULL},
319 {"2147483649", 10, 2147483649}, 319 {"2147483649", 10, 2147483649ULL},
320 {"4294967294", 10, 4294967294}, 320 {"4294967294", 10, 4294967294ULL},
321 {"4294967295", 10, 4294967295}, 321 {"4294967295", 10, 4294967295ULL},
322 {"4294967296", 10, 4294967296}, 322 {"4294967296", 10, 4294967296ULL},
323 {"4294967297", 10, 4294967297}, 323 {"4294967297", 10, 4294967297ULL},
324 {"9223372036854775806", 10, 9223372036854775806ULL}, 324 {"9223372036854775806", 10, 9223372036854775806ULL},
325 {"9223372036854775807", 10, 9223372036854775807ULL}, 325 {"9223372036854775807", 10, 9223372036854775807ULL},
326 {"9223372036854775808", 10, 9223372036854775808ULL}, 326 {"9223372036854775808", 10, 9223372036854775808ULL},
@@ -369,12 +369,12 @@ static void __init test_kstrtos64_ok(void)
369 {"65537", 10, 65537}, 369 {"65537", 10, 65537},
370 {"2147483646", 10, 2147483646}, 370 {"2147483646", 10, 2147483646},
371 {"2147483647", 10, 2147483647}, 371 {"2147483647", 10, 2147483647},
372 {"2147483648", 10, 2147483648}, 372 {"2147483648", 10, 2147483648LL},
373 {"2147483649", 10, 2147483649}, 373 {"2147483649", 10, 2147483649LL},
374 {"4294967294", 10, 4294967294}, 374 {"4294967294", 10, 4294967294LL},
375 {"4294967295", 10, 4294967295}, 375 {"4294967295", 10, 4294967295LL},
376 {"4294967296", 10, 4294967296}, 376 {"4294967296", 10, 4294967296LL},
377 {"4294967297", 10, 4294967297}, 377 {"4294967297", 10, 4294967297LL},
378 {"9223372036854775806", 10, 9223372036854775806LL}, 378 {"9223372036854775806", 10, 9223372036854775806LL},
379 {"9223372036854775807", 10, 9223372036854775807LL}, 379 {"9223372036854775807", 10, 9223372036854775807LL},
380 }; 380 };
@@ -418,10 +418,10 @@ static void __init test_kstrtou32_ok(void)
418 {"65537", 10, 65537}, 418 {"65537", 10, 65537},
419 {"2147483646", 10, 2147483646}, 419 {"2147483646", 10, 2147483646},
420 {"2147483647", 10, 2147483647}, 420 {"2147483647", 10, 2147483647},
421 {"2147483648", 10, 2147483648}, 421 {"2147483648", 10, 2147483648U},
422 {"2147483649", 10, 2147483649}, 422 {"2147483649", 10, 2147483649U},
423 {"4294967294", 10, 4294967294}, 423 {"4294967294", 10, 4294967294U},
424 {"4294967295", 10, 4294967295}, 424 {"4294967295", 10, 4294967295U},
425 }; 425 };
426 TEST_OK(kstrtou32, u32, "%u", test_u32_ok); 426 TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
427} 427}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0a619e0e2e0b..470dcda10add 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj,
244 struct kobj_attribute *attr, char *buf, 244 struct kobj_attribute *attr, char *buf,
245 enum transparent_hugepage_flag flag) 245 enum transparent_hugepage_flag flag)
246{ 246{
247 if (test_bit(flag, &transparent_hugepage_flags)) 247 return sprintf(buf, "%d\n",
248 return sprintf(buf, "[yes] no\n"); 248 !!test_bit(flag, &transparent_hugepage_flags));
249 else
250 return sprintf(buf, "yes [no]\n");
251} 249}
250
252static ssize_t single_flag_store(struct kobject *kobj, 251static ssize_t single_flag_store(struct kobject *kobj,
253 struct kobj_attribute *attr, 252 struct kobj_attribute *attr,
254 const char *buf, size_t count, 253 const char *buf, size_t count,
255 enum transparent_hugepage_flag flag) 254 enum transparent_hugepage_flag flag)
256{ 255{
257 if (!memcmp("yes", buf, 256 unsigned long value;
258 min(sizeof("yes")-1, count))) { 257 int ret;
258
259 ret = kstrtoul(buf, 10, &value);
260 if (ret < 0)
261 return ret;
262 if (value > 1)
263 return -EINVAL;
264
265 if (value)
259 set_bit(flag, &transparent_hugepage_flags); 266 set_bit(flag, &transparent_hugepage_flags);
260 } else if (!memcmp("no", buf, 267 else
261 min(sizeof("no")-1, count))) {
262 clear_bit(flag, &transparent_hugepage_flags); 268 clear_bit(flag, &transparent_hugepage_flags);
263 } else
264 return -EINVAL;
265 269
266 return count; 270 return count;
267} 271}
@@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
680 return VM_FAULT_OOM; 684 return VM_FAULT_OOM;
681 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 685 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
682 vma, haddr, numa_node_id(), 0); 686 vma, haddr, numa_node_id(), 0);
683 if (unlikely(!page)) 687 if (unlikely(!page)) {
688 count_vm_event(THP_FAULT_FALLBACK);
684 goto out; 689 goto out;
690 }
691 count_vm_event(THP_FAULT_ALLOC);
685 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { 692 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
686 put_page(page); 693 put_page(page);
687 goto out; 694 goto out;
@@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
909 new_page = NULL; 916 new_page = NULL;
910 917
911 if (unlikely(!new_page)) { 918 if (unlikely(!new_page)) {
919 count_vm_event(THP_FAULT_FALLBACK);
912 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 920 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
913 pmd, orig_pmd, page, haddr); 921 pmd, orig_pmd, page, haddr);
914 put_page(page); 922 put_page(page);
915 goto out; 923 goto out;
916 } 924 }
925 count_vm_event(THP_FAULT_ALLOC);
917 926
918 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 927 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
919 put_page(new_page); 928 put_page(new_page);
@@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page)
1390 1399
1391 BUG_ON(!PageSwapBacked(page)); 1400 BUG_ON(!PageSwapBacked(page));
1392 __split_huge_page(page, anon_vma); 1401 __split_huge_page(page, anon_vma);
1402 count_vm_event(THP_SPLIT);
1393 1403
1394 BUG_ON(PageCompound(page)); 1404 BUG_ON(PageCompound(page));
1395out_unlock: 1405out_unlock:
@@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm,
1784 node, __GFP_OTHER_NODE); 1794 node, __GFP_OTHER_NODE);
1785 if (unlikely(!new_page)) { 1795 if (unlikely(!new_page)) {
1786 up_read(&mm->mmap_sem); 1796 up_read(&mm->mmap_sem);
1797 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1787 *hpage = ERR_PTR(-ENOMEM); 1798 *hpage = ERR_PTR(-ENOMEM);
1788 return; 1799 return;
1789 } 1800 }
1801 count_vm_event(THP_COLLAPSE_ALLOC);
1790 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1802 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1791 up_read(&mm->mmap_sem); 1803 up_read(&mm->mmap_sem);
1792 put_page(new_page); 1804 put_page(new_page);
@@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage)
2151#ifndef CONFIG_NUMA 2163#ifndef CONFIG_NUMA
2152 if (!*hpage) { 2164 if (!*hpage) {
2153 *hpage = alloc_hugepage(khugepaged_defrag()); 2165 *hpage = alloc_hugepage(khugepaged_defrag());
2154 if (unlikely(!*hpage)) 2166 if (unlikely(!*hpage)) {
2167 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2155 break; 2168 break;
2169 }
2170 count_vm_event(THP_COLLAPSE_ALLOC);
2156 } 2171 }
2157#else 2172#else
2158 if (IS_ERR(*hpage)) 2173 if (IS_ERR(*hpage))
@@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void)
2192 2207
2193 do { 2208 do {
2194 hpage = alloc_hugepage(khugepaged_defrag()); 2209 hpage = alloc_hugepage(khugepaged_defrag());
2195 if (!hpage) 2210 if (!hpage) {
2211 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2196 khugepaged_alloc_sleep(); 2212 khugepaged_alloc_sleep();
2213 } else
2214 count_vm_event(THP_COLLAPSE_ALLOC);
2197 } while (unlikely(!hpage) && 2215 } while (unlikely(!hpage) &&
2198 likely(khugepaged_enabled())); 2216 likely(khugepaged_enabled()));
2199 return hpage; 2217 return hpage;
@@ -2210,8 +2228,11 @@ static void khugepaged_loop(void)
2210 while (likely(khugepaged_enabled())) { 2228 while (likely(khugepaged_enabled())) {
2211#ifndef CONFIG_NUMA 2229#ifndef CONFIG_NUMA
2212 hpage = khugepaged_alloc_hugepage(); 2230 hpage = khugepaged_alloc_hugepage();
2213 if (unlikely(!hpage)) 2231 if (unlikely(!hpage)) {
2232 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2214 break; 2233 break;
2234 }
2235 count_vm_event(THP_COLLAPSE_ALLOC);
2215#else 2236#else
2216 if (IS_ERR(hpage)) { 2237 if (IS_ERR(hpage)) {
2217 khugepaged_alloc_sleep(); 2238 khugepaged_alloc_sleep();
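Two user-visible changes ride along in huge_memory.c: the boolean flag files now show plain "0"/"1" instead of "[yes] no", and the store side parses with kstrtoul, so only "0" and "1" are accepted. A hedged userspace sketch; the exact flag path below is an assumption for illustration, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical single-flag file; substitute a real one on your kernel */
	const char *path = "/sys/kernel/mm/transparent_hugepage/khugepaged/defrag";
	char buf[4] = "";
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1)	/* writing "yes" would now fail with EINVAL */
		perror("write");
	if (pread(fd, buf, sizeof(buf) - 1, 0) > 0)
		printf("flag reads back as: %c\n", buf[0]);	/* '0' or '1' */
	close(fd);
	return 0;
}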
diff --git a/mm/memory.c b/mm/memory.c
index b623a249918c..ce22a250926f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3688,7 +3688,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
3688 */ 3688 */
3689#ifdef CONFIG_HAVE_IOREMAP_PROT 3689#ifdef CONFIG_HAVE_IOREMAP_PROT
3690 vma = find_vma(mm, addr); 3690 vma = find_vma(mm, addr);
3691 if (!vma) 3691 if (!vma || vma->vm_start > addr)
3692 break; 3692 break;
3693 if (vma->vm_ops && vma->vm_ops->access) 3693 if (vma->vm_ops && vma->vm_ops->access)
3694 ret = vma->vm_ops->access(vma, addr, buf, 3694 ret = vma->vm_ops->access(vma, addr, buf,
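find_vma() returns the first VMA whose end lies above the address, so the address can still fall in the unmapped gap below that VMA; the extra vma->vm_start > addr test catches exactly that case. A small userspace model:

#include <stdio.h>

struct vma { unsigned long start, end; };

/* mirrors find_vma(): first region with end > addr, or NULL */
static const struct vma *find_vma(const struct vma *v, int n, unsigned long addr)
{
	for (int i = 0; i < n; i++)
		if (addr < v[i].end)
			return &v[i];
	return NULL;
}

int main(void)
{
	const struct vma maps[] = { { 0x1000, 0x2000 }, { 0x8000, 0x9000 } };
	unsigned long addr = 0x5000;	/* in the hole between the two regions */
	const struct vma *vma = find_vma(maps, 2, addr);

	if (!vma || vma->start > addr)	/* the check the patch adds */
		puts("addr is unmapped: stop, don't touch vma->vm_ops");
	else
		puts("addr is inside a VMA");
	return 0;
}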
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a2acaf820fe5..9ca1d604f7cd 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -375,7 +375,7 @@ void online_page(struct page *page)
375#endif 375#endif
376 376
377#ifdef CONFIG_FLATMEM 377#ifdef CONFIG_FLATMEM
378 max_mapnr = max(page_to_pfn(page), max_mapnr); 378 max_mapnr = max(pfn, max_mapnr);
379#endif 379#endif
380 380
381 ClearPageReserved(page); 381 ClearPageReserved(page);
diff --git a/mm/mmap.c b/mm/mmap.c
index 8c05e5b43b69..e27e0cf0de03 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
259 * randomize_va_space to 2, which will still cause mm->start_brk 259 * randomize_va_space to 2, which will still cause mm->start_brk
260 * to be arbitrarily shifted 260 * to be arbitrarily shifted
261 */ 261 */
262 if (mm->start_brk > PAGE_ALIGN(mm->end_data)) 262 if (current->brk_randomized)
263 min_brk = mm->start_brk; 263 min_brk = mm->start_brk;
264 else 264 else
265 min_brk = mm->end_data; 265 min_brk = mm->end_data;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6a819d1b2c7d..83fb72c108b7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -84,24 +84,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
84#endif /* CONFIG_NUMA */ 84#endif /* CONFIG_NUMA */
85 85
86/* 86/*
87 * If this is a system OOM (not a memcg OOM) and the task selected to be
88 * killed is not already running at high (RT) priorities, speed up the
89 * recovery by boosting the dying task to the lowest FIFO priority.
90 * That helps with the recovery and avoids interfering with RT tasks.
91 */
92static void boost_dying_task_prio(struct task_struct *p,
93 struct mem_cgroup *mem)
94{
95 struct sched_param param = { .sched_priority = 1 };
96
97 if (mem)
98 return;
99
100 if (!rt_task(p))
101 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
102}
103
104/*
105 * The process p may have detached its own ->mm while exiting or through 87 * The process p may have detached its own ->mm while exiting or through
106 * use_mm(), but one or more of its subthreads may still have a valid 88 * use_mm(), but one or more of its subthreads may still have a valid
107 * pointer. Return p, or any of its subthreads with a valid ->mm, with 89 * pointer. Return p, or any of its subthreads with a valid ->mm, with
@@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
452 set_tsk_thread_flag(p, TIF_MEMDIE); 434 set_tsk_thread_flag(p, TIF_MEMDIE);
453 force_sig(SIGKILL, p); 435 force_sig(SIGKILL, p);
454 436
455 /*
456 * We give our sacrificial lamb high priority and access to
457 * all the memory it needs. That way it should be able to
458 * exit() and clear out its resources quickly...
459 */
460 boost_dying_task_prio(p, mem);
461
462 return 0; 437 return 0;
463} 438}
464#undef K 439#undef K
@@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
482 */ 457 */
483 if (p->flags & PF_EXITING) { 458 if (p->flags & PF_EXITING) {
484 set_tsk_thread_flag(p, TIF_MEMDIE); 459 set_tsk_thread_flag(p, TIF_MEMDIE);
485 boost_dying_task_prio(p, mem);
486 return 0; 460 return 0;
487 } 461 }
488 462
@@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
556 */ 530 */
557 if (fatal_signal_pending(current)) { 531 if (fatal_signal_pending(current)) {
558 set_thread_flag(TIF_MEMDIE); 532 set_thread_flag(TIF_MEMDIE);
559 boost_dying_task_prio(current, NULL);
560 return; 533 return;
561 } 534 }
562 535
@@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
712 */ 685 */
713 if (fatal_signal_pending(current)) { 686 if (fatal_signal_pending(current)) {
714 set_thread_flag(TIF_MEMDIE); 687 set_thread_flag(TIF_MEMDIE);
715 boost_dying_task_prio(current, NULL);
716 return; 688 return;
717 } 689 }
718 690
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2747f5e5abc1..9f8a97b9a350 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3176,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data)
3176 * Called with zonelists_mutex held always 3176 * Called with zonelists_mutex held always
3177 * unless system_state == SYSTEM_BOOTING. 3177 * unless system_state == SYSTEM_BOOTING.
3178 */ 3178 */
3179void build_all_zonelists(void *data) 3179void __ref build_all_zonelists(void *data)
3180{ 3180{
3181 set_zonelist_order(); 3181 set_zonelist_order();
3182 3182
diff --git a/mm/shmem.c b/mm/shmem.c
index 58da7c150ba6..8fa27e4e582a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
421 * a waste to allocate index if we cannot allocate data. 421 * a waste to allocate index if we cannot allocate data.
422 */ 422 */
423 if (sbinfo->max_blocks) { 423 if (sbinfo->max_blocks) {
424 if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0) 424 if (percpu_counter_compare(&sbinfo->used_blocks,
425 sbinfo->max_blocks - 1) >= 0)
425 return ERR_PTR(-ENOSPC); 426 return ERR_PTR(-ENOSPC);
426 percpu_counter_inc(&sbinfo->used_blocks); 427 percpu_counter_inc(&sbinfo->used_blocks);
427 spin_lock(&inode->i_lock); 428 spin_lock(&inode->i_lock);
@@ -1397,7 +1398,8 @@ repeat:
1397 shmem_swp_unmap(entry); 1398 shmem_swp_unmap(entry);
1398 sbinfo = SHMEM_SB(inode->i_sb); 1399 sbinfo = SHMEM_SB(inode->i_sb);
1399 if (sbinfo->max_blocks) { 1400 if (sbinfo->max_blocks) {
1400 if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) || 1401 if (percpu_counter_compare(&sbinfo->used_blocks,
1402 sbinfo->max_blocks) >= 0 ||
1401 shmem_acct_block(info->flags)) { 1403 shmem_acct_block(info->flags)) {
1402 spin_unlock(&info->lock); 1404 spin_unlock(&info->lock);
1403 error = -ENOSPC; 1405 error = -ENOSPC;
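The shmem change is a boundary fix: with "> 0", a filesystem already at max_blocks still passed the check (the compare returns 0 on equality) and the subsequent increment pushed usage one block past the limit; ">= 0" refuses at the boundary. In miniature:

#include <stdio.h>

/* sign of (used - limit), like percpu_counter_compare() */
static int cmp(long used, long limit)
{
	return (used > limit) - (used < limit);
}

int main(void)
{
	long max_blocks = 8, used_blocks = 8;	/* already full */

	printf("old check: %s\n",
	       cmp(used_blocks, max_blocks) > 0 ?
	       "ENOSPC" : "allowed (then inc -> 9 of 8!)");
	printf("new check: %s\n",
	       cmp(used_blocks, max_blocks) >= 0 ? "ENOSPC" : "allowed");
	return 0;
}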
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c7f5a6d4b75b..f6b435c80079 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,6 +41,7 @@
41#include <linux/memcontrol.h> 41#include <linux/memcontrol.h>
42#include <linux/delayacct.h> 42#include <linux/delayacct.h>
43#include <linux/sysctl.h> 43#include <linux/sysctl.h>
44#include <linux/oom.h>
44 45
45#include <asm/tlbflush.h> 46#include <asm/tlbflush.h>
46#include <asm/div64.h> 47#include <asm/div64.h>
@@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone)
1988 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; 1989 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
1989} 1990}
1990 1991
1991/* 1992/* All zones in zonelist are unreclaimable? */
1992 * As hibernation is going on, kswapd is freezed so that it can't mark
1993 * the zone into all_unreclaimable. It can't handle OOM during hibernation.
1994 * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
1995 */
1996static bool all_unreclaimable(struct zonelist *zonelist, 1993static bool all_unreclaimable(struct zonelist *zonelist,
1997 struct scan_control *sc) 1994 struct scan_control *sc)
1998{ 1995{
1999 struct zoneref *z; 1996 struct zoneref *z;
2000 struct zone *zone; 1997 struct zone *zone;
2001 bool all_unreclaimable = true;
2002 1998
2003 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1999 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2004 gfp_zone(sc->gfp_mask), sc->nodemask) { 2000 gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
2006 continue; 2002 continue;
2007 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2003 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2008 continue; 2004 continue;
2009 if (zone_reclaimable(zone)) { 2005 if (!zone->all_unreclaimable)
2010 all_unreclaimable = false; 2006 return false;
2011 break;
2012 }
2013 } 2007 }
2014 2008
2015 return all_unreclaimable; 2009 return true;
2016} 2010}
2017 2011
2018/* 2012/*
@@ -2108,6 +2102,14 @@ out:
2108 if (sc->nr_reclaimed) 2102 if (sc->nr_reclaimed)
2109 return sc->nr_reclaimed; 2103 return sc->nr_reclaimed;
2110 2104
2105 /*
2106 * As hibernation is going on, kswapd is freezed so that it can't mark
2107 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
2108 * check.
2109 */
2110 if (oom_killer_disabled)
2111 return 0;
2112
2111 /* top priority shrink_zones still had more to do? don't OOM, then */ 2113 /* top priority shrink_zones still had more to do? don't OOM, then */
2112 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) 2114 if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
2113 return 1; 2115 return 1;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 772b39b87d95..897ea9e88238 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -321,9 +321,12 @@ static inline void mod_state(struct zone *zone,
321 /* 321 /*
322 * The fetching of the stat_threshold is racy. We may apply 322 * The fetching of the stat_threshold is racy. We may apply
323 * a counter threshold to the wrong the cpu if we get 323 * a counter threshold to the wrong the cpu if we get
324 * rescheduled while executing here. However, the following 324 * rescheduled while executing here. However, the next
325 * will apply the threshold again and therefore bring the 325 * counter update will apply the threshold again and
326 * counter under the threshold. 326 * therefore bring the counter under the threshold again.
327 *
328 * Most of the time the thresholds are the same anyways
329 * for all cpus in a zone.
327 */ 330 */
328 t = this_cpu_read(pcp->stat_threshold); 331 t = this_cpu_read(pcp->stat_threshold);
329 332
@@ -945,7 +948,16 @@ static const char * const vmstat_text[] = {
945 "unevictable_pgs_cleared", 948 "unevictable_pgs_cleared",
946 "unevictable_pgs_stranded", 949 "unevictable_pgs_stranded",
947 "unevictable_pgs_mlockfreed", 950 "unevictable_pgs_mlockfreed",
951
952#ifdef CONFIG_TRANSPARENT_HUGEPAGE
953 "thp_fault_alloc",
954 "thp_fault_fallback",
955 "thp_collapse_alloc",
956 "thp_collapse_alloc_failed",
957 "thp_split",
948#endif 958#endif
959
960#endif /* CONFIG_VM_EVENTS_COUNTERS */
949}; 961};
950 962
951static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 963static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
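Once a CONFIG_TRANSPARENT_HUGEPAGE kernel exports the five new thp_* events, they appear as ordinary lines in /proc/vmstat. A quick reader:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *fp = fopen("/proc/vmstat", "r");

	if (!fp)
		return 1;
	while (fgets(line, sizeof(line), fp))
		if (strncmp(line, "thp_", 4) == 0)	/* thp_fault_alloc, thp_split, ... */
			fputs(line, stdout);
	fclose(fp);
	return 0;
}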
diff --git a/net/9p/client.c b/net/9p/client.c
index 48b8e084e710..77367745be9b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -929,15 +929,15 @@ error:
929} 929}
930EXPORT_SYMBOL(p9_client_attach); 930EXPORT_SYMBOL(p9_client_attach);
931 931
932struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, 932struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
933 int clone) 933 char **wnames, int clone)
934{ 934{
935 int err; 935 int err;
936 struct p9_client *clnt; 936 struct p9_client *clnt;
937 struct p9_fid *fid; 937 struct p9_fid *fid;
938 struct p9_qid *wqids; 938 struct p9_qid *wqids;
939 struct p9_req_t *req; 939 struct p9_req_t *req;
940 int16_t nwqids, count; 940 uint16_t nwqids, count;
941 941
942 err = 0; 942 err = 0;
943 wqids = NULL; 943 wqids = NULL;
@@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
955 fid = oldfid; 955 fid = oldfid;
956 956
957 957
958 P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n", 958 P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
959 oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); 959 oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
960 960
961 req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, 961 req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
@@ -1220,27 +1220,6 @@ error:
1220} 1220}
1221EXPORT_SYMBOL(p9_client_fsync); 1221EXPORT_SYMBOL(p9_client_fsync);
1222 1222
1223int p9_client_sync_fs(struct p9_fid *fid)
1224{
1225 int err = 0;
1226 struct p9_req_t *req;
1227 struct p9_client *clnt;
1228
1229 P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid);
1230
1231 clnt = fid->clnt;
1232 req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid);
1233 if (IS_ERR(req)) {
1234 err = PTR_ERR(req);
1235 goto error;
1236 }
1237 P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid);
1238 p9_free_req(clnt, req);
1239error:
1240 return err;
1241}
1242EXPORT_SYMBOL(p9_client_sync_fs);
1243
1244int p9_client_clunk(struct p9_fid *fid) 1223int p9_client_clunk(struct p9_fid *fid)
1245{ 1224{
1246 int err; 1225 int err;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 8a4084fa8b5a..b58a501cf3d1 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
265 } 265 }
266 break; 266 break;
267 case 'T':{ 267 case 'T':{
268 int16_t *nwname = va_arg(ap, int16_t *); 268 uint16_t *nwname = va_arg(ap, uint16_t *);
269 char ***wnames = va_arg(ap, char ***); 269 char ***wnames = va_arg(ap, char ***);
270 270
271 errcode = p9pdu_readf(pdu, proto_version, 271 errcode = p9pdu_readf(pdu, proto_version,
@@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
468 case 'E':{ 468 case 'E':{
469 int32_t cnt = va_arg(ap, int32_t); 469 int32_t cnt = va_arg(ap, int32_t);
470 const char *k = va_arg(ap, const void *); 470 const char *k = va_arg(ap, const void *);
471 const char *u = va_arg(ap, const void *); 471 const char __user *u = va_arg(ap,
472 const void __user *);
472 errcode = p9pdu_writef(pdu, proto_version, "d", 473 errcode = p9pdu_writef(pdu, proto_version, "d",
473 cnt); 474 cnt);
474 if (!errcode && pdu_write_urw(pdu, k, u, cnt)) 475 if (!errcode && pdu_write_urw(pdu, k, u, cnt))
@@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
495 } 496 }
496 break; 497 break;
497 case 'T':{ 498 case 'T':{
498 int16_t nwname = va_arg(ap, int); 499 uint16_t nwname = va_arg(ap, int);
499 const char **wnames = va_arg(ap, const char **); 500 const char **wnames = va_arg(ap, const char **);
500 501
501 errcode = p9pdu_writef(pdu, proto_version, "w", 502 errcode = p9pdu_writef(pdu, proto_version, "w",
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index d47880e971dd..e883172f9aa2 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -66,7 +66,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
66 uint32_t pdata_mapped_pages; 66 uint32_t pdata_mapped_pages;
67 struct trans_rpage_info *rpinfo; 67 struct trans_rpage_info *rpinfo;
68 68
69 *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1); 69 *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
70 70
71 if (*pdata_off) 71 if (*pdata_off)
72 first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), 72 first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e8f046b07182..244e70742183 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -326,8 +326,11 @@ req_retry_pinned:
326 outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, 326 outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
327 pdata_off, rpinfo->rp_data, pdata_len); 327 pdata_off, rpinfo->rp_data, pdata_len);
328 } else { 328 } else {
329 char *pbuf = req->tc->pubuf ? req->tc->pubuf : 329 char *pbuf;
330 req->tc->pkbuf; 330 if (req->tc->pubuf)
331 pbuf = (__force char *) req->tc->pubuf;
332 else
333 pbuf = req->tc->pkbuf;
331 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, 334 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
332 req->tc->pbuf_size); 335 req->tc->pbuf_size);
333 } 336 }
@@ -352,8 +355,12 @@ req_retry_pinned:
352 in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM, 355 in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
353 pdata_off, rpinfo->rp_data, pdata_len); 356 pdata_off, rpinfo->rp_data, pdata_len);
354 } else { 357 } else {
355 char *pbuf = req->tc->pubuf ? req->tc->pubuf : 358 char *pbuf;
356 req->tc->pkbuf; 359 if (req->tc->pubuf)
360 pbuf = (__force char *) req->tc->pubuf;
361 else
362 pbuf = req->tc->pkbuf;
363
357 in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM, 364 in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
358 pbuf, req->tc->pbuf_size); 365 pbuf, req->tc->pbuf_size);
359 } 366 }
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 50af02737a3d..5a80f41c0cba 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -579,9 +579,15 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
579 579
580 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, 580 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
581 r_linger_osd) { 581 r_linger_osd) {
582 __unregister_linger_request(osdc, req); 582 /*
583 * reregister request prior to unregistering linger so
584 * that r_osd is preserved.
585 */
586 BUG_ON(!list_empty(&req->r_req_lru_item));
583 __register_request(osdc, req); 587 __register_request(osdc, req);
584 list_move(&req->r_req_lru_item, &osdc->req_unsent); 588 list_add(&req->r_req_lru_item, &osdc->req_unsent);
589 list_add(&req->r_osd_item, &req->r_osd->o_requests);
590 __unregister_linger_request(osdc, req);
585 dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, 591 dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
586 osd->o_osd); 592 osd->o_osd);
587 } 593 }
@@ -798,7 +804,7 @@ static void __register_request(struct ceph_osd_client *osdc,
798 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 804 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
799 INIT_LIST_HEAD(&req->r_req_lru_item); 805 INIT_LIST_HEAD(&req->r_req_lru_item);
800 806
801 dout("register_request %p tid %lld\n", req, req->r_tid); 807 dout("__register_request %p tid %lld\n", req, req->r_tid);
802 __insert_request(osdc, req); 808 __insert_request(osdc, req);
803 ceph_osdc_get_request(req); 809 ceph_osdc_get_request(req);
804 osdc->num_requests++; 810 osdc->num_requests++;
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 9fea75535221..96bee5c46008 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -13,7 +13,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
13{ 13{
14 FILE *fp; 14 FILE *fp;
15 char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1]; 15 char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
16 char *token, *saved_ptr; 16 char *token, *saved_ptr = NULL;
17 int found = 0; 17 int found = 0;
18 18
19 fp = fopen("/proc/mounts", "r"); 19 fp = fopen("/proc/mounts", "r");
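On the perf fix: strtok_r() ignores *saveptr on the first call when handed a non-NULL string, so the NULL initialization is defensive hardening that also quiets uninitialized-variable warnings rather than a behavior change; that is most likely the motivation here. For reference, the idiomatic usage pattern:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char tokens[] = "rw,relatime,cpu,perf_event";
	char *saved_ptr = NULL;	/* defensive: defined even if never consumed */

	for (char *token = strtok_r(tokens, ",", &saved_ptr);
	     token != NULL;
	     token = strtok_r(NULL, ",", &saved_ptr))
		printf("option: %s\n", token);
	return 0;
}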