-rw-r--r--  Documentation/block/biodoc.txt | 19
-rw-r--r--  Documentation/powerpc/dts-bindings/fsl/i2c.txt | 46
-rw-r--r--  Documentation/sound/alsa/HD-Audio.txt | 4
-rw-r--r--  MAINTAINERS | 3
-rw-r--r--  Makefile | 9
-rw-r--r--  arch/microblaze/include/asm/auxvec.h | 1
-rw-r--r--  arch/microblaze/include/asm/cputable.h | 1
-rw-r--r--  arch/microblaze/include/asm/ftrace.h | 1
-rw-r--r--  arch/microblaze/include/asm/hw_irq.h | 1
-rw-r--r--  arch/microblaze/include/asm/io.h | 1
-rw-r--r--  arch/microblaze/include/asm/socket.h | 3
-rw-r--r--  arch/microblaze/include/asm/user.h | 1
-rw-r--r--  arch/microblaze/include/asm/vga.h | 1
-rw-r--r--  arch/microblaze/kernel/of_device.c | 10
-rw-r--r--  arch/microblaze/kernel/process.c | 3
-rw-r--r--  arch/microblaze/kernel/prom.c | 1
-rw-r--r--  arch/microblaze/kernel/ptrace.c | 1
-rw-r--r--  arch/microblaze/kernel/signal.c | 1
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/boot/dts/tqm8540.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/tqm8541.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/tqm8548-bigflash.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/tqm8548.dts | 8
-rw-r--r--  arch/powerpc/boot/dts/tqm8555.dts | 4
-rw-r--r--  arch/powerpc/boot/dts/tqm8560.dts | 4
-rw-r--r--  arch/powerpc/configs/85xx/tqm8548_defconfig | 164
-rw-r--r--  arch/powerpc/include/asm/futex.h | 12
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 6
-rw-r--r--  arch/powerpc/include/asm/parport.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 13
-rw-r--r--  arch/powerpc/kernel/cputable.c | 2
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 1
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 14
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_driver.c | 2
-rw-r--r--  arch/sh/Kconfig | 18
-rw-r--r--  arch/sh/boards/board-ap325rxa.c | 1
-rw-r--r--  arch/sh/boards/board-urquell.c | 30
-rw-r--r--  arch/sh/drivers/pci/ops-sh7785lcr.c | 5
-rw-r--r--  arch/sh/drivers/pci/pci-sh7780.h | 2
-rw-r--r--  arch/sh/drivers/pci/pci.c | 3
-rw-r--r--  arch/sh/include/asm/dma-mapping.h | 36
-rw-r--r--  arch/sh/include/asm/scatterlist.h | 11
-rw-r--r--  arch/sh/include/asm/topology.h | 7
-rw-r--r--  arch/sh/include/asm/unistd_32.h | 4
-rw-r--r--  arch/sh/include/asm/unistd_64.h | 4
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 14
-rw-r--r--  arch/sh/kernel/syscalls_32.S | 2
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 2
-rw-r--r--  arch/sh/mm/consistent.c | 31
-rw-r--r--  arch/sparc/include/asm/parport.h | 5
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 6
-rw-r--r--  arch/x86/kernel/microcode_core.c | 33
-rw-r--r--  block/as-iosched.c | 116
-rw-r--r--  block/blk-barrier.c | 3
-rw-r--r--  block/blk-sysfs.c | 4
-rw-r--r--  block/blk.h | 4
-rw-r--r--  block/cfq-iosched.c | 270
-rw-r--r--  block/elevator.c | 8
-rw-r--r--  block/ioctl.c | 2
-rw-r--r--  block/scsi_ioctl.c | 6
-rw-r--r--  drivers/block/brd.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 187
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debugfs.c | 93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 22
-rw-r--r--  drivers/md/dm-bio-list.h | 117
-rw-r--r--  drivers/md/dm-delay.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 1
-rw-r--r--  drivers/md/dm-raid1.c | 1
-rw-r--r--  drivers/md/dm-region-hash.c | 1
-rw-r--r--  drivers/md/dm-snap.c | 1
-rw-r--r--  drivers/md/dm.c | 1
-rw-r--r--  drivers/md/raid1.c | 1
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/parisc/superio.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 2
-rw-r--r--  drivers/sh/intc.c | 35
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 26
-rw-r--r--  fs/bio.c | 18
-rw-r--r--  fs/buffer.c | 11
-rw-r--r--  fs/direct-io.c | 2
-rw-r--r--  fs/ext4/extents.c | 2
-rw-r--r--  fs/fuse/file.c | 8
-rw-r--r--  fs/gfs2/glock.c | 10
-rw-r--r--  fs/gfs2/inode.c | 8
-rw-r--r--  fs/gfs2/inode.h | 14
-rw-r--r--  fs/gfs2/ops_file.c | 8
-rw-r--r--  fs/gfs2/ops_fstype.c | 5
-rw-r--r--  fs/gfs2/ops_inode.c | 1
-rw-r--r--  fs/gfs2/quota.c | 4
-rw-r--r--  fs/inode.c | 36
-rw-r--r--  fs/nilfs2/bmap.c | 5
-rw-r--r--  fs/nilfs2/nilfs.h | 5
-rw-r--r--  fs/nilfs2/recovery.c | 20
-rw-r--r--  fs/nilfs2/sufile.c | 290
-rw-r--r--  fs/nilfs2/sufile.h | 79
-rw-r--r--  fs/nilfs2/super.c | 7
-rw-r--r--  fs/nilfs2/the_nilfs.c | 4
-rw-r--r--  fs/ocfs2/file.c | 94
-rw-r--r--  fs/pipe.c | 42
-rw-r--r--  fs/splice.c | 371
-rw-r--r--  include/drm/i915_drm.h | 3
-rw-r--r--  include/linux/bio.h | 109
-rw-r--r--  include/linux/debug_locks.h | 1
-rw-r--r--  include/linux/fs.h | 64
-rw-r--r--  include/linux/fsl_devices.h | 4
-rw-r--r--  include/linux/pipe_fs_i.h | 5
-rw-r--r--  include/linux/splice.h | 12
-rw-r--r--  include/linux/usb/serial.h | 7
-rw-r--r--  include/sound/jack.h | 2
-rw-r--r--  include/sound/pcm.h | 3
-rw-r--r--  kernel/power/swap.c | 2
-rw-r--r--  sound/core/control.c | 35
-rw-r--r--  sound/core/jack.c | 3
-rw-r--r--  sound/core/pcm_compat.c | 11
-rw-r--r--  sound/core/pcm_lib.c | 47
-rw-r--r--  sound/core/pcm_native.c | 93
-rw-r--r--  sound/core/seq/seq_compat.c | 9
-rw-r--r--  sound/core/timer.c | 11
-rw-r--r--  sound/isa/sb/sb16_csp.c | 19
-rw-r--r--  sound/isa/wavefront/wavefront_fx.c | 14
-rw-r--r--  sound/isa/wavefront/wavefront_synth.c | 11
-rw-r--r--  sound/pci/emu10k1/emufx.c | 41
-rw-r--r--  sound/pci/hda/hda_codec.c | 6
-rw-r--r--  sound/pci/hda/hda_intel.c | 39
-rw-r--r--  sound/pci/hda/patch_conexant.c | 21
-rw-r--r--  sound/pci/hda/patch_realtek.c | 5
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 27
-rw-r--r--  sound/pci/intel8x0.c | 91
-rw-r--r--  sound/soc/pxa/magician.c | 2
-rw-r--r--  sound/soc/s3c24xx/Kconfig | 6
-rw-r--r--  sound/usb/caiaq/Makefile | 4
-rw-r--r--  sound/usb/caiaq/audio.c (renamed from sound/usb/caiaq/caiaq-audio.c) | 12
-rw-r--r--  sound/usb/caiaq/audio.h (renamed from sound/usb/caiaq/caiaq-audio.h) | 0
-rw-r--r--  sound/usb/caiaq/control.c (renamed from sound/usb/caiaq/caiaq-control.c) | 10
-rw-r--r--  sound/usb/caiaq/control.h (renamed from sound/usb/caiaq/caiaq-control.h) | 0
-rw-r--r--  sound/usb/caiaq/device.c (renamed from sound/usb/caiaq/caiaq-device.c) | 23
-rw-r--r--  sound/usb/caiaq/device.h (renamed from sound/usb/caiaq/caiaq-device.h) | 0
-rw-r--r--  sound/usb/caiaq/input.c (renamed from sound/usb/caiaq/caiaq-input.c) | 11
-rw-r--r--  sound/usb/caiaq/input.h (renamed from sound/usb/caiaq/caiaq-input.h) | 0
-rw-r--r--  sound/usb/caiaq/midi.c (renamed from sound/usb/caiaq/caiaq-midi.c) | 13
-rw-r--r--  sound/usb/caiaq/midi.h (renamed from sound/usb/caiaq/caiaq-midi.h) | 0
-rw-r--r--  sound/usb/usx2y/us122l.c | 10
-rw-r--r--  sound/usb/usx2y/usX2Yhwdep.c | 13
151 files changed, 2144 insertions, 1336 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index ecad6ee75705..6fab97ea7e6b 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -1040,23 +1040,21 @@ Front merges are handled by the binary trees in AS and deadline schedulers.
 iii. Plugging the queue to batch requests in anticipation of opportunities for
      merge/sort optimizations
 
-This is just the same as in 2.4 so far, though per-device unplugging
-support is anticipated for 2.5. Also with a priority-based i/o scheduler,
-such decisions could be based on request priorities.
-
 Plugging is an approach that the current i/o scheduling algorithm resorts to so
 that it collects up enough requests in the queue to be able to take
 advantage of the sorting/merging logic in the elevator. If the
 queue is empty when a request comes in, then it plugs the request queue
-(sort of like plugging the bottom of a vessel to get fluid to build up)
+(sort of like plugging the bath tub of a vessel to get fluid to build up)
 till it fills up with a few more requests, before starting to service
 the requests. This provides an opportunity to merge/sort the requests before
 passing them down to the device. There are various conditions when the queue is
 unplugged (to open up the flow again), either through a scheduled task or
 could be on demand. For example wait_on_buffer sets the unplugging going
-(by running tq_disk) so the read gets satisfied soon. So in the read case,
-the queue gets explicitly unplugged as part of waiting for completion,
-in fact all queues get unplugged as a side-effect.
+through sync_buffer() running blk_run_address_space(mapping). Or the caller
+can do it explicity through blk_unplug(bdev). So in the read case,
+the queue gets explicitly unplugged as part of waiting for completion on that
+buffer. For page driven IO, the address space ->sync_page() takes care of
+doing the blk_run_address_space().
 
 Aside:
   This is kind of controversial territory, as it's not clear if plugging is
@@ -1067,11 +1065,6 @@ Aside:
   multi-page bios being queued in one shot, we may not need to wait to merge
   a big request from the broken up pieces coming by.
 
-  Per-queue granularity unplugging (still a Todo) may help reduce some of the
-  concerns with just a single tq_disk flush approach. Something like
-  blk_kick_queue() to unplug a specific queue (right away ?)
-  or optionally, all queues, is in the plan.
-
 4.4 I/O contexts
 I/O contexts provide a dynamically allocated per process data area. They may
 be used in I/O schedulers, and in the block layer (could be used for IO statis,
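A minimal illustrative sketch of the explicit-unplug path the updated biodoc text above describes; only blk_run_address_space() is taken from that text, while the wrapper function and its name are hypothetical:

    /*
     * Illustrative sketch (not part of the patch): kick a plugged queue for
     * page-cache I/O, as the rewritten paragraph above describes.
     * blk_run_address_space() is the helper named in the text.
     */
    #include <linux/blkdev.h>
    #include <linux/fs.h>

    static void example_kick_pending_io(struct address_space *mapping)
    {
    	/* Unplug the backing device so already-queued requests get dispatched. */
    	blk_run_address_space(mapping);
    }
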
diff --git a/Documentation/powerpc/dts-bindings/fsl/i2c.txt b/Documentation/powerpc/dts-bindings/fsl/i2c.txt
index d0ab33e21fe6..b6d2e21474f9 100644
--- a/Documentation/powerpc/dts-bindings/fsl/i2c.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/i2c.txt
@@ -7,8 +7,10 @@ Required properties :
 
 Recommended properties :
 
- - compatible : Should be "fsl-i2c" for parts compatible with
-   Freescale I2C specifications.
+ - compatible : compatibility list with 2 entries, the first should
+   be "fsl,CHIP-i2c" where CHIP is the name of a compatible processor,
+   e.g. mpc8313, mpc8543, mpc8544, mpc5200 or mpc5200b. The second one
+   should be "fsl-i2c".
 - interrupts : <a b> where a is the interrupt number and b is a
   field that represents an encoding of the sense and level
   information for the interrupt. This should be encoded based on
@@ -16,17 +18,31 @@ Recommended properties :
   controller you have.
 - interrupt-parent : the phandle for the interrupt controller that
   services interrupts for this device.
- - dfsrr : boolean; if defined, indicates that this I2C device has
-   a digital filter sampling rate register
- - fsl5200-clocking : boolean; if defined, indicated that this device
-   uses the FSL 5200 clocking mechanism.
-
-Example :
-	i2c@3000 {
-		interrupt-parent = <40000>;
-		interrupts = <1b 3>;
-		reg = <3000 18>;
-		device_type = "i2c";
-		compatible = "fsl-i2c";
-		dfsrr;
+ - fsl,preserve-clocking : boolean; if defined, the clock settings
+   from the bootloader are preserved (not touched).
+ - clock-frequency : desired I2C bus clock frequency in Hz.
+
+Examples :
+
+	i2c@3d00 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+		cell-index = <0>;
+		reg = <0x3d00 0x40>;
+		interrupts = <2 15 0>;
+		interrupt-parent = <&mpc5200_pic>;
+		fsl,preserve-clocking;
 	};
+
+	i2c@3100 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cell-index = <1>;
+		compatible = "fsl,mpc8544-i2c", "fsl-i2c";
+		reg = <0x3100 0x100>;
+		interrupts = <43 2>;
+		interrupt-parent = <&mpic>;
+		clock-frequency = <400000>;
+	};
+
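As a hedged companion to the binding above, a sketch of how a Linux driver might key its OF match table on the generic "fsl-i2c" fallback; the table and symbol names here are assumptions, not taken from the patch:

    /*
     * Illustrative only: match on the generic "fsl-i2c" compatible described
     * in the binding text above. Table and symbol names are hypothetical.
     */
    #include <linux/mod_devicetable.h>

    static const struct of_device_id example_fsl_i2c_match[] = {
    	{ .compatible = "fsl-i2c", },
    	{ /* sentinel */ },
    };
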
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index c5948f2f9a25..88b7433d2f11 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -169,7 +169,7 @@ PCI SSID look-up.
 What `model` option values are available depends on the codec chip.
 Check your codec chip from the codec proc file (see "Codec Proc-File"
 section below). It will show the vendor/product name of your codec
-chip. Then, see Documentation/sound/alsa/HD-Audio-Modelstxt file,
+chip. Then, see Documentation/sound/alsa/HD-Audio-Models.txt file,
 the section of HD-audio driver. You can find a list of codecs
 and `model` options belonging to each codec. For example, for Realtek
 ALC262 codec chip, pass `model=ultra` for devices that are compatible
@@ -177,7 +177,7 @@ with Samsung Q1 Ultra.
 
 Thus, the first thing you can do for any brand-new, unsupported and
 non-working HD-audio hardware is to check HD-audio codec and several
-different `model` option values. If you have a luck, some of them
+different `model` option values. If you have any luck, some of them
 might suit with your device well.
 
 Some codecs such as ALC880 have a special model option `model=test`.
diff --git a/MAINTAINERS b/MAINTAINERS
index 29d74f47ba86..0cb20d821694 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3680,6 +3680,7 @@ L: microblaze-uclinux@itee.uq.edu.au
 W: http://www.monstr.eu/fdt/
 T: git git://git.monstr.eu/linux-2.6-microblaze.git
 S: Supported
+F: arch/microblaze/
 
 MICROTEK X6 SCANNER
 P: Oliver Neukum
@@ -5313,7 +5314,9 @@ L: linux-sh@vger.kernel.org
 W: http://www.linux-sh.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git
 S: Supported
+F: Documentation/sh/
 F: arch/sh/
+F: drivers/sh/
 
 SUSPEND TO RAM
 P: Len Brown
diff --git a/Makefile b/Makefile
index ad830bd45a4b..bfdef56add34 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 30
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Temporary Tasmanian Devil
 
 # *DOCUMENTATION*
@@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 				  -e s/arm.*/arm/ -e s/sa110/arm/ \
 				  -e s/s390x/s390/ -e s/parisc64/parisc/ \
 				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-				  -e s/sh.*/sh/ )
+				  -e s/sh[234].*/sh/ )
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
@@ -210,6 +210,11 @@ ifeq ($(ARCH),sparc64)
        SRCARCH := sparc
 endif
 
+# Additional ARCH settings for sh
+ifeq ($(ARCH),sh64)
+       SRCARCH := sh
+endif
+
 # Where to locate arch specific headers
 hdr-arch := $(SRCARCH)
 
diff --git a/arch/microblaze/include/asm/auxvec.h b/arch/microblaze/include/asm/auxvec.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/auxvec.h
+++ b/arch/microblaze/include/asm/auxvec.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/cputable.h b/arch/microblaze/include/asm/cputable.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/cputable.h
+++ b/arch/microblaze/include/asm/cputable.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/hw_irq.h b/arch/microblaze/include/asm/hw_irq.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/hw_irq.h
+++ b/arch/microblaze/include/asm/hw_irq.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index cfab0342588d..8b5853ee6b5c 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -12,7 +12,6 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <linux/types.h>
-#include <asm/page.h>
 
 #define IO_SPACE_LIMIT (0xFFFFFFFF)
 
diff --git a/arch/microblaze/include/asm/socket.h b/arch/microblaze/include/asm/socket.h
index f919b6b540ac..825936860314 100644
--- a/arch/microblaze/include/asm/socket.h
+++ b/arch/microblaze/include/asm/socket.h
@@ -63,4 +63,7 @@
 
 #define SO_MARK			36
 
+#define SO_TIMESTAMPING		37
+#define SCM_TIMESTAMPING	SO_TIMESTAMPING
+
 #endif /* _ASM_MICROBLAZE_SOCKET_H */
diff --git a/arch/microblaze/include/asm/user.h b/arch/microblaze/include/asm/user.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/user.h
+++ b/arch/microblaze/include/asm/user.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/vga.h b/arch/microblaze/include/asm/vga.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/vga.h
+++ b/arch/microblaze/include/asm/vga.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/kernel/of_device.c b/arch/microblaze/kernel/of_device.c
index 717edf4ad0b4..9a0f7632c47c 100644
--- a/arch/microblaze/kernel/of_device.c
+++ b/arch/microblaze/kernel/of_device.c
@@ -13,7 +13,6 @@ void of_device_make_bus_id(struct of_device *dev)
 {
 	static atomic_t bus_no_reg_magic;
 	struct device_node *node = dev->node;
-	char *name = dev->dev.bus_id;
 	const u32 *reg;
 	u64 addr;
 	int magic;
@@ -25,9 +24,8 @@ void of_device_make_bus_id(struct of_device *dev)
 	if (reg) {
 		addr = of_translate_address(node, reg);
 		if (addr != OF_BAD_ADDR) {
-			snprintf(name, BUS_ID_SIZE,
-				 "%llx.%s", (unsigned long long)addr,
-				 node->name);
+			dev_set_name(&dev->dev, "%llx.%s",
+				     (unsigned long long)addr, node->name);
 			return;
 		}
 	}
@@ -37,7 +35,7 @@ void of_device_make_bus_id(struct of_device *dev)
 	 * counter (and pray...)
 	 */
 	magic = atomic_add_return(1, &bus_no_reg_magic);
-	snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+	dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1);
 }
 EXPORT_SYMBOL(of_device_make_bus_id);
 
@@ -58,7 +56,7 @@ struct of_device *of_device_alloc(struct device_node *np,
 	dev->dev.archdata.of_node = np;
 
 	if (bus_id)
-		strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+		dev_set_name(&dev->dev, bus_id);
 	else
 		of_device_make_bus_id(dev);
 
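The hunk above converts open-coded bus_id handling to dev_set_name(). A small, self-contained sketch of that driver-core call for reference; the wrapper function and its parameters are made up for illustration:

    /*
     * Illustrative only: name a struct device the way the converted code
     * above now does. dev_set_name() is the real driver-core API; the
     * wrapper below is hypothetical.
     */
    #include <linux/device.h>

    static int example_name_device(struct device *dev, const char *base, int index)
    {
    	/* Formats and stores the device name, e.g. "soc.0". */
    	return dev_set_name(dev, "%s.%d", base, index);
    }
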
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 60e9ed7d3132..436f26ccbfa9 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -115,8 +115,7 @@ void flush_thread(void)
 {
 }
 
-/* FIXME - here will be a proposed change -> remove nr parameter */
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 475b1fac5cfd..34c48718061a 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -39,7 +39,6 @@
 #include <asm/system.h>
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
-#include <linux/pci.h>
 #include <asm/sections.h>
 #include <asm/pci-bridge.h>
 
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 3171e39e3220..b86aa623e36d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -32,7 +32,6 @@
 #include <linux/signal.h>
 
 #include <linux/errno.h>
-#include <linux/ptrace.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/asm-offsets.h>
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index ff347b98863a..3889cf45fa71 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -37,7 +37,6 @@
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
-#include <linux/signal.h>
 #include <linux/syscalls.h>
 #include <asm/cacheflush.h>
 #include <asm/syscalls.h>
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index d90b548fb1bb..ba0568c2cc1c 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -29,9 +29,7 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/fs.h>
-#include <linux/ipc.h>
 #include <linux/semaphore.h>
-#include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/unistd.h>
 
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5b50e1ac6179..4c7804551362 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -462,7 +462,7 @@ config PPC_64K_PAGES
 
 config PPC_256K_PAGES
 	bool "256k page size" if 44x
-	depends on !STDBINUTILS && (!SHMEM || BROKEN)
+	depends on !STDBINUTILS
 	help
 	  Make the page size 256k.
 
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index 231bae756637..b6f1fc6eb960 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -84,9 +84,9 @@
 		interrupt-parent = <&mpic>;
 		dfsrr;
 
-		dtt@50 {
+		dtt@48 {
 			compatible = "national,lm75";
-			reg = <0x50>;
+			reg = <0x48>;
 		};
 
 		rtc@68 {
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index 4356a1f08295..fa6a3d54a8a5 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -83,9 +83,9 @@
 		interrupt-parent = <&mpic>;
 		dfsrr;
 
-		dtt@50 {
+		dtt@48 {
 			compatible = "national,lm75";
-			reg = <0x50>;
+			reg = <0x48>;
 		};
 
 		rtc@68 {
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 19aa72301c83..00f7ed7a2455 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -85,9 +85,9 @@
 		interrupt-parent = <&mpic>;
 		dfsrr;
 
-		dtt@50 {
+		dtt@48 {
 			compatible = "national,lm75";
-			reg = <0x50>;
+			reg = <0x48>;
 		};
 
 		rtc@68 {
@@ -247,7 +247,7 @@
 			interrupts = <31 2 32 2 33 2>;
 			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi2>;
-			phy-handle = <&phy3>;
+			phy-handle = <&phy4>;
 
 			mdio@520 {
 				#address-cells = <1>;
@@ -275,7 +275,7 @@
 			interrupts = <37 2 38 2 39 2>;
 			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi3>;
-			phy-handle = <&phy4>;
+			phy-handle = <&phy5>;
 
 			mdio@520 {
 				#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index 49145a04fc6c..673e4a778ac8 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -85,9 +85,9 @@
 		interrupt-parent = <&mpic>;
 		dfsrr;
 
-		dtt@50 {
+		dtt@48 {
 			compatible = "national,lm75";
-			reg = <0x50>;
+			reg = <0x48>;
 		};
 
 		rtc@68 {
@@ -247,7 +247,7 @@
 			interrupts = <31 2 32 2 33 2>;
 			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi2>;
-			phy-handle = <&phy3>;
+			phy-handle = <&phy4>;
 
 			mdio@520 {
 				#address-cells = <1>;
@@ -275,7 +275,7 @@
 			interrupts = <37 2 38 2 39 2>;
 			interrupt-parent = <&mpic>;
 			tbi-handle = <&tbi3>;
-			phy-handle = <&phy4>;
+			phy-handle = <&phy5>;
 
 			mdio@520 {
 				#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index 06d366ebbda3..6a99f1eef7ad 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -83,9 +83,9 @@
 		interrupt-parent = <&mpic>;
 		dfsrr;
 
-		dtt@50 {
+		dtt@48 {
 			compatible = "national,lm75";
-			reg = <0x50>;
+			reg = <0x48>;
 		};
 
 		rtc@68 {
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index feff915e0492..b6c2d71defd3 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -85,9 +85,9 @@
 		interrupt-parent = <&mpic>;
 		dfsrr;
 
-		dtt@50 {
+		dtt@48 {
 			compatible = "national,lm75";
-			reg = <0x50>;
+			reg = <0x48>;
 		};
 
 		rtc@68 {
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index 0bc45975911a..43030fea2eee 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc2 3# Linux kernel version: 2.6.29-rc7
4# Mon Jan 26 15:36:20 2009 4# Mon Mar 16 09:03:28 2009
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -22,6 +22,7 @@ CONFIG_FSL_EMB_PERFMON=y
22# CONFIG_PHYS_64BIT is not set 22# CONFIG_PHYS_64BIT is not set
23CONFIG_SPE=y 23CONFIG_SPE=y
24CONFIG_PPC_MMU_NOHASH=y 24CONFIG_PPC_MMU_NOHASH=y
25CONFIG_PPC_BOOK3E_MMU=y
25# CONFIG_PPC_MM_SLICES is not set 26# CONFIG_PPC_MM_SLICES is not set
26# CONFIG_SMP is not set 27# CONFIG_SMP is not set
27CONFIG_PPC32=y 28CONFIG_PPC32=y
@@ -75,6 +76,15 @@ CONFIG_SYSVIPC_SYSCTL=y
75# CONFIG_BSD_PROCESS_ACCT is not set 76# CONFIG_BSD_PROCESS_ACCT is not set
76# CONFIG_TASKSTATS is not set 77# CONFIG_TASKSTATS is not set
77# CONFIG_AUDIT is not set 78# CONFIG_AUDIT is not set
79
80#
81# RCU Subsystem
82#
83CONFIG_CLASSIC_RCU=y
84# CONFIG_TREE_RCU is not set
85# CONFIG_PREEMPT_RCU is not set
86# CONFIG_TREE_RCU_TRACE is not set
87# CONFIG_PREEMPT_RCU_TRACE is not set
78# CONFIG_IKCONFIG is not set 88# CONFIG_IKCONFIG is not set
79CONFIG_LOG_BUF_SHIFT=14 89CONFIG_LOG_BUF_SHIFT=14
80CONFIG_GROUP_SCHED=y 90CONFIG_GROUP_SCHED=y
@@ -152,11 +162,6 @@ CONFIG_DEFAULT_AS=y
152# CONFIG_DEFAULT_CFQ is not set 162# CONFIG_DEFAULT_CFQ is not set
153# CONFIG_DEFAULT_NOOP is not set 163# CONFIG_DEFAULT_NOOP is not set
154CONFIG_DEFAULT_IOSCHED="anticipatory" 164CONFIG_DEFAULT_IOSCHED="anticipatory"
155CONFIG_CLASSIC_RCU=y
156# CONFIG_TREE_RCU is not set
157# CONFIG_PREEMPT_RCU is not set
158# CONFIG_TREE_RCU_TRACE is not set
159# CONFIG_PREEMPT_RCU_TRACE is not set
160# CONFIG_FREEZER is not set 165# CONFIG_FREEZER is not set
161 166
162# 167#
@@ -202,7 +207,7 @@ CONFIG_MPIC=y
202# 207#
203# Kernel options 208# Kernel options
204# 209#
205# CONFIG_HIGHMEM is not set 210CONFIG_HIGHMEM=y
206CONFIG_TICK_ONESHOT=y 211CONFIG_TICK_ONESHOT=y
207CONFIG_NO_HZ=y 212CONFIG_NO_HZ=y
208CONFIG_HIGH_RES_TIMERS=y 213CONFIG_HIGH_RES_TIMERS=y
@@ -244,6 +249,7 @@ CONFIG_UNEVICTABLE_LRU=y
244CONFIG_PPC_4K_PAGES=y 249CONFIG_PPC_4K_PAGES=y
245# CONFIG_PPC_16K_PAGES is not set 250# CONFIG_PPC_16K_PAGES is not set
246# CONFIG_PPC_64K_PAGES is not set 251# CONFIG_PPC_64K_PAGES is not set
252# CONFIG_PPC_256K_PAGES is not set
247CONFIG_FORCE_MAX_ZONEORDER=11 253CONFIG_FORCE_MAX_ZONEORDER=11
248CONFIG_PROC_DEVICETREE=y 254CONFIG_PROC_DEVICETREE=y
249# CONFIG_CMDLINE_BOOL is not set 255# CONFIG_CMDLINE_BOOL is not set
@@ -259,6 +265,7 @@ CONFIG_ZONE_DMA=y
259CONFIG_PPC_INDIRECT_PCI=y 265CONFIG_PPC_INDIRECT_PCI=y
260CONFIG_FSL_SOC=y 266CONFIG_FSL_SOC=y
261CONFIG_FSL_PCI=y 267CONFIG_FSL_PCI=y
268CONFIG_FSL_LBC=y
262CONFIG_PPC_PCI_CHOICE=y 269CONFIG_PPC_PCI_CHOICE=y
263CONFIG_PCI=y 270CONFIG_PCI=y
264CONFIG_PCI_DOMAINS=y 271CONFIG_PCI_DOMAINS=y
@@ -284,10 +291,11 @@ CONFIG_ARCH_SUPPORTS_MSI=y
284# Default settings for advanced configuration options are used 291# Default settings for advanced configuration options are used
285# 292#
286CONFIG_LOWMEM_SIZE=0x30000000 293CONFIG_LOWMEM_SIZE=0x30000000
294CONFIG_LOWMEM_CAM_NUM=3
287CONFIG_PAGE_OFFSET=0xc0000000 295CONFIG_PAGE_OFFSET=0xc0000000
288CONFIG_KERNEL_START=0xc0000000 296CONFIG_KERNEL_START=0xc0000000
289CONFIG_PHYSICAL_START=0x00000000 297CONFIG_PHYSICAL_START=0x00000000
290CONFIG_PHYSICAL_ALIGN=0x10000000 298CONFIG_PHYSICAL_ALIGN=0x04000000
291CONFIG_TASK_SIZE=0xc0000000 299CONFIG_TASK_SIZE=0xc0000000
292CONFIG_NET=y 300CONFIG_NET=y
293 301
@@ -363,12 +371,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
363# CONFIG_BT is not set 371# CONFIG_BT is not set
364# CONFIG_AF_RXRPC is not set 372# CONFIG_AF_RXRPC is not set
365# CONFIG_PHONET is not set 373# CONFIG_PHONET is not set
366CONFIG_WIRELESS=y 374# CONFIG_WIRELESS is not set
367# CONFIG_CFG80211 is not set
368CONFIG_WIRELESS_OLD_REGULATORY=y
369# CONFIG_WIRELESS_EXT is not set
370# CONFIG_LIB80211 is not set
371# CONFIG_MAC80211 is not set
372# CONFIG_WIMAX is not set 375# CONFIG_WIMAX is not set
373# CONFIG_RFKILL is not set 376# CONFIG_RFKILL is not set
374# CONFIG_NET_9P is not set 377# CONFIG_NET_9P is not set
@@ -471,27 +474,18 @@ CONFIG_MTD_NAND_IDS=y
471# CONFIG_MTD_NAND_NANDSIM is not set 474# CONFIG_MTD_NAND_NANDSIM is not set
472# CONFIG_MTD_NAND_PLATFORM is not set 475# CONFIG_MTD_NAND_PLATFORM is not set
473# CONFIG_MTD_NAND_FSL_ELBC is not set 476# CONFIG_MTD_NAND_FSL_ELBC is not set
474# CONFIG_MTD_NAND_FSL_UPM is not set 477CONFIG_MTD_NAND_FSL_UPM=y
475# CONFIG_MTD_ONENAND is not set 478# CONFIG_MTD_ONENAND is not set
476 479
477# 480#
478# LPDDR flash memory drivers 481# LPDDR flash memory drivers
479# 482#
480# CONFIG_MTD_LPDDR is not set 483# CONFIG_MTD_LPDDR is not set
481# CONFIG_MTD_QINFO_PROBE is not set
482 484
483# 485#
484# UBI - Unsorted block images 486# UBI - Unsorted block images
485# 487#
486CONFIG_MTD_UBI=m 488# CONFIG_MTD_UBI is not set
487CONFIG_MTD_UBI_WL_THRESHOLD=4096
488CONFIG_MTD_UBI_BEB_RESERVE=1
489# CONFIG_MTD_UBI_GLUEBI is not set
490
491#
492# UBI debugging options
493#
494# CONFIG_MTD_UBI_DEBUG is not set
495CONFIG_OF_DEVICE=y 489CONFIG_OF_DEVICE=y
496CONFIG_OF_I2C=y 490CONFIG_OF_I2C=y
497# CONFIG_PARPORT is not set 491# CONFIG_PARPORT is not set
@@ -515,69 +509,21 @@ CONFIG_BLK_DEV_RAM_SIZE=32768
515# CONFIG_BLK_DEV_HD is not set 509# CONFIG_BLK_DEV_HD is not set
516CONFIG_MISC_DEVICES=y 510CONFIG_MISC_DEVICES=y
517# CONFIG_PHANTOM is not set 511# CONFIG_PHANTOM is not set
518# CONFIG_EEPROM_93CX6 is not set
519# CONFIG_SGI_IOC4 is not set 512# CONFIG_SGI_IOC4 is not set
520# CONFIG_TIFM_CORE is not set 513# CONFIG_TIFM_CORE is not set
521# CONFIG_ICS932S401 is not set 514# CONFIG_ICS932S401 is not set
522# CONFIG_ENCLOSURE_SERVICES is not set 515# CONFIG_ENCLOSURE_SERVICES is not set
523# CONFIG_HP_ILO is not set 516# CONFIG_HP_ILO is not set
524# CONFIG_C2PORT is not set 517# CONFIG_C2PORT is not set
518
519#
520# EEPROM support
521#
522# CONFIG_EEPROM_AT24 is not set
523# CONFIG_EEPROM_LEGACY is not set
524# CONFIG_EEPROM_93CX6 is not set
525CONFIG_HAVE_IDE=y 525CONFIG_HAVE_IDE=y
526CONFIG_IDE=y 526# CONFIG_IDE is not set
527
528#
529# Please see Documentation/ide/ide.txt for help/info on IDE drives
530#
531CONFIG_IDE_TIMINGS=y
532# CONFIG_BLK_DEV_IDE_SATA is not set
533CONFIG_IDE_GD=y
534CONFIG_IDE_GD_ATA=y
535# CONFIG_IDE_GD_ATAPI is not set
536# CONFIG_BLK_DEV_IDECD is not set
537# CONFIG_BLK_DEV_IDETAPE is not set
538# CONFIG_IDE_TASK_IOCTL is not set
539CONFIG_IDE_PROC_FS=y
540
541#
542# IDE chipset support/bugfixes
543#
544# CONFIG_BLK_DEV_PLATFORM is not set
545CONFIG_BLK_DEV_IDEDMA_SFF=y
546
547#
548# PCI IDE chipsets support
549#
550CONFIG_BLK_DEV_IDEPCI=y
551CONFIG_IDEPCI_PCIBUS_ORDER=y
552# CONFIG_BLK_DEV_OFFBOARD is not set
553CONFIG_BLK_DEV_GENERIC=y
554# CONFIG_BLK_DEV_OPTI621 is not set
555CONFIG_BLK_DEV_IDEDMA_PCI=y
556# CONFIG_BLK_DEV_AEC62XX is not set
557# CONFIG_BLK_DEV_ALI15X3 is not set
558# CONFIG_BLK_DEV_AMD74XX is not set
559# CONFIG_BLK_DEV_CMD64X is not set
560# CONFIG_BLK_DEV_TRIFLEX is not set
561# CONFIG_BLK_DEV_CS5520 is not set
562# CONFIG_BLK_DEV_CS5530 is not set
563# CONFIG_BLK_DEV_HPT366 is not set
564# CONFIG_BLK_DEV_JMICRON is not set
565# CONFIG_BLK_DEV_SC1200 is not set
566# CONFIG_BLK_DEV_PIIX is not set
567# CONFIG_BLK_DEV_IT8172 is not set
568# CONFIG_BLK_DEV_IT8213 is not set
569# CONFIG_BLK_DEV_IT821X is not set
570# CONFIG_BLK_DEV_NS87415 is not set
571# CONFIG_BLK_DEV_PDC202XX_OLD is not set
572# CONFIG_BLK_DEV_PDC202XX_NEW is not set
573# CONFIG_BLK_DEV_SVWKS is not set
574# CONFIG_BLK_DEV_SIIMAGE is not set
575# CONFIG_BLK_DEV_SL82C105 is not set
576# CONFIG_BLK_DEV_SLC90E66 is not set
577# CONFIG_BLK_DEV_TRM290 is not set
578CONFIG_BLK_DEV_VIA82CXXX=y
579# CONFIG_BLK_DEV_TC86C001 is not set
580CONFIG_BLK_DEV_IDEDMA=y
581 527
582# 528#
583# SCSI device support 529# SCSI device support
@@ -650,7 +596,7 @@ CONFIG_MII=y
650CONFIG_NETDEV_1000=y 596CONFIG_NETDEV_1000=y
651# CONFIG_ACENIC is not set 597# CONFIG_ACENIC is not set
652# CONFIG_DL2K is not set 598# CONFIG_DL2K is not set
653CONFIG_E1000=y 599# CONFIG_E1000 is not set
654# CONFIG_E1000E is not set 600# CONFIG_E1000E is not set
655# CONFIG_IP1000 is not set 601# CONFIG_IP1000 is not set
656# CONFIG_IGB is not set 602# CONFIG_IGB is not set
@@ -668,6 +614,7 @@ CONFIG_GIANFAR=y
668# CONFIG_QLA3XXX is not set 614# CONFIG_QLA3XXX is not set
669# CONFIG_ATL1 is not set 615# CONFIG_ATL1 is not set
670# CONFIG_ATL1E is not set 616# CONFIG_ATL1E is not set
617# CONFIG_ATL1C is not set
671# CONFIG_JME is not set 618# CONFIG_JME is not set
672CONFIG_NETDEV_10000=y 619CONFIG_NETDEV_10000=y
673# CONFIG_CHELSIO_T1 is not set 620# CONFIG_CHELSIO_T1 is not set
@@ -835,8 +782,6 @@ CONFIG_I2C_MPC=y
835# Miscellaneous I2C Chip support 782# Miscellaneous I2C Chip support
836# 783#
837# CONFIG_DS1682 is not set 784# CONFIG_DS1682 is not set
838# CONFIG_EEPROM_AT24 is not set
839# CONFIG_EEPROM_LEGACY is not set
840# CONFIG_SENSORS_PCF8574 is not set 785# CONFIG_SENSORS_PCF8574 is not set
841# CONFIG_PCF8575 is not set 786# CONFIG_PCF8575 is not set
842# CONFIG_SENSORS_PCA9539 is not set 787# CONFIG_SENSORS_PCA9539 is not set
@@ -975,26 +920,7 @@ CONFIG_HID=y
975# Special HID drivers 920# Special HID drivers
976# 921#
977CONFIG_HID_COMPAT=y 922CONFIG_HID_COMPAT=y
978CONFIG_USB_SUPPORT=y 923# CONFIG_USB_SUPPORT is not set
979CONFIG_USB_ARCH_HAS_HCD=y
980CONFIG_USB_ARCH_HAS_OHCI=y
981CONFIG_USB_ARCH_HAS_EHCI=y
982# CONFIG_USB is not set
983# CONFIG_USB_OTG_WHITELIST is not set
984# CONFIG_USB_OTG_BLACKLIST_HUB is not set
985
986#
987# Enable Host or Gadget support to see Inventra options
988#
989
990#
991# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
992#
993# CONFIG_USB_GADGET is not set
994
995#
996# OTG and related infrastructure
997#
998# CONFIG_UWB is not set 924# CONFIG_UWB is not set
999# CONFIG_MMC is not set 925# CONFIG_MMC is not set
1000# CONFIG_MEMSTICK is not set 926# CONFIG_MEMSTICK is not set
@@ -1064,16 +990,9 @@ CONFIG_RTC_DRV_DS1307=y
1064# 990#
1065# File systems 991# File systems
1066# 992#
1067CONFIG_EXT2_FS=y 993# CONFIG_EXT2_FS is not set
1068# CONFIG_EXT2_FS_XATTR is not set 994# CONFIG_EXT3_FS is not set
1069# CONFIG_EXT2_FS_XIP is not set
1070CONFIG_EXT3_FS=y
1071CONFIG_EXT3_FS_XATTR=y
1072# CONFIG_EXT3_FS_POSIX_ACL is not set
1073# CONFIG_EXT3_FS_SECURITY is not set
1074# CONFIG_EXT4_FS is not set 995# CONFIG_EXT4_FS is not set
1075CONFIG_JBD=y
1076CONFIG_FS_MBCACHE=y
1077# CONFIG_REISERFS_FS is not set 996# CONFIG_REISERFS_FS is not set
1078# CONFIG_JFS_FS is not set 997# CONFIG_JFS_FS is not set
1079# CONFIG_FS_POSIX_ACL is not set 998# CONFIG_FS_POSIX_ACL is not set
@@ -1122,8 +1041,17 @@ CONFIG_MISC_FILESYSTEMS=y
1122# CONFIG_BEFS_FS is not set 1041# CONFIG_BEFS_FS is not set
1123# CONFIG_BFS_FS is not set 1042# CONFIG_BFS_FS is not set
1124# CONFIG_EFS_FS is not set 1043# CONFIG_EFS_FS is not set
1125# CONFIG_JFFS2_FS is not set 1044CONFIG_JFFS2_FS=y
1126# CONFIG_UBIFS_FS is not set 1045CONFIG_JFFS2_FS_DEBUG=0
1046CONFIG_JFFS2_FS_WRITEBUFFER=y
1047# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1048# CONFIG_JFFS2_SUMMARY is not set
1049# CONFIG_JFFS2_FS_XATTR is not set
1050# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1051CONFIG_JFFS2_ZLIB=y
1052# CONFIG_JFFS2_LZO is not set
1053CONFIG_JFFS2_RTIME=y
1054# CONFIG_JFFS2_RUBIN is not set
1127# CONFIG_CRAMFS is not set 1055# CONFIG_CRAMFS is not set
1128# CONFIG_SQUASHFS is not set 1056# CONFIG_SQUASHFS is not set
1129# CONFIG_VXFS_FS is not set 1057# CONFIG_VXFS_FS is not set
@@ -1184,6 +1112,8 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
1184CONFIG_CRC32=y 1112CONFIG_CRC32=y
1185# CONFIG_CRC7 is not set 1113# CONFIG_CRC7 is not set
1186# CONFIG_LIBCRC32C is not set 1114# CONFIG_LIBCRC32C is not set
1115CONFIG_ZLIB_INFLATE=y
1116CONFIG_ZLIB_DEFLATE=y
1187CONFIG_PLIST=y 1117CONFIG_PLIST=y
1188CONFIG_HAS_IOMEM=y 1118CONFIG_HAS_IOMEM=y
1189CONFIG_HAS_IOPORT=y 1119CONFIG_HAS_IOPORT=y
@@ -1219,6 +1149,7 @@ CONFIG_DEBUG_MUTEXES=y
1219# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1149# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1220# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1150# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1221# CONFIG_DEBUG_KOBJECT is not set 1151# CONFIG_DEBUG_KOBJECT is not set
1152# CONFIG_DEBUG_HIGHMEM is not set
1222# CONFIG_DEBUG_BUGVERBOSE is not set 1153# CONFIG_DEBUG_BUGVERBOSE is not set
1223# CONFIG_DEBUG_INFO is not set 1154# CONFIG_DEBUG_INFO is not set
1224# CONFIG_DEBUG_VM is not set 1155# CONFIG_DEBUG_VM is not set
@@ -1236,6 +1167,7 @@ CONFIG_DEBUG_MUTEXES=y
1236# CONFIG_LATENCYTOP is not set 1167# CONFIG_LATENCYTOP is not set
1237CONFIG_SYSCTL_SYSCALL_CHECK=y 1168CONFIG_SYSCTL_SYSCALL_CHECK=y
1238CONFIG_HAVE_FUNCTION_TRACER=y 1169CONFIG_HAVE_FUNCTION_TRACER=y
1170CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1239CONFIG_HAVE_DYNAMIC_FTRACE=y 1171CONFIG_HAVE_DYNAMIC_FTRACE=y
1240CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 1172CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1241 1173
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 6d406c5c5de4..9696cc36d2dc 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -27,7 +27,7 @@
 	PPC_LONG "1b,4b,2b,4b\n" \
 	".previous" \
 	: "=&r" (oldval), "=&r" (ret) \
-	: "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+	: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
 	: "cr0", "memory")
 
 static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
 		break;
 	default:
 		ret = -ENOSYS;
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index cbf154387091..86d2366ab6a1 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -52,6 +52,12 @@
  */
 #define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)
 
+/* This indicates that the processor uses the wrong opcode for tlbilx
+ * instructions. During the ISA 2.06 development the opcode for tlbilx
+ * changed and some early implementations used to old opcode
+ */
+#define MMU_FTR_TLBILX_EARLY_OPCODE	ASM_CONST(0x00400000)
+
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 414c50e2e881..94942d60ddfd 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -29,7 +29,7 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
 		prop = of_get_property(np, "interrupts", NULL);
 		if (!prop)
 			continue;
-		if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL)
+		if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
 			count++;
 	}
 	return count;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index f4a4db8d5555..ef4da37f3c10 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -43,7 +43,8 @@
 
 #define PPC_INST_STSWI			0x7c0005aa
 #define PPC_INST_STSWX			0x7c00052a
-#define PPC_INST_TLBILX			0x7c000626
+#define PPC_INST_TLBILX			0x7c000024
+#define PPC_INST_TLBILX_EARLY		0x7c000626
 #define PPC_INST_WAIT			0x7c00007c
 
 /* macros to insert fields into opcodes */
@@ -63,10 +64,18 @@
 #define PPC_RFDI		stringify_in_c(.long PPC_INST_RFDI)
 #define PPC_RFMCI		stringify_in_c(.long PPC_INST_RFMCI)
 #define PPC_TLBILX(t, a, b)	stringify_in_c(.long PPC_INST_TLBILX | \
-					__PPC_T_TLB(t) | __PPC_RA(a) | __PPC_RB(b))
+					__PPC_T_TLB(t) | \
+					__PPC_RA(a) | __PPC_RB(b))
 #define PPC_TLBILX_ALL(a, b)	PPC_TLBILX(0, a, b)
 #define PPC_TLBILX_PID(a, b)	PPC_TLBILX(1, a, b)
 #define PPC_TLBILX_VA(a, b)	PPC_TLBILX(3, a, b)
+
+#define PPC_TLBILX_EARLY(t, a, b) stringify_in_c(.long PPC_INST_TLBILX_EARLY | \
+					__PPC_T_TLB(t) | \
+					__PPC_RA(a) | __PPC_RB(b))
+#define PPC_TLBILX_ALL_EARLY(a, b)	PPC_TLBILX_EARLY(0, a, b)
+#define PPC_TLBILX_PID_EARLY(a, b)	PPC_TLBILX_EARLY(1, a, b)
+#define PPC_TLBILX_VA_EARLY(a, b)	PPC_TLBILX_EARLY(3, a, b)
 #define PPC_WAIT(w)		stringify_in_c(.long PPC_INST_WAIT | \
 					__PPC_WC(w))
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index cd1b687544f3..57db50f40289 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1766,7 +1766,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_features		= CPU_FTRS_E500MC,
 		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
 		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
-			MMU_FTR_USE_TLBILX,
+			MMU_FTR_USE_TLBILX | MMU_FTR_TLBILX_EARLY_OPCODE,
 		.icache_bsize		= 64,
 		.dcache_bsize		= 64,
 		.num_pmcs		= 4,
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 7af72970faed..ad2eb4d34dd4 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -125,7 +125,6 @@ static void do_flush_tlb_page_ipi(void *param)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t cpu_mask;
 	unsigned int pid;
 
 	preempt_disable();
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 788b87c36f77..45fed3698349 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -138,7 +138,11 @@ BEGIN_MMU_FTR_SECTION
 	andi.	r3,r3,MMUCSR0_TLBFI@l
 	bne	1b
 MMU_FTR_SECTION_ELSE
-	PPC_TLBILX_ALL(0,0)
+	BEGIN_MMU_FTR_SECTION_NESTED(96)
+	PPC_TLBILX_ALL(0,r3)
+	MMU_FTR_SECTION_ELSE_NESTED(96)
+	PPC_TLBILX_ALL_EARLY(0,r3)
+	ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_TLBILX_EARLY_OPCODE, 96)
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
 	msync
 	isync
@@ -151,7 +155,11 @@ BEGIN_MMU_FTR_SECTION
 	wrteei	0
 	mfspr	r4,SPRN_MAS6	/* save MAS6 */
 	mtspr	SPRN_MAS6,r3
+	BEGIN_MMU_FTR_SECTION_NESTED(96)
 	PPC_TLBILX_PID(0,0)
+	MMU_FTR_SECTION_ELSE_NESTED(96)
+	PPC_TLBILX_PID_EARLY(0,0)
+	ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_TLBILX_EARLY_OPCODE, 96)
 	mtspr	SPRN_MAS6,r4	/* restore MAS6 */
 	wrtee	r10
 MMU_FTR_SECTION_ELSE
@@ -185,7 +193,11 @@ BEGIN_MMU_FTR_SECTION
 	mtspr	SPRN_MAS1,r4
 	tlbwe
 MMU_FTR_SECTION_ELSE
+	BEGIN_MMU_FTR_SECTION_NESTED(96)
 	PPC_TLBILX_VA(0,r3)
+	MMU_FTR_SECTION_ELSE_NESTED(96)
+	PPC_TLBILX_VA_EARLY(0,r3)
+	ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_TLBILX_EARLY_OPCODE, 96)
 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
 	msync
 	isync
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index fafcaa0e81ef..ab69925d579b 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -25,6 +25,7 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/firmware.h>
 
 #include "plpar_wrappers.h"
 
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 380420f8c400..9a2a6e32f00f 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -182,6 +182,8 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
 	if (!driver)
 		return;
 
+	dev->error_state = pci_channel_io_normal;
+
 	eeh_enable_irq(dev);
 
 	if (!driver->err_handler ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e4babecf934..e7390dd0283d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,6 +14,7 @@ config SUPERH
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IOREMAP_PROT if MMU
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_DMA_API_DEBUG
 	help
 	  The SuperH is a RISC processor targeted for use in embedded systems
 	  and consumer electronics; it was also used in the Sega Dreamcast
@@ -21,7 +22,7 @@ config SUPERH
 	  <http://www.linux-sh.org/>.
 
 config SUPERH32
-	def_bool !SUPERH64
+	def_bool ARCH = "sh"
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_FUNCTION_TRACER
@@ -31,7 +32,7 @@ config SUPERH32
 	select ARCH_HIBERNATION_POSSIBLE if MMU
 
 config SUPERH64
-	def_bool y if CPU_SH5
+	def_bool ARCH = "sh64"
 
 config ARCH_DEFCONFIG
 	string
@@ -187,6 +188,8 @@ config ARCH_SHMOBILE
 	bool
 	select ARCH_SUSPEND_POSSIBLE
 
+if SUPERH32
+
 choice
 	prompt "Processor sub-type selection"
 
@@ -408,6 +411,15 @@ config CPU_SUBTYPE_SH7366
 	select SYS_SUPPORTS_NUMA
 	select SYS_SUPPORTS_CMT
 
+endchoice
+
+endif
+
+if SUPERH64
+
+choice
+	prompt "Processor sub-type selection"
+
 # SH-5 Processor Support
 
 config CPU_SUBTYPE_SH5_101
@@ -420,6 +432,8 @@ config CPU_SUBTYPE_SH5_103
 
 endchoice
 
+endif
+
 source "arch/sh/mm/Kconfig"
 
 source "arch/sh/Kconfig.cpu"
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 912458f666eb..39e46919df14 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -349,6 +349,7 @@ static int ov7725_power(struct device *dev, int mode)
 static struct ov772x_camera_info ov7725_info = {
 	.buswidth	= SOCAM_DATAWIDTH_8,
 	.flags		= OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
+	.edgectrl	= OV772X_AUTO_EDGECTRL(0xf, 0),
 	.link = {
 		.power	= ov7725_power,
 	},
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index 8367d1d789c3..beb88c4da2c1 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -2,6 +2,8 @@
  * Renesas Technology Corp. SH7786 Urquell Support.
  *
  * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on board-sh7785lcr.c
  * Copyright (C) 2008 Yoshihiro Shimoda
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -21,6 +23,32 @@
 #include <asm/heartbeat.h>
 #include <asm/sizes.h>
 
+/*
+ * bit  1234 5678
+ *----------------------------
+ * SW1  0101 0010  -> Pck 33MHz version
+ *      (1101 0010)   Pck 66MHz version
+ * SW2  0x1x xxxx  -> little endian
+ *                    29bit mode
+ * SW47 0001 1000  -> CS0 : on-board flash
+ *                    CS1 : SRAM, registers, LAN, PCMCIA
+ *                    38400 bps for SCIF1
+ *
+ * Address
+ * 0x00000000 - 0x04000000  (CS0)     Nor Flash
+ * 0x04000000 - 0x04200000  (CS1)     SRAM
+ * 0x05000000 - 0x05800000  (CS1)     on board register
+ * 0x05800000 - 0x06000000  (CS1)     LAN91C111
+ * 0x06000000 - 0x06400000  (CS1)     PCMCIA
+ * 0x08000000 - 0x10000000  (CS2-CS3) DDR3
+ * 0x10000000 - 0x14000000  (CS4)     PCIe
+ * 0x14000000 - 0x14800000  (CS5)     Core0 LRAM/URAM
+ * 0x14800000 - 0x15000000  (CS5)     Core1 LRAM/URAM
+ * 0x18000000 - 0x1C000000  (CS6)     ATA/NAND-Flash
+ * 0x1C000000 -             (CS7)     SH7786 Control register
+ */
+
+/* HeartBeat */
 static struct resource heartbeat_resources[] = {
 	[0] = {
 		.start	= BOARDREG(SLEDR),
@@ -43,6 +71,7 @@ static struct platform_device heartbeat_device = {
 	.resource	= heartbeat_resources,
 };
 
+/* LAN91C111 */
 static struct smc91x_platdata smc91x_info = {
 	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
 };
@@ -69,6 +98,7 @@
 	},
 };
 
+/* Nor Flash */
 static struct mtd_partition nor_flash_partitions[] = {
 	{
 		.name		= "loader",
diff --git a/arch/sh/drivers/pci/ops-sh7785lcr.c b/arch/sh/drivers/pci/ops-sh7785lcr.c
index e8b7446a7c2b..fb0869f0bef8 100644
--- a/arch/sh/drivers/pci/ops-sh7785lcr.c
+++ b/arch/sh/drivers/pci/ops-sh7785lcr.c
@@ -48,8 +48,13 @@ EXPORT_SYMBOL(board_pci_channels);
48 48
49static struct sh4_pci_address_map sh7785_pci_map = { 49static struct sh4_pci_address_map sh7785_pci_map = {
50 .window0 = { 50 .window0 = {
51#if defined(CONFIG_32BIT)
52 .base = SH7780_32BIT_DDR_BASE_ADDR,
53 .size = 0x40000000,
54#else
51 .base = SH7780_CS0_BASE_ADDR, 55 .base = SH7780_CS0_BASE_ADDR,
52 .size = 0x20000000, 56 .size = 0x20000000,
57#endif
53 }, 58 },
54 59
55 .flags = SH4_PCIC_NO_RESET, 60 .flags = SH4_PCIC_NO_RESET,
diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h
index 97b2c98f05c4..93adc7119b79 100644
--- a/arch/sh/drivers/pci/pci-sh7780.h
+++ b/arch/sh/drivers/pci/pci-sh7780.h
@@ -104,6 +104,8 @@
104#define SH7780_CS5_BASE_ADDR (SH7780_CS4_BASE_ADDR + SH7780_MEM_REGION_SIZE) 104#define SH7780_CS5_BASE_ADDR (SH7780_CS4_BASE_ADDR + SH7780_MEM_REGION_SIZE)
105#define SH7780_CS6_BASE_ADDR (SH7780_CS5_BASE_ADDR + SH7780_MEM_REGION_SIZE) 105#define SH7780_CS6_BASE_ADDR (SH7780_CS5_BASE_ADDR + SH7780_MEM_REGION_SIZE)
106 106
107#define SH7780_32BIT_DDR_BASE_ADDR 0x40000000
108
107struct sh4_pci_address_map; 109struct sh4_pci_address_map;
108 110
109/* arch/sh/drivers/pci/pci-sh7780.c */ 111/* arch/sh/drivers/pci/pci-sh7780.c */
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index e36c7b870861..0d6ac7a1db49 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/dma-debug.h>
22#include <asm/io.h> 23#include <asm/io.h>
23 24
24static int __init pcibios_init(void) 25static int __init pcibios_init(void)
@@ -43,6 +44,8 @@ static int __init pcibios_init(void)
43 44
44 pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq); 45 pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);
45 46
47 dma_debug_add_bus(&pci_bus_type);
48
46 return 0; 49 return 0;
47} 50}
48subsys_initcall(pcibios_init); 51subsys_initcall(pcibios_init);
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 627315ecdb52..ea9d4f41c9d2 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <linux/dma-debug.h>
6#include <asm/cacheflush.h> 7#include <asm/cacheflush.h>
7#include <asm/io.h> 8#include <asm/io.h>
8#include <asm-generic/dma-coherent.h> 9#include <asm-generic/dma-coherent.h>
@@ -38,16 +39,26 @@ static inline dma_addr_t dma_map_single(struct device *dev,
38 void *ptr, size_t size, 39 void *ptr, size_t size,
39 enum dma_data_direction dir) 40 enum dma_data_direction dir)
40{ 41{
42 dma_addr_t addr = virt_to_phys(ptr);
43
41#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) 44#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
42 if (dev->bus == &pci_bus_type) 45 if (dev->bus == &pci_bus_type)
43 return virt_to_phys(ptr); 46 return addr;
44#endif 47#endif
45 dma_cache_sync(dev, ptr, size, dir); 48 dma_cache_sync(dev, ptr, size, dir);
46 49
47 return virt_to_phys(ptr); 50 debug_dma_map_page(dev, virt_to_page(ptr),
51 (unsigned long)ptr & ~PAGE_MASK, size,
52 dir, addr, true);
53
54 return addr;
48} 55}
49 56
50#define dma_unmap_single(dev, addr, size, dir) do { } while (0) 57static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
58 size_t size, enum dma_data_direction dir)
59{
60 debug_dma_unmap_page(dev, addr, size, dir, true);
61}
51 62
52static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, 63static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
53 int nents, enum dma_data_direction dir) 64 int nents, enum dma_data_direction dir)
@@ -59,12 +70,19 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
59 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); 70 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
60#endif 71#endif
61 sg[i].dma_address = sg_phys(&sg[i]); 72 sg[i].dma_address = sg_phys(&sg[i]);
73 sg[i].dma_length = sg[i].length;
62 } 74 }
63 75
76 debug_dma_map_sg(dev, sg, nents, i, dir);
77
64 return nents; 78 return nents;
65} 79}
66 80
67#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0) 81static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
82 int nents, enum dma_data_direction dir)
83{
84 debug_dma_unmap_sg(dev, sg, nents, dir);
85}
68 86
69static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 87static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
70 unsigned long offset, size_t size, 88 unsigned long offset, size_t size,
@@ -111,6 +129,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
111 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); 129 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
112#endif 130#endif
113 sg[i].dma_address = sg_phys(&sg[i]); 131 sg[i].dma_address = sg_phys(&sg[i]);
132 sg[i].dma_length = sg[i].length;
114 } 133 }
115} 134}
116 135
@@ -119,6 +138,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
119 enum dma_data_direction dir) 138 enum dma_data_direction dir)
120{ 139{
121 dma_sync_single(dev, dma_handle, size, dir); 140 dma_sync_single(dev, dma_handle, size, dir);
141 debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
122} 142}
123 143
124static inline void dma_sync_single_for_device(struct device *dev, 144static inline void dma_sync_single_for_device(struct device *dev,
@@ -127,6 +147,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
127 enum dma_data_direction dir) 147 enum dma_data_direction dir)
128{ 148{
129 dma_sync_single(dev, dma_handle, size, dir); 149 dma_sync_single(dev, dma_handle, size, dir);
150 debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
130} 151}
131 152
132static inline void dma_sync_single_range_for_cpu(struct device *dev, 153static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -136,6 +157,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
136 enum dma_data_direction direction) 157 enum dma_data_direction direction)
137{ 158{
138 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); 159 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
160 debug_dma_sync_single_range_for_cpu(dev, dma_handle,
161 offset, size, direction);
139} 162}
140 163
141static inline void dma_sync_single_range_for_device(struct device *dev, 164static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -145,6 +168,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
145 enum dma_data_direction direction) 168 enum dma_data_direction direction)
146{ 169{
147 dma_sync_single_for_device(dev, dma_handle+offset, size, direction); 170 dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
171 debug_dma_sync_single_range_for_device(dev, dma_handle,
172 offset, size, direction);
148} 173}
149 174
150 175
@@ -153,6 +178,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
153 enum dma_data_direction dir) 178 enum dma_data_direction dir)
154{ 179{
155 dma_sync_sg(dev, sg, nelems, dir); 180 dma_sync_sg(dev, sg, nelems, dir);
181 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
156} 182}
157 183
158static inline void dma_sync_sg_for_device(struct device *dev, 184static inline void dma_sync_sg_for_device(struct device *dev,
@@ -160,9 +186,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
160 enum dma_data_direction dir) 186 enum dma_data_direction dir)
161{ 187{
162 dma_sync_sg(dev, sg, nelems, dir); 188 dma_sync_sg(dev, sg, nelems, dir);
189 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
163} 190}
164 191
165
166static inline int dma_get_cache_alignment(void) 192static inline int dma_get_cache_alignment(void)
167{ 193{
168 /* 194 /*
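
A minimal driver-side sketch (not part of this patch) of the pattern the dma-debug hooks above now verify: dma_map_single() must be paired with a dma_unmap_single() of the same size and direction, assuming CONFIG_DMA_API_DEBUG is enabled. The function and buffer names below are invented for illustration.

	#include <linux/dma-mapping.h>
	#include <linux/device.h>

	/* Map a buffer for a device-bound transfer, start the hardware,
	 * then unmap with matching size/direction so dma-debug stays quiet. */
	static int example_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		/* ... program the controller with 'handle', wait for completion ... */
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}
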
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
index 2084d0373693..c693d268a413 100644
--- a/arch/sh/include/asm/scatterlist.h
+++ b/arch/sh/include/asm/scatterlist.h
@@ -5,12 +5,13 @@
5 5
6struct scatterlist { 6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic; 8 unsigned long sg_magic;
9#endif 9#endif
10 unsigned long page_link; 10 unsigned long page_link;
11 unsigned int offset;/* for highmem, page offset */ 11 unsigned int offset; /* for highmem, page offset */
12 dma_addr_t dma_address; 12 unsigned int length;
13 unsigned int length; 13 dma_addr_t dma_address;
14 unsigned int dma_length;
14}; 15};
15 16
16#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK 17#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index a3f239545897..8489a0905a87 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -37,8 +37,11 @@
37#define pcibus_to_node(bus) ((void)(bus), -1) 37#define pcibus_to_node(bus) ((void)(bus), -1)
38#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ 38#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
39 CPU_MASK_ALL : \ 39 CPU_MASK_ALL : \
40 node_to_cpumask(pcibus_to_node(bus)) \ 40 node_to_cpumask(pcibus_to_node(bus)))
41 ) 41#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
42 CPU_MASK_ALL_PTR : \
43 cpumask_of_node(pcibus_to_node(bus)))
44
42#endif 45#endif
43 46
44#include <asm-generic/topology.h> 47#include <asm-generic/topology.h>
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index d52c000cf924..2efb819e2db3 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -341,8 +341,10 @@
341#define __NR_dup3 330 341#define __NR_dup3 330
342#define __NR_pipe2 331 342#define __NR_pipe2 331
343#define __NR_inotify_init1 332 343#define __NR_inotify_init1 332
344#define __NR_preadv 333
345#define __NR_pwritev 334
344 346
345#define NR_syscalls 333 347#define NR_syscalls 335
346 348
347#ifdef __KERNEL__ 349#ifdef __KERNEL__
348 350
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 7c54e91753c1..6eb9d2934c0f 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -381,10 +381,12 @@
381#define __NR_dup3 358 381#define __NR_dup3 358
382#define __NR_pipe2 359 382#define __NR_pipe2 359
383#define __NR_inotify_init1 360 383#define __NR_inotify_init1 360
384#define __NR_preadv 361
385#define __NR_pwritev 362
384 386
385#ifdef __KERNEL__ 387#ifdef __KERNEL__
386 388
387#define NR_syscalls 361 389#define NR_syscalls 363
388 390
389#define __ARCH_WANT_IPC_PARSE_VERSION 391#define __ARCH_WANT_IPC_PARSE_VERSION
390#define __ARCH_WANT_OLD_READDIR 392#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 5a47e1cf442e..90e8cfff55fd 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -143,14 +143,14 @@ static void __init sh7786_usb_setup(void)
143 * Set the PHY and PLL enable bit 143 * Set the PHY and PLL enable bit
144 */ 144 */
145 __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1); 145 __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
146 while (i-- && 146 while (i--) {
147 ((__raw_readl(USBST) & ACT_PLL_STATUS) != ACT_PLL_STATUS)) 147 if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
148 /* Set the PHY RST bit */
149 __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
150 printk(KERN_INFO "sh7786 usb setup done\n");
151 break;
152 }
148 cpu_relax(); 153 cpu_relax();
149
150 if (i) {
151 /* Set the PHY RST bit */
152 __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
153 printk(KERN_INFO "sh7786 usb setup done\n");
154 } 154 }
155} 155}
156 156
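
The hunk above restructures the PLL wait into a bounded poll loop. In isolation, that pattern looks roughly like the sketch below; the helper name and return convention are assumptions, not part of the patch.

	#include <linux/io.h>
	#include <linux/errno.h>
	#include <asm/processor.h>	/* cpu_relax() */

	/* Poll a status register until every bit in 'mask' is set, giving up
	 * after 'retries' iterations, as the SH7786 USB setup loop does. */
	static int wait_for_status(void __iomem *reg, u32 mask, int retries)
	{
		while (retries--) {
			if ((__raw_readl(reg) & mask) == mask)
				return 0;
			cpu_relax();
		}
		return -ETIMEDOUT;
	}
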
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index e67c1733e1b9..05202edd8e21 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -349,3 +349,5 @@ ENTRY(sys_call_table)
349 .long sys_dup3 /* 330 */ 349 .long sys_dup3 /* 330 */
350 .long sys_pipe2 350 .long sys_pipe2
351 .long sys_inotify_init1 351 .long sys_inotify_init1
352 .long sys_preadv
353	.long sys_pwritev
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 557cb91f5caf..a083609f9284 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -387,3 +387,5 @@ sys_call_table:
387 .long sys_dup3 387 .long sys_dup3
388 .long sys_pipe2 388 .long sys_pipe2
389 .long sys_inotify_init1 /* 360 */ 389 .long sys_inotify_init1 /* 360 */
390 .long sys_preadv
391 .long sys_pwritev
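
With the table entries above wired up, the new syscalls become reachable through the usual userspace wrappers. A small, runnable illustration, assuming a libc that exposes preadv()/pwritev() (e.g. glibc 2.10 or later); the file name is arbitrary.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		char a[] = "hello ", b[] = "world\n";
		struct iovec iov[2] = {
			{ .iov_base = a, .iov_len = strlen(a) },
			{ .iov_base = b, .iov_len = strlen(b) },
		};
		int fd = open("demo.txt", O_RDWR | O_CREAT | O_TRUNC, 0644);

		if (fd < 0)
			return 1;
		/* gather-write both buffers at offset 0 without moving the file offset */
		if (pwritev(fd, iov, 2, 0) < 0)
			perror("pwritev");
		close(fd);
		return 0;
	}
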
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index edcd5fbf9651..e098ec158ddb 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -10,11 +10,22 @@
10 * for more details. 10 * for more details.
11 */ 11 */
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/init.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16#include <linux/dma-debug.h>
17#include <linux/io.h>
15#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
16#include <asm/addrspace.h> 19#include <asm/addrspace.h>
17#include <asm/io.h> 20
21#define PREALLOC_DMA_DEBUG_ENTRIES 4096
22
23static int __init dma_init(void)
24{
25 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
26 return 0;
27}
28fs_initcall(dma_init);
18 29
19void *dma_alloc_coherent(struct device *dev, size_t size, 30void *dma_alloc_coherent(struct device *dev, size_t size,
20 dma_addr_t *dma_handle, gfp_t gfp) 31 dma_addr_t *dma_handle, gfp_t gfp)
@@ -45,6 +56,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
45 split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); 56 split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
46 57
47 *dma_handle = virt_to_phys(ret); 58 *dma_handle = virt_to_phys(ret);
59
60 debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
61
48 return ret_nocache; 62 return ret_nocache;
49} 63}
50EXPORT_SYMBOL(dma_alloc_coherent); 64EXPORT_SYMBOL(dma_alloc_coherent);
@@ -56,12 +70,15 @@ void dma_free_coherent(struct device *dev, size_t size,
56 unsigned long pfn = dma_handle >> PAGE_SHIFT; 70 unsigned long pfn = dma_handle >> PAGE_SHIFT;
57 int k; 71 int k;
58 72
59 if (!dma_release_from_coherent(dev, order, vaddr)) { 73 WARN_ON(irqs_disabled()); /* for portability */
60 WARN_ON(irqs_disabled()); /* for portability */ 74
61 for (k = 0; k < (1 << order); k++) 75 if (dma_release_from_coherent(dev, order, vaddr))
62 __free_pages(pfn_to_page(pfn + k), 0); 76 return;
63 iounmap(vaddr); 77
64 } 78 debug_dma_free_coherent(dev, size, vaddr, dma_handle);
79 for (k = 0; k < (1 << order); k++)
80 __free_pages(pfn_to_page(pfn + k), 0);
81 iounmap(vaddr);
65} 82}
66EXPORT_SYMBOL(dma_free_coherent); 83EXPORT_SYMBOL(dma_free_coherent);
67 84
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index dff3f0253aa8..ff9ead640c4a 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -117,7 +117,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
117 if (!strcmp(parent->name, "dma")) { 117 if (!strcmp(parent->name, "dma")) {
118 p = parport_pc_probe_port(base, base + 0x400, 118 p = parport_pc_probe_port(base, base + 0x400,
119 op->irqs[0], PARPORT_DMA_NOFIFO, 119 op->irqs[0], PARPORT_DMA_NOFIFO,
120 op->dev.parent->parent); 120 op->dev.parent->parent, 0);
121 if (!p) 121 if (!p)
122 return -ENOMEM; 122 return -ENOMEM;
123 dev_set_drvdata(&op->dev, p); 123 dev_set_drvdata(&op->dev, p);
@@ -168,7 +168,8 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
168 p = parport_pc_probe_port(base, base + 0x400, 168 p = parport_pc_probe_port(base, base + 0x400,
169 op->irqs[0], 169 op->irqs[0],
170 slot, 170 slot,
171 op->dev.parent); 171 op->dev.parent,
172 0);
172 err = -ENOMEM; 173 err = -ENOMEM;
173 if (!p) 174 if (!p)
174 goto out_disable_irq; 175 goto out_disable_irq;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 837c2c4cc203..ecdb682ab516 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -204,7 +204,13 @@ static void drv_read(struct drv_cmd *cmd)
204 204
205static void drv_write(struct drv_cmd *cmd) 205static void drv_write(struct drv_cmd *cmd)
206{ 206{
207 int this_cpu;
208
209 this_cpu = get_cpu();
210 if (cpumask_test_cpu(this_cpu, cmd->mask))
211 do_drv_write(cmd);
207 smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); 212 smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
213 put_cpu();
208} 214}
209 215
210static u32 get_cur_val(const struct cpumask *mask) 216static u32 get_cur_val(const struct cpumask *mask)
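
The drv_write() change above exists because smp_call_function_many() never runs the callback on the calling CPU, so that CPU has to be handled explicitly. The general shape of the workaround, as a sketch (the helper name is made up):

	#include <linux/smp.h>
	#include <linux/cpumask.h>

	/* Run 'fn' on every CPU in 'mask', including the current one, which
	 * smp_call_function_many() would otherwise skip. */
	static void run_on_mask(const struct cpumask *mask, void (*fn)(void *), void *arg)
	{
		int cpu = get_cpu();	/* disable preemption, pin to this CPU */

		if (cpumask_test_cpu(cpu, mask))
			fn(arg);
		smp_call_function_many(mask, fn, arg, 1);
		put_cpu();
	}
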
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..2e0eb4140951 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
108EXPORT_SYMBOL_GPL(ucode_cpu_info); 108EXPORT_SYMBOL_GPL(ucode_cpu_info);
109 109
110#ifdef CONFIG_MICROCODE_OLD_INTERFACE 110#ifdef CONFIG_MICROCODE_OLD_INTERFACE
111struct update_for_cpu {
112 const void __user *buf;
113 size_t size;
114};
115
116static long update_for_cpu(void *_ufc)
117{
118 struct update_for_cpu *ufc = _ufc;
119 int error;
120
121 error = microcode_ops->request_microcode_user(smp_processor_id(),
122 ufc->buf, ufc->size);
123 if (error < 0)
124 return error;
125 if (!error)
126 microcode_ops->apply_microcode(smp_processor_id());
127 return error;
128}
129
130static int do_microcode_update(const void __user *buf, size_t size) 111static int do_microcode_update(const void __user *buf, size_t size)
131{ 112{
113 cpumask_t old;
132 int error = 0; 114 int error = 0;
133 int cpu; 115 int cpu;
134 struct update_for_cpu ufc = { .buf = buf, .size = size }; 116
117 old = current->cpus_allowed;
135 118
136 for_each_online_cpu(cpu) { 119 for_each_online_cpu(cpu) {
137 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 120 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
138 121
139 if (!uci->valid) 122 if (!uci->valid)
140 continue; 123 continue;
141 error = work_on_cpu(cpu, update_for_cpu, &ufc); 124
125 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
126 error = microcode_ops->request_microcode_user(cpu, buf, size);
142 if (error < 0) 127 if (error < 0)
143 break; 128 goto out;
129 if (!error)
130 microcode_ops->apply_microcode(cpu);
144 } 131 }
132out:
133 set_cpus_allowed_ptr(current, &old);
145 return error; 134 return error;
146} 135}
147 136
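
The rewritten do_microcode_update() above returns to the older pattern of temporarily re-binding the calling task to each CPU and restoring its affinity afterwards. Stripped of the microcode specifics, that pattern looks roughly like this sketch (the helper and its callback are hypothetical):

	#include <linux/sched.h>
	#include <linux/cpumask.h>

	/* Call 'fn' with the current task pinned to each online CPU in turn,
	 * then restore the original affinity mask. */
	static int for_each_online_cpu_pinned(int (*fn)(int cpu, void *arg), void *arg)
	{
		cpumask_t saved = current->cpus_allowed;
		int cpu, err = 0;

		for_each_online_cpu(cpu) {
			set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
			err = fn(cpu, arg);
			if (err < 0)
				break;
		}
		set_cpus_allowed_ptr(current, &saved);
		return err;
	}
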
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 631f6f44460a..c48fa670d221 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -17,9 +17,6 @@
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19 19
20#define REQ_SYNC 1
21#define REQ_ASYNC 0
22
23/* 20/*
24 * See Documentation/block/as-iosched.txt 21 * See Documentation/block/as-iosched.txt
25 */ 22 */
@@ -93,7 +90,7 @@ struct as_data {
93 struct list_head fifo_list[2]; 90 struct list_head fifo_list[2];
94 91
95 struct request *next_rq[2]; /* next in sort order */ 92 struct request *next_rq[2]; /* next in sort order */
96 sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ 93 sector_t last_sector[2]; /* last SYNC & ASYNC sectors */
97 94
98 unsigned long exit_prob; /* probability a task will exit while 95 unsigned long exit_prob; /* probability a task will exit while
99 being waited on */ 96 being waited on */
@@ -109,7 +106,7 @@ struct as_data {
109 unsigned long last_check_fifo[2]; 106 unsigned long last_check_fifo[2];
110 int changed_batch; /* 1: waiting for old batch to end */ 107 int changed_batch; /* 1: waiting for old batch to end */
111 int new_batch; /* 1: waiting on first read complete */ 108 int new_batch; /* 1: waiting on first read complete */
112 int batch_data_dir; /* current batch REQ_SYNC / REQ_ASYNC */ 109 int batch_data_dir; /* current batch SYNC / ASYNC */
113 int write_batch_count; /* max # of reqs in a write batch */ 110 int write_batch_count; /* max # of reqs in a write batch */
114 int current_write_count; /* how many requests left this batch */ 111 int current_write_count; /* how many requests left this batch */
115 int write_batch_idled; /* has the write batch gone idle? */ 112 int write_batch_idled; /* has the write batch gone idle? */
@@ -554,7 +551,7 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
554 if (aic == NULL) 551 if (aic == NULL)
555 return; 552 return;
556 553
557 if (data_dir == REQ_SYNC) { 554 if (data_dir == BLK_RW_SYNC) {
558 unsigned long in_flight = atomic_read(&aic->nr_queued) 555 unsigned long in_flight = atomic_read(&aic->nr_queued)
559 + atomic_read(&aic->nr_dispatched); 556 + atomic_read(&aic->nr_dispatched);
560 spin_lock(&aic->lock); 557 spin_lock(&aic->lock);
@@ -811,7 +808,7 @@ static void as_update_rq(struct as_data *ad, struct request *rq)
811 */ 808 */
812static void update_write_batch(struct as_data *ad) 809static void update_write_batch(struct as_data *ad)
813{ 810{
814 unsigned long batch = ad->batch_expire[REQ_ASYNC]; 811 unsigned long batch = ad->batch_expire[BLK_RW_ASYNC];
815 long write_time; 812 long write_time;
816 813
817 write_time = (jiffies - ad->current_batch_expires) + batch; 814 write_time = (jiffies - ad->current_batch_expires) + batch;
@@ -855,7 +852,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
855 kblockd_schedule_work(q, &ad->antic_work); 852 kblockd_schedule_work(q, &ad->antic_work);
856 ad->changed_batch = 0; 853 ad->changed_batch = 0;
857 854
858 if (ad->batch_data_dir == REQ_SYNC) 855 if (ad->batch_data_dir == BLK_RW_SYNC)
859 ad->new_batch = 1; 856 ad->new_batch = 1;
860 } 857 }
861 WARN_ON(ad->nr_dispatched == 0); 858 WARN_ON(ad->nr_dispatched == 0);
@@ -869,7 +866,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
869 if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) { 866 if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
870 update_write_batch(ad); 867 update_write_batch(ad);
871 ad->current_batch_expires = jiffies + 868 ad->current_batch_expires = jiffies +
872 ad->batch_expire[REQ_SYNC]; 869 ad->batch_expire[BLK_RW_SYNC];
873 ad->new_batch = 0; 870 ad->new_batch = 0;
874 } 871 }
875 872
@@ -960,7 +957,7 @@ static inline int as_batch_expired(struct as_data *ad)
960 if (ad->changed_batch || ad->new_batch) 957 if (ad->changed_batch || ad->new_batch)
961 return 0; 958 return 0;
962 959
963 if (ad->batch_data_dir == REQ_SYNC) 960 if (ad->batch_data_dir == BLK_RW_SYNC)
964 /* TODO! add a check so a complete fifo gets written? */ 961 /* TODO! add a check so a complete fifo gets written? */
965 return time_after(jiffies, ad->current_batch_expires); 962 return time_after(jiffies, ad->current_batch_expires);
966 963
@@ -986,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
986 */ 983 */
987 ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; 984 ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
988 985
989 if (data_dir == REQ_SYNC) { 986 if (data_dir == BLK_RW_SYNC) {
990 struct io_context *ioc = RQ_IOC(rq); 987 struct io_context *ioc = RQ_IOC(rq);
991 /* In case we have to anticipate after this */ 988 /* In case we have to anticipate after this */
992 copy_io_context(&ad->io_context, &ioc); 989 copy_io_context(&ad->io_context, &ioc);
@@ -1025,41 +1022,41 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
1025static int as_dispatch_request(struct request_queue *q, int force) 1022static int as_dispatch_request(struct request_queue *q, int force)
1026{ 1023{
1027 struct as_data *ad = q->elevator->elevator_data; 1024 struct as_data *ad = q->elevator->elevator_data;
1028 const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); 1025 const int reads = !list_empty(&ad->fifo_list[BLK_RW_SYNC]);
1029 const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); 1026 const int writes = !list_empty(&ad->fifo_list[BLK_RW_ASYNC]);
1030 struct request *rq; 1027 struct request *rq;
1031 1028
1032 if (unlikely(force)) { 1029 if (unlikely(force)) {
1033 /* 1030 /*
1034 * Forced dispatch, accounting is useless. Reset 1031 * Forced dispatch, accounting is useless. Reset
1035 * accounting states and dump fifo_lists. Note that 1032 * accounting states and dump fifo_lists. Note that
1036 * batch_data_dir is reset to REQ_SYNC to avoid 1033 * batch_data_dir is reset to BLK_RW_SYNC to avoid
1037 * screwing write batch accounting as write batch 1034 * screwing write batch accounting as write batch
1038 * accounting occurs on W->R transition. 1035 * accounting occurs on W->R transition.
1039 */ 1036 */
1040 int dispatched = 0; 1037 int dispatched = 0;
1041 1038
1042 ad->batch_data_dir = REQ_SYNC; 1039 ad->batch_data_dir = BLK_RW_SYNC;
1043 ad->changed_batch = 0; 1040 ad->changed_batch = 0;
1044 ad->new_batch = 0; 1041 ad->new_batch = 0;
1045 1042
1046 while (ad->next_rq[REQ_SYNC]) { 1043 while (ad->next_rq[BLK_RW_SYNC]) {
1047 as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]); 1044 as_move_to_dispatch(ad, ad->next_rq[BLK_RW_SYNC]);
1048 dispatched++; 1045 dispatched++;
1049 } 1046 }
1050 ad->last_check_fifo[REQ_SYNC] = jiffies; 1047 ad->last_check_fifo[BLK_RW_SYNC] = jiffies;
1051 1048
1052 while (ad->next_rq[REQ_ASYNC]) { 1049 while (ad->next_rq[BLK_RW_ASYNC]) {
1053 as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]); 1050 as_move_to_dispatch(ad, ad->next_rq[BLK_RW_ASYNC]);
1054 dispatched++; 1051 dispatched++;
1055 } 1052 }
1056 ad->last_check_fifo[REQ_ASYNC] = jiffies; 1053 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
1057 1054
1058 return dispatched; 1055 return dispatched;
1059 } 1056 }
1060 1057
1061 /* Signal that the write batch was uncontended, so we can't time it */ 1058 /* Signal that the write batch was uncontended, so we can't time it */
1062 if (ad->batch_data_dir == REQ_ASYNC && !reads) { 1059 if (ad->batch_data_dir == BLK_RW_ASYNC && !reads) {
1063 if (ad->current_write_count == 0 || !writes) 1060 if (ad->current_write_count == 0 || !writes)
1064 ad->write_batch_idled = 1; 1061 ad->write_batch_idled = 1;
1065 } 1062 }
@@ -1076,8 +1073,8 @@ static int as_dispatch_request(struct request_queue *q, int force)
1076 */ 1073 */
1077 rq = ad->next_rq[ad->batch_data_dir]; 1074 rq = ad->next_rq[ad->batch_data_dir];
1078 1075
1079 if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) { 1076 if (ad->batch_data_dir == BLK_RW_SYNC && ad->antic_expire) {
1080 if (as_fifo_expired(ad, REQ_SYNC)) 1077 if (as_fifo_expired(ad, BLK_RW_SYNC))
1081 goto fifo_expired; 1078 goto fifo_expired;
1082 1079
1083 if (as_can_anticipate(ad, rq)) { 1080 if (as_can_anticipate(ad, rq)) {
@@ -1090,7 +1087,7 @@ static int as_dispatch_request(struct request_queue *q, int force)
1090 /* we have a "next request" */ 1087 /* we have a "next request" */
1091 if (reads && !writes) 1088 if (reads && !writes)
1092 ad->current_batch_expires = 1089 ad->current_batch_expires =
1093 jiffies + ad->batch_expire[REQ_SYNC]; 1090 jiffies + ad->batch_expire[BLK_RW_SYNC];
1094 goto dispatch_request; 1091 goto dispatch_request;
1095 } 1092 }
1096 } 1093 }
@@ -1101,20 +1098,20 @@ static int as_dispatch_request(struct request_queue *q, int force)
1101 */ 1098 */
1102 1099
1103 if (reads) { 1100 if (reads) {
1104 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); 1101 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_SYNC]));
1105 1102
1106 if (writes && ad->batch_data_dir == REQ_SYNC) 1103 if (writes && ad->batch_data_dir == BLK_RW_SYNC)
1107 /* 1104 /*
1108 * Last batch was a read, switch to writes 1105 * Last batch was a read, switch to writes
1109 */ 1106 */
1110 goto dispatch_writes; 1107 goto dispatch_writes;
1111 1108
1112 if (ad->batch_data_dir == REQ_ASYNC) { 1109 if (ad->batch_data_dir == BLK_RW_ASYNC) {
1113 WARN_ON(ad->new_batch); 1110 WARN_ON(ad->new_batch);
1114 ad->changed_batch = 1; 1111 ad->changed_batch = 1;
1115 } 1112 }
1116 ad->batch_data_dir = REQ_SYNC; 1113 ad->batch_data_dir = BLK_RW_SYNC;
1117 rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next); 1114 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_SYNC].next);
1118 ad->last_check_fifo[ad->batch_data_dir] = jiffies; 1115 ad->last_check_fifo[ad->batch_data_dir] = jiffies;
1119 goto dispatch_request; 1116 goto dispatch_request;
1120 } 1117 }
@@ -1125,9 +1122,9 @@ static int as_dispatch_request(struct request_queue *q, int force)
1125 1122
1126 if (writes) { 1123 if (writes) {
1127dispatch_writes: 1124dispatch_writes:
1128 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); 1125 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_ASYNC]));
1129 1126
1130 if (ad->batch_data_dir == REQ_SYNC) { 1127 if (ad->batch_data_dir == BLK_RW_SYNC) {
1131 ad->changed_batch = 1; 1128 ad->changed_batch = 1;
1132 1129
1133 /* 1130 /*
@@ -1137,11 +1134,11 @@ dispatch_writes:
1137 */ 1134 */
1138 ad->new_batch = 0; 1135 ad->new_batch = 0;
1139 } 1136 }
1140 ad->batch_data_dir = REQ_ASYNC; 1137 ad->batch_data_dir = BLK_RW_ASYNC;
1141 ad->current_write_count = ad->write_batch_count; 1138 ad->current_write_count = ad->write_batch_count;
1142 ad->write_batch_idled = 0; 1139 ad->write_batch_idled = 0;
1143 rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next); 1140 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_ASYNC].next);
1144 ad->last_check_fifo[REQ_ASYNC] = jiffies; 1141 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
1145 goto dispatch_request; 1142 goto dispatch_request;
1146 } 1143 }
1147 1144
@@ -1164,9 +1161,9 @@ fifo_expired:
1164 if (ad->nr_dispatched) 1161 if (ad->nr_dispatched)
1165 return 0; 1162 return 0;
1166 1163
1167 if (ad->batch_data_dir == REQ_ASYNC) 1164 if (ad->batch_data_dir == BLK_RW_ASYNC)
1168 ad->current_batch_expires = jiffies + 1165 ad->current_batch_expires = jiffies +
1169 ad->batch_expire[REQ_ASYNC]; 1166 ad->batch_expire[BLK_RW_ASYNC];
1170 else 1167 else
1171 ad->new_batch = 1; 1168 ad->new_batch = 1;
1172 1169
@@ -1238,8 +1235,8 @@ static int as_queue_empty(struct request_queue *q)
1238{ 1235{
1239 struct as_data *ad = q->elevator->elevator_data; 1236 struct as_data *ad = q->elevator->elevator_data;
1240 1237
1241 return list_empty(&ad->fifo_list[REQ_ASYNC]) 1238 return list_empty(&ad->fifo_list[BLK_RW_ASYNC])
1242 && list_empty(&ad->fifo_list[REQ_SYNC]); 1239 && list_empty(&ad->fifo_list[BLK_RW_SYNC]);
1243} 1240}
1244 1241
1245static int 1242static int
@@ -1346,8 +1343,8 @@ static void as_exit_queue(struct elevator_queue *e)
1346 del_timer_sync(&ad->antic_timer); 1343 del_timer_sync(&ad->antic_timer);
1347 cancel_work_sync(&ad->antic_work); 1344 cancel_work_sync(&ad->antic_work);
1348 1345
1349 BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); 1346 BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_SYNC]));
1350 BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); 1347 BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_ASYNC]));
1351 1348
1352 put_io_context(ad->io_context); 1349 put_io_context(ad->io_context);
1353 kfree(ad); 1350 kfree(ad);
@@ -1372,18 +1369,18 @@ static void *as_init_queue(struct request_queue *q)
1372 init_timer(&ad->antic_timer); 1369 init_timer(&ad->antic_timer);
1373 INIT_WORK(&ad->antic_work, as_work_handler); 1370 INIT_WORK(&ad->antic_work, as_work_handler);
1374 1371
1375 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); 1372 INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_SYNC]);
1376 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); 1373 INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_ASYNC]);
1377 ad->sort_list[REQ_SYNC] = RB_ROOT; 1374 ad->sort_list[BLK_RW_SYNC] = RB_ROOT;
1378 ad->sort_list[REQ_ASYNC] = RB_ROOT; 1375 ad->sort_list[BLK_RW_ASYNC] = RB_ROOT;
1379 ad->fifo_expire[REQ_SYNC] = default_read_expire; 1376 ad->fifo_expire[BLK_RW_SYNC] = default_read_expire;
1380 ad->fifo_expire[REQ_ASYNC] = default_write_expire; 1377 ad->fifo_expire[BLK_RW_ASYNC] = default_write_expire;
1381 ad->antic_expire = default_antic_expire; 1378 ad->antic_expire = default_antic_expire;
1382 ad->batch_expire[REQ_SYNC] = default_read_batch_expire; 1379 ad->batch_expire[BLK_RW_SYNC] = default_read_batch_expire;
1383 ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; 1380 ad->batch_expire[BLK_RW_ASYNC] = default_write_batch_expire;
1384 1381
1385 ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; 1382 ad->current_batch_expires = jiffies + ad->batch_expire[BLK_RW_SYNC];
1386 ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; 1383 ad->write_batch_count = ad->batch_expire[BLK_RW_ASYNC] / 10;
1387 if (ad->write_batch_count < 2) 1384 if (ad->write_batch_count < 2)
1388 ad->write_batch_count = 2; 1385 ad->write_batch_count = 2;
1389 1386
@@ -1432,11 +1429,11 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1432 struct as_data *ad = e->elevator_data; \ 1429 struct as_data *ad = e->elevator_data; \
1433 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ 1430 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
1434} 1431}
1435SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); 1432SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[BLK_RW_SYNC]);
1436SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); 1433SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[BLK_RW_ASYNC]);
1437SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); 1434SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
1438SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); 1435SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[BLK_RW_SYNC]);
1439SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); 1436SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[BLK_RW_ASYNC]);
1440#undef SHOW_FUNCTION 1437#undef SHOW_FUNCTION
1441 1438
1442#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ 1439#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
@@ -1451,13 +1448,14 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
1451 *(__PTR) = msecs_to_jiffies(*(__PTR)); \ 1448 *(__PTR) = msecs_to_jiffies(*(__PTR)); \
1452 return ret; \ 1449 return ret; \
1453} 1450}
1454STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); 1451STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[BLK_RW_SYNC], 0, INT_MAX);
1455STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); 1452STORE_FUNCTION(as_write_expire_store,
1453 &ad->fifo_expire[BLK_RW_ASYNC], 0, INT_MAX);
1456STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); 1454STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
1457STORE_FUNCTION(as_read_batch_expire_store, 1455STORE_FUNCTION(as_read_batch_expire_store,
1458 &ad->batch_expire[REQ_SYNC], 0, INT_MAX); 1456 &ad->batch_expire[BLK_RW_SYNC], 0, INT_MAX);
1459STORE_FUNCTION(as_write_batch_expire_store, 1457STORE_FUNCTION(as_write_batch_expire_store,
1460 &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); 1458 &ad->batch_expire[BLK_RW_ASYNC], 0, INT_MAX);
1461#undef STORE_FUNCTION 1459#undef STORE_FUNCTION
1462 1460
1463#define AS_ATTR(name) \ 1461#define AS_ATTR(name) \
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index f7dae57e6cab..20b4111fa050 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -319,9 +319,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
319 return -ENXIO; 319 return -ENXIO;
320 320
321 bio = bio_alloc(GFP_KERNEL, 0); 321 bio = bio_alloc(GFP_KERNEL, 0);
322 if (!bio)
323 return -ENOMEM;
324
325 bio->bi_end_io = bio_end_empty_barrier; 322 bio->bi_end_io = bio_end_empty_barrier;
326 bio->bi_private = &wait; 323 bio->bi_private = &wait;
327 bio->bi_bdev = bdev; 324 bio->bi_bdev = bdev;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 73f36beff5cd..cac4e9febe6a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -209,14 +209,14 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
209 ssize_t ret = queue_var_store(&stats, page, count); 209 ssize_t ret = queue_var_store(&stats, page, count);
210 210
211 spin_lock_irq(q->queue_lock); 211 spin_lock_irq(q->queue_lock);
212 elv_quisce_start(q); 212 elv_quiesce_start(q);
213 213
214 if (stats) 214 if (stats)
215 queue_flag_set(QUEUE_FLAG_IO_STAT, q); 215 queue_flag_set(QUEUE_FLAG_IO_STAT, q);
216 else 216 else
217 queue_flag_clear(QUEUE_FLAG_IO_STAT, q); 217 queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
218 218
219 elv_quisce_end(q); 219 elv_quiesce_end(q);
220 spin_unlock_irq(q->queue_lock); 220 spin_unlock_irq(q->queue_lock);
221 221
222 return ret; 222 return ret;
diff --git a/block/blk.h b/block/blk.h
index 24fcaeeaf620..5dfc41267a08 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -70,8 +70,8 @@ void blk_queue_congestion_threshold(struct request_queue *q);
70 70
71int blk_dev_init(void); 71int blk_dev_init(void);
72 72
73void elv_quisce_start(struct request_queue *q); 73void elv_quiesce_start(struct request_queue *q);
74void elv_quisce_end(struct request_queue *q); 74void elv_quiesce_end(struct request_queue *q);
75 75
76 76
77/* 77/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a4809de6fea6..0d3b70de3d80 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -56,9 +56,6 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
56#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) 56#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
57#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) 57#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
58 58
59#define ASYNC (0)
60#define SYNC (1)
61
62#define sample_valid(samples) ((samples) > 80) 59#define sample_valid(samples) ((samples) > 80)
63 60
64/* 61/*
@@ -83,6 +80,14 @@ struct cfq_data {
83 * rr list of queues with requests and the count of them 80 * rr list of queues with requests and the count of them
84 */ 81 */
85 struct cfq_rb_root service_tree; 82 struct cfq_rb_root service_tree;
83
84 /*
85 * Each priority tree is sorted by next_request position. These
86 * trees are used when determining if two or more queues are
87 * interleaving requests (see cfq_close_cooperator).
88 */
89 struct rb_root prio_trees[CFQ_PRIO_LISTS];
90
86 unsigned int busy_queues; 91 unsigned int busy_queues;
87 /* 92 /*
88 * Used to track any pending rt requests so we can pre-empt current 93 * Used to track any pending rt requests so we can pre-empt current
@@ -147,6 +152,8 @@ struct cfq_queue {
147 struct rb_node rb_node; 152 struct rb_node rb_node;
148 /* service_tree key */ 153 /* service_tree key */
149 unsigned long rb_key; 154 unsigned long rb_key;
155 /* prio tree member */
156 struct rb_node p_node;
150 /* sorted list of pending requests */ 157 /* sorted list of pending requests */
151 struct rb_root sort_list; 158 struct rb_root sort_list;
152 /* if fifo isn't expired, next request to serve */ 159 /* if fifo isn't expired, next request to serve */
@@ -185,6 +192,7 @@ enum cfqq_state_flags {
185 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ 192 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
186 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 193 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
187 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 194 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
195 CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */
188}; 196};
189 197
190#define CFQ_CFQQ_FNS(name) \ 198#define CFQ_CFQQ_FNS(name) \
@@ -211,6 +219,7 @@ CFQ_CFQQ_FNS(idle_window);
211CFQ_CFQQ_FNS(prio_changed); 219CFQ_CFQQ_FNS(prio_changed);
212CFQ_CFQQ_FNS(slice_new); 220CFQ_CFQQ_FNS(slice_new);
213CFQ_CFQQ_FNS(sync); 221CFQ_CFQQ_FNS(sync);
222CFQ_CFQQ_FNS(coop);
214#undef CFQ_CFQQ_FNS 223#undef CFQ_CFQQ_FNS
215 224
216#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ 225#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
@@ -419,13 +428,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
419 return NULL; 428 return NULL;
420} 429}
421 430
431static void rb_erase_init(struct rb_node *n, struct rb_root *root)
432{
433 rb_erase(n, root);
434 RB_CLEAR_NODE(n);
435}
436
422static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) 437static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
423{ 438{
424 if (root->left == n) 439 if (root->left == n)
425 root->left = NULL; 440 root->left = NULL;
426 441 rb_erase_init(n, &root->rb);
427 rb_erase(n, &root->rb);
428 RB_CLEAR_NODE(n);
429} 442}
430 443
431/* 444/*
@@ -470,8 +483,8 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
470 * requests waiting to be processed. It is sorted in the order that 483 * requests waiting to be processed. It is sorted in the order that
471 * we will service the queues. 484 * we will service the queues.
472 */ 485 */
473static void cfq_service_tree_add(struct cfq_data *cfqd, 486static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
474 struct cfq_queue *cfqq, int add_front) 487 int add_front)
475{ 488{
476 struct rb_node **p, *parent; 489 struct rb_node **p, *parent;
477 struct cfq_queue *__cfqq; 490 struct cfq_queue *__cfqq;
@@ -544,6 +557,63 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
544 rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb); 557 rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
545} 558}
546 559
560static struct cfq_queue *
561cfq_prio_tree_lookup(struct cfq_data *cfqd, int ioprio, sector_t sector,
562 struct rb_node **ret_parent, struct rb_node ***rb_link)
563{
564 struct rb_root *root = &cfqd->prio_trees[ioprio];
565 struct rb_node **p, *parent;
566 struct cfq_queue *cfqq = NULL;
567
568 parent = NULL;
569 p = &root->rb_node;
570 while (*p) {
571 struct rb_node **n;
572
573 parent = *p;
574 cfqq = rb_entry(parent, struct cfq_queue, p_node);
575
576 /*
577 * Sort strictly based on sector. Smallest to the left,
578 * largest to the right.
579 */
580 if (sector > cfqq->next_rq->sector)
581 n = &(*p)->rb_right;
582 else if (sector < cfqq->next_rq->sector)
583 n = &(*p)->rb_left;
584 else
585 break;
586 p = n;
587 }
588
589 *ret_parent = parent;
590 if (rb_link)
591 *rb_link = p;
592 return NULL;
593}
594
595static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
596{
597 struct rb_root *root = &cfqd->prio_trees[cfqq->ioprio];
598 struct rb_node **p, *parent;
599 struct cfq_queue *__cfqq;
600
601 if (!RB_EMPTY_NODE(&cfqq->p_node))
602 rb_erase_init(&cfqq->p_node, root);
603
604 if (cfq_class_idle(cfqq))
605 return;
606 if (!cfqq->next_rq)
607 return;
608
609 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->ioprio, cfqq->next_rq->sector,
610 &parent, &p);
611 BUG_ON(__cfqq);
612
613 rb_link_node(&cfqq->p_node, parent, p);
614 rb_insert_color(&cfqq->p_node, root);
615}
616
547/* 617/*
548 * Update cfqq's position in the service tree. 618 * Update cfqq's position in the service tree.
549 */ 619 */
@@ -552,8 +622,10 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
552 /* 622 /*
553 * Resorting requires the cfqq to be on the RR list already. 623 * Resorting requires the cfqq to be on the RR list already.
554 */ 624 */
555 if (cfq_cfqq_on_rr(cfqq)) 625 if (cfq_cfqq_on_rr(cfqq)) {
556 cfq_service_tree_add(cfqd, cfqq, 0); 626 cfq_service_tree_add(cfqd, cfqq, 0);
627 cfq_prio_tree_add(cfqd, cfqq);
628 }
557} 629}
558 630
559/* 631/*
@@ -584,6 +656,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
584 656
585 if (!RB_EMPTY_NODE(&cfqq->rb_node)) 657 if (!RB_EMPTY_NODE(&cfqq->rb_node))
586 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 658 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
659 if (!RB_EMPTY_NODE(&cfqq->p_node))
660 rb_erase_init(&cfqq->p_node, &cfqd->prio_trees[cfqq->ioprio]);
587 661
588 BUG_ON(!cfqd->busy_queues); 662 BUG_ON(!cfqd->busy_queues);
589 cfqd->busy_queues--; 663 cfqd->busy_queues--;
@@ -613,7 +687,7 @@ static void cfq_add_rq_rb(struct request *rq)
613{ 687{
614 struct cfq_queue *cfqq = RQ_CFQQ(rq); 688 struct cfq_queue *cfqq = RQ_CFQQ(rq);
615 struct cfq_data *cfqd = cfqq->cfqd; 689 struct cfq_data *cfqd = cfqq->cfqd;
616 struct request *__alias; 690 struct request *__alias, *prev;
617 691
618 cfqq->queued[rq_is_sync(rq)]++; 692 cfqq->queued[rq_is_sync(rq)]++;
619 693
@@ -630,7 +704,15 @@ static void cfq_add_rq_rb(struct request *rq)
630 /* 704 /*
631 * check if this request is a better next-serve candidate 705 * check if this request is a better next-serve candidate
632 */ 706 */
707 prev = cfqq->next_rq;
633 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq); 708 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
709
710 /*
711 * adjust priority tree position, if ->next_rq changes
712 */
713 if (prev != cfqq->next_rq)
714 cfq_prio_tree_add(cfqd, cfqq);
715
634 BUG_ON(!cfqq->next_rq); 716 BUG_ON(!cfqq->next_rq);
635} 717}
636 718
@@ -843,11 +925,15 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
843/* 925/*
844 * Get and set a new active queue for service. 926 * Get and set a new active queue for service.
845 */ 927 */
846static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) 928static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
929 struct cfq_queue *cfqq)
847{ 930{
848 struct cfq_queue *cfqq; 931 if (!cfqq) {
932 cfqq = cfq_get_next_queue(cfqd);
933 if (cfqq)
934 cfq_clear_cfqq_coop(cfqq);
935 }
849 936
850 cfqq = cfq_get_next_queue(cfqd);
851 __cfq_set_active_queue(cfqd, cfqq); 937 __cfq_set_active_queue(cfqd, cfqq);
852 return cfqq; 938 return cfqq;
853} 939}
@@ -871,17 +957,89 @@ static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
871 return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean; 957 return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
872} 958}
873 959
874static int cfq_close_cooperator(struct cfq_data *cfq_data, 960static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
875 struct cfq_queue *cfqq) 961 struct cfq_queue *cur_cfqq)
962{
963 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->ioprio];
964 struct rb_node *parent, *node;
965 struct cfq_queue *__cfqq;
966 sector_t sector = cfqd->last_position;
967
968 if (RB_EMPTY_ROOT(root))
969 return NULL;
970
971 /*
972 * First, if we find a request starting at the end of the last
973 * request, choose it.
974 */
975 __cfqq = cfq_prio_tree_lookup(cfqd, cur_cfqq->ioprio,
976 sector, &parent, NULL);
977 if (__cfqq)
978 return __cfqq;
979
980 /*
981 * If the exact sector wasn't found, the parent of the NULL leaf
982 * will contain the closest sector.
983 */
984 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
985 if (cfq_rq_close(cfqd, __cfqq->next_rq))
986 return __cfqq;
987
988 if (__cfqq->next_rq->sector < sector)
989 node = rb_next(&__cfqq->p_node);
990 else
991 node = rb_prev(&__cfqq->p_node);
992 if (!node)
993 return NULL;
994
995 __cfqq = rb_entry(node, struct cfq_queue, p_node);
996 if (cfq_rq_close(cfqd, __cfqq->next_rq))
997 return __cfqq;
998
999 return NULL;
1000}
1001
1002/*
1003 * cfqd - obvious
1004 * cur_cfqq - passed in so that we don't decide that the current queue is
1005 * closely cooperating with itself.
1006 *
1007 * So, basically we're assuming that cur_cfqq has dispatched at least
1008 * one request, and that cfqd->last_position reflects a position on the disk
1009 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1010 * assumption.
1011 */
1012static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1013 struct cfq_queue *cur_cfqq,
1014 int probe)
876{ 1015{
1016 struct cfq_queue *cfqq;
1017
1018 /*
1019 * A valid cfq_io_context is necessary to compare requests against
1020 * the seek_mean of the current cfqq.
1021 */
1022 if (!cfqd->active_cic)
1023 return NULL;
1024
877 /* 1025 /*
878 * We should notice if some of the queues are cooperating, eg 1026 * We should notice if some of the queues are cooperating, eg
879 * working closely on the same area of the disk. In that case, 1027 * working closely on the same area of the disk. In that case,
880 * we can group them together and don't waste time idling. 1028 * we can group them together and don't waste time idling.
881 */ 1029 */
882 return 0; 1030 cfqq = cfqq_close(cfqd, cur_cfqq);
1031 if (!cfqq)
1032 return NULL;
1033
1034 if (cfq_cfqq_coop(cfqq))
1035 return NULL;
1036
1037 if (!probe)
1038 cfq_mark_cfqq_coop(cfqq);
1039 return cfqq;
883} 1040}
884 1041
1042
885#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024)) 1043#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
886 1044
887static void cfq_arm_slice_timer(struct cfq_data *cfqd) 1045static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -920,13 +1078,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
920 if (!cic || !atomic_read(&cic->ioc->nr_tasks)) 1078 if (!cic || !atomic_read(&cic->ioc->nr_tasks))
921 return; 1079 return;
922 1080
923 /*
924 * See if this prio level has a good candidate
925 */
926 if (cfq_close_cooperator(cfqd, cfqq) &&
927 (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
928 return;
929
930 cfq_mark_cfqq_wait_request(cfqq); 1081 cfq_mark_cfqq_wait_request(cfqq);
931 1082
932 /* 1083 /*
@@ -939,7 +1090,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
939 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT)); 1090 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
940 1091
941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 1092 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
942 cfq_log(cfqd, "arm_idle: %lu", sl); 1093 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
943} 1094}
944 1095
945/* 1096/*
@@ -1003,7 +1154,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1003 */ 1154 */
1004static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 1155static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1005{ 1156{
1006 struct cfq_queue *cfqq; 1157 struct cfq_queue *cfqq, *new_cfqq = NULL;
1007 1158
1008 cfqq = cfqd->active_queue; 1159 cfqq = cfqd->active_queue;
1009 if (!cfqq) 1160 if (!cfqq)
@@ -1037,6 +1188,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1037 goto keep_queue; 1188 goto keep_queue;
1038 1189
1039 /* 1190 /*
1191 * If another queue has a request waiting within our mean seek
1192 * distance, let it run. The expire code will check for close
1193 * cooperators and put the close queue at the front of the service
1194 * tree.
1195 */
1196 new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
1197 if (new_cfqq)
1198 goto expire;
1199
1200 /*
1040 * No requests pending. If the active queue still has requests in 1201 * No requests pending. If the active queue still has requests in
1041 * flight or is idling for a new request, allow either of these 1202 * flight or is idling for a new request, allow either of these
1042 * conditions to happen (or time out) before selecting a new queue. 1203 * conditions to happen (or time out) before selecting a new queue.
@@ -1050,7 +1211,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1050expire: 1211expire:
1051 cfq_slice_expired(cfqd, 0); 1212 cfq_slice_expired(cfqd, 0);
1052new_queue: 1213new_queue:
1053 cfqq = cfq_set_active_queue(cfqd); 1214 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1054keep_queue: 1215keep_queue:
1055 return cfqq; 1216 return cfqq;
1056} 1217}
@@ -1333,14 +1494,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1333 if (ioc->ioc_data == cic) 1494 if (ioc->ioc_data == cic)
1334 rcu_assign_pointer(ioc->ioc_data, NULL); 1495 rcu_assign_pointer(ioc->ioc_data, NULL);
1335 1496
1336 if (cic->cfqq[ASYNC]) { 1497 if (cic->cfqq[BLK_RW_ASYNC]) {
1337 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]); 1498 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1338 cic->cfqq[ASYNC] = NULL; 1499 cic->cfqq[BLK_RW_ASYNC] = NULL;
1339 } 1500 }
1340 1501
1341 if (cic->cfqq[SYNC]) { 1502 if (cic->cfqq[BLK_RW_SYNC]) {
1342 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]); 1503 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1343 cic->cfqq[SYNC] = NULL; 1504 cic->cfqq[BLK_RW_SYNC] = NULL;
1344 } 1505 }
1345} 1506}
1346 1507
@@ -1449,17 +1610,18 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1449 1610
1450 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1611 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1451 1612
1452 cfqq = cic->cfqq[ASYNC]; 1613 cfqq = cic->cfqq[BLK_RW_ASYNC];
1453 if (cfqq) { 1614 if (cfqq) {
1454 struct cfq_queue *new_cfqq; 1615 struct cfq_queue *new_cfqq;
1455 new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC); 1616 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
1617 GFP_ATOMIC);
1456 if (new_cfqq) { 1618 if (new_cfqq) {
1457 cic->cfqq[ASYNC] = new_cfqq; 1619 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
1458 cfq_put_queue(cfqq); 1620 cfq_put_queue(cfqq);
1459 } 1621 }
1460 } 1622 }
1461 1623
1462 cfqq = cic->cfqq[SYNC]; 1624 cfqq = cic->cfqq[BLK_RW_SYNC];
1463 if (cfqq) 1625 if (cfqq)
1464 cfq_mark_cfqq_prio_changed(cfqq); 1626 cfq_mark_cfqq_prio_changed(cfqq);
1465 1627
@@ -1510,6 +1672,7 @@ retry:
1510 } 1672 }
1511 1673
1512 RB_CLEAR_NODE(&cfqq->rb_node); 1674 RB_CLEAR_NODE(&cfqq->rb_node);
1675 RB_CLEAR_NODE(&cfqq->p_node);
1513 INIT_LIST_HEAD(&cfqq->fifo); 1676 INIT_LIST_HEAD(&cfqq->fifo);
1514 1677
1515 atomic_set(&cfqq->ref, 0); 1678 atomic_set(&cfqq->ref, 0);
@@ -1905,10 +2068,20 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1905 * Remember that we saw a request from this process, but 2068 * Remember that we saw a request from this process, but
1906 * don't start queuing just yet. Otherwise we risk seeing lots 2069 * don't start queuing just yet. Otherwise we risk seeing lots
1907 * of tiny requests, because we disrupt the normal plugging 2070 * of tiny requests, because we disrupt the normal plugging
1908 * and merging. 2071 * and merging. If the request is already larger than a single
2072 * page, let it rip immediately. For that case we assume that
2073 * merging is already done. Ditto for a busy system that
2074 * has other work pending, don't risk delaying until the
2075 * idle timer unplug to continue working.
1909 */ 2076 */
1910 if (cfq_cfqq_wait_request(cfqq)) 2077 if (cfq_cfqq_wait_request(cfqq)) {
2078 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2079 cfqd->busy_queues > 1) {
2080 del_timer(&cfqd->idle_slice_timer);
2081 blk_start_queueing(cfqd->queue);
2082 }
1911 cfq_mark_cfqq_must_dispatch(cfqq); 2083 cfq_mark_cfqq_must_dispatch(cfqq);
2084 }
1912 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { 2085 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1913 /* 2086 /*
1914 * not the active queue - expire current slice if it is 2087 * not the active queue - expire current slice if it is
@@ -1992,16 +2165,24 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
1992 * or if we want to idle in case it has no pending requests. 2165 * or if we want to idle in case it has no pending requests.
1993 */ 2166 */
1994 if (cfqd->active_queue == cfqq) { 2167 if (cfqd->active_queue == cfqq) {
2168 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2169
1995 if (cfq_cfqq_slice_new(cfqq)) { 2170 if (cfq_cfqq_slice_new(cfqq)) {
1996 cfq_set_prio_slice(cfqd, cfqq); 2171 cfq_set_prio_slice(cfqd, cfqq);
1997 cfq_clear_cfqq_slice_new(cfqq); 2172 cfq_clear_cfqq_slice_new(cfqq);
1998 } 2173 }
2174 /*
2175 * If there are no requests waiting in this queue, and
2176 * there are other queues ready to issue requests, AND
2177 * those other queues are issuing requests within our
2178 * mean seek distance, give them a chance to run instead
2179 * of idling.
2180 */
1999 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) 2181 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2000 cfq_slice_expired(cfqd, 1); 2182 cfq_slice_expired(cfqd, 1);
2001 else if (sync && !rq_noidle(rq) && 2183 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
2002 RB_EMPTY_ROOT(&cfqq->sort_list)) { 2184 sync && !rq_noidle(rq))
2003 cfq_arm_slice_timer(cfqd); 2185 cfq_arm_slice_timer(cfqd);
2004 }
2005 } 2186 }
2006 2187
2007 if (!cfqd->rq_in_driver) 2188 if (!cfqd->rq_in_driver)
@@ -2062,7 +2243,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
2062 if (!cic) 2243 if (!cic)
2063 return ELV_MQUEUE_MAY; 2244 return ELV_MQUEUE_MAY;
2064 2245
2065 cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); 2246 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2066 if (cfqq) { 2247 if (cfqq) {
2067 cfq_init_prio_data(cfqq, cic->ioc); 2248 cfq_init_prio_data(cfqq, cic->ioc);
2068 cfq_prio_boost(cfqq); 2249 cfq_prio_boost(cfqq);
@@ -2152,11 +2333,10 @@ static void cfq_kick_queue(struct work_struct *work)
2152 struct cfq_data *cfqd = 2333 struct cfq_data *cfqd =
2153 container_of(work, struct cfq_data, unplug_work); 2334 container_of(work, struct cfq_data, unplug_work);
2154 struct request_queue *q = cfqd->queue; 2335 struct request_queue *q = cfqd->queue;
2155 unsigned long flags;
2156 2336
2157 spin_lock_irqsave(q->queue_lock, flags); 2337 spin_lock_irq(q->queue_lock);
2158 blk_start_queueing(q); 2338 blk_start_queueing(q);
2159 spin_unlock_irqrestore(q->queue_lock, flags); 2339 spin_unlock_irq(q->queue_lock);
2160} 2340}
2161 2341
2162/* 2342/*
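
The comments added to cfq_rq_enqueued() and cfq_completed_request() above describe two pure predicates: kick the queue immediately when a request already exceeds a page or when other queues are busy, and arm the idle-slice timer only for an empty queue whose completed request was sync, not marked no-idle, and has no close cooperator ready to run. The following is a minimal userspace sketch of just those two checks; kick_queue_now(), arm_idle_timer() and SKETCH_PAGE_SIZE are made-up stand-ins for the kernel's cfq_data state and PAGE_CACHE_SIZE, not the CFQ code itself.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096u

/* Enqueue side: dispatch right away instead of waiting for the idle
 * timer when the request is already bigger than one page (merging is
 * assumed done) or when other queues have work pending. */
static bool kick_queue_now(size_t rq_bytes, int busy_queues)
{
	return rq_bytes > SKETCH_PAGE_SIZE || busy_queues > 1;
}

/* Completion side: only idle (arm the slice timer) when this queue is
 * empty, no close cooperator is ready to run, and the completed
 * request was sync and not marked no-idle. */
static bool arm_idle_timer(bool queue_empty, bool close_cooperator,
			   bool sync, bool noidle)
{
	return queue_empty && !close_cooperator && sync && !noidle;
}

int main(void)
{
	printf("8KiB request, 1 busy queue -> kick: %d\n",
	       kick_queue_now(8192, 1));
	printf("empty sync queue, no cooperator -> idle: %d\n",
	       arm_idle_timer(true, false, true, false));
	return 0;
}
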
diff --git a/block/elevator.c b/block/elevator.c
index fb81bcc14a8c..7073a9072577 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -590,7 +590,7 @@ void elv_drain_elevator(struct request_queue *q)
590/* 590/*
591 * Call with queue lock held, interrupts disabled 591 * Call with queue lock held, interrupts disabled
592 */ 592 */
593void elv_quisce_start(struct request_queue *q) 593void elv_quiesce_start(struct request_queue *q)
594{ 594{
595 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); 595 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
596 596
@@ -607,7 +607,7 @@ void elv_quisce_start(struct request_queue *q)
607 } 607 }
608} 608}
609 609
610void elv_quisce_end(struct request_queue *q) 610void elv_quiesce_end(struct request_queue *q)
611{ 611{
612 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); 612 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
613} 613}
@@ -1126,7 +1126,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1126 * Turn on BYPASS and drain all requests w/ elevator private data 1126 * Turn on BYPASS and drain all requests w/ elevator private data
1127 */ 1127 */
1128 spin_lock_irq(q->queue_lock); 1128 spin_lock_irq(q->queue_lock);
1129 elv_quisce_start(q); 1129 elv_quiesce_start(q);
1130 1130
1131 /* 1131 /*
1132 * Remember old elevator. 1132 * Remember old elevator.
@@ -1150,7 +1150,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1150 */ 1150 */
1151 elevator_exit(old_elevator); 1151 elevator_exit(old_elevator);
1152 spin_lock_irq(q->queue_lock); 1152 spin_lock_irq(q->queue_lock);
1153 elv_quisce_end(q); 1153 elv_quiesce_end(q);
1154 spin_unlock_irq(q->queue_lock); 1154 spin_unlock_irq(q->queue_lock);
1155 1155
1156 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); 1156 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
diff --git a/block/ioctl.c b/block/ioctl.c
index 0f22e629b13c..ad474d4bbcce 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -146,8 +146,6 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
146 struct bio *bio; 146 struct bio *bio;
147 147
148 bio = bio_alloc(GFP_KERNEL, 0); 148 bio = bio_alloc(GFP_KERNEL, 0);
149 if (!bio)
150 return -ENOMEM;
151 149
152 bio->bi_end_io = blk_ioc_discard_endio; 150 bio->bi_end_io = blk_ioc_discard_endio;
153 bio->bi_bdev = bdev; 151 bio->bi_bdev = bdev;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 626ee274c5c4..84b7f8709f41 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -217,7 +217,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
217static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, 217static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
218 struct bio *bio) 218 struct bio *bio)
219{ 219{
220 int ret = 0; 220 int r, ret = 0;
221 221
222 /* 222 /*
223 * fill in all the output members 223 * fill in all the output members
@@ -242,7 +242,9 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
242 ret = -EFAULT; 242 ret = -EFAULT;
243 } 243 }
244 244
245 blk_rq_unmap_user(bio); 245 r = blk_rq_unmap_user(bio);
246 if (!ret)
247 ret = r;
246 blk_put_request(rq); 248 blk_put_request(rq);
247 249
248 return ret; 250 return ret;
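
The blk_complete_sghdr_rq() change above is the usual "first error wins" idiom: the return value of blk_rq_unmap_user() is no longer dropped, but it only replaces ret when nothing has failed earlier. A small self-contained illustration follows; copy_sense_data() and unmap_user_pages() are made-up stand-ins returning fixed error codes, not the block-layer functions.

#include <stdio.h>

static int copy_sense_data(void)  { return -14; }	/* pretend -EFAULT */
static int unmap_user_pages(void) { return -22; }	/* pretend a later failure */

int main(void)
{
	int r, ret = 0;

	ret = copy_sense_data();	/* earlier failure is recorded */
	r = unmap_user_pages();
	if (!ret)			/* later failure reported only if */
		ret = r;		/* nothing went wrong before it    */

	printf("returned %d\n", ret);	/* prints -14, the first error */
	return 0;
}
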
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bdd4f5f45575..5f7e64ba87e5 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -275,8 +275,10 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
275 if (rw == READ) { 275 if (rw == READ) {
276 copy_from_brd(mem + off, brd, sector, len); 276 copy_from_brd(mem + off, brd, sector, len);
277 flush_dcache_page(page); 277 flush_dcache_page(page);
278 } else 278 } else {
279 flush_dcache_page(page);
279 copy_to_brd(brd, mem + off, sector, len); 280 copy_to_brd(brd, mem + off, sector, len);
281 }
280 kunmap_atomic(mem, KM_USER0); 282 kunmap_atomic(mem, KM_USER0);
281 283
282out: 284out:
@@ -436,6 +438,7 @@ static struct brd_device *brd_alloc(int i)
436 if (!brd->brd_queue) 438 if (!brd->brd_queue)
437 goto out_free_dev; 439 goto out_free_dev;
438 blk_queue_make_request(brd->brd_queue, brd_make_request); 440 blk_queue_make_request(brd->brd_queue, brd_make_request);
441 blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
439 blk_queue_max_sectors(brd->brd_queue, 1024); 442 blk_queue_max_sectors(brd->brd_queue, 1024);
440 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); 443 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
441 444
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3750d8003048..473a8f7fbdb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -446,6 +446,9 @@ struct drm_i915_gem_object {
446 uint32_t tiling_mode; 446 uint32_t tiling_mode;
447 uint32_t stride; 447 uint32_t stride;
448 448
449 /** Record of address bit 17 of each page at last unbind. */
450 long *bit_17;
451
449 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 452 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
450 uint32_t agp_type; 453 uint32_t agp_type;
451 454
@@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
635void i915_gem_detach_phys_object(struct drm_device *dev, 638void i915_gem_detach_phys_object(struct drm_device *dev,
636 struct drm_gem_object *obj); 639 struct drm_gem_object *obj);
637void i915_gem_free_all_phys_object(struct drm_device *dev); 640void i915_gem_free_all_phys_object(struct drm_device *dev);
641int i915_gem_object_get_pages(struct drm_gem_object *obj);
642void i915_gem_object_put_pages(struct drm_gem_object *obj);
638 643
639/* i915_gem_tiling.c */ 644/* i915_gem_tiling.c */
640void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 645void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
646void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
647void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
641 648
642/* i915_gem_debug.c */ 649/* i915_gem_debug.c */
643void i915_gem_dump_object(struct drm_gem_object *obj, int len, 650void i915_gem_dump_object(struct drm_gem_object *obj, int len,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1449b452cc63..4642115902d6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset, 43 uint64_t offset,
44 uint64_t size); 44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46static int i915_gem_object_get_pages(struct drm_gem_object *obj);
47static void i915_gem_object_put_pages(struct drm_gem_object *obj);
48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 46static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 47static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50 unsigned alignment); 48 unsigned alignment);
@@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages,
143 int length) 141 int length)
144{ 142{
145 char __iomem *vaddr; 143 char __iomem *vaddr;
146 int ret; 144 int unwritten;
147 145
148 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); 146 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
149 if (vaddr == NULL) 147 if (vaddr == NULL)
150 return -ENOMEM; 148 return -ENOMEM;
151 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); 149 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
152 kunmap_atomic(vaddr, KM_USER0); 150 kunmap_atomic(vaddr, KM_USER0);
153 151
154 return ret; 152 if (unwritten)
153 return -EFAULT;
154
155 return 0;
156}
157
158static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159{
160 drm_i915_private_t *dev_priv = obj->dev->dev_private;
161 struct drm_i915_gem_object *obj_priv = obj->driver_private;
162
163 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164 obj_priv->tiling_mode != I915_TILING_NONE;
155} 165}
156 166
157static inline int 167static inline int
@@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page,
181 return 0; 191 return 0;
182} 192}
183 193
194static inline int
195slow_shmem_bit17_copy(struct page *gpu_page,
196 int gpu_offset,
197 struct page *cpu_page,
198 int cpu_offset,
199 int length,
200 int is_read)
201{
202 char *gpu_vaddr, *cpu_vaddr;
203
204 /* Use the unswizzled path if this page isn't affected. */
205 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
206 if (is_read)
207 return slow_shmem_copy(cpu_page, cpu_offset,
208 gpu_page, gpu_offset, length);
209 else
210 return slow_shmem_copy(gpu_page, gpu_offset,
211 cpu_page, cpu_offset, length);
212 }
213
214 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
215 if (gpu_vaddr == NULL)
216 return -ENOMEM;
217
218 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
219 if (cpu_vaddr == NULL) {
220 kunmap_atomic(gpu_vaddr, KM_USER0);
221 return -ENOMEM;
222 }
223
224 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
225 * XORing with the other bits (A9 for Y, A9 and A10 for X)
226 */
227 while (length > 0) {
228 int cacheline_end = ALIGN(gpu_offset + 1, 64);
229 int this_length = min(cacheline_end - gpu_offset, length);
230 int swizzled_gpu_offset = gpu_offset ^ 64;
231
232 if (is_read) {
233 memcpy(cpu_vaddr + cpu_offset,
234 gpu_vaddr + swizzled_gpu_offset,
235 this_length);
236 } else {
237 memcpy(gpu_vaddr + swizzled_gpu_offset,
238 cpu_vaddr + cpu_offset,
239 this_length);
240 }
241 cpu_offset += this_length;
242 gpu_offset += this_length;
243 length -= this_length;
244 }
245
246 kunmap_atomic(cpu_vaddr, KM_USER1);
247 kunmap_atomic(gpu_vaddr, KM_USER0);
248
249 return 0;
250}
251
184/** 252/**
185 * This is the fast shmem pread path, which attempts to copy_from_user directly 253 * This is the fast shmem pread path, which attempts to copy_from_user directly
186 * from the backing pages of the object to the user's address space. On a 254 * from the backing pages of the object to the user's address space. On a
@@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
269 int page_length; 337 int page_length;
270 int ret; 338 int ret;
271 uint64_t data_ptr = args->data_ptr; 339 uint64_t data_ptr = args->data_ptr;
340 int do_bit17_swizzling;
272 341
273 remain = args->size; 342 remain = args->size;
274 343
@@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
286 355
287 down_read(&mm->mmap_sem); 356 down_read(&mm->mmap_sem);
288 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 357 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
289 num_pages, 0, 0, user_pages, NULL); 358 num_pages, 1, 0, user_pages, NULL);
290 up_read(&mm->mmap_sem); 359 up_read(&mm->mmap_sem);
291 if (pinned_pages < num_pages) { 360 if (pinned_pages < num_pages) {
292 ret = -EFAULT; 361 ret = -EFAULT;
293 goto fail_put_user_pages; 362 goto fail_put_user_pages;
294 } 363 }
295 364
365 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
366
296 mutex_lock(&dev->struct_mutex); 367 mutex_lock(&dev->struct_mutex);
297 368
298 ret = i915_gem_object_get_pages(obj); 369 ret = i915_gem_object_get_pages(obj);
@@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
327 if ((data_page_offset + page_length) > PAGE_SIZE) 398 if ((data_page_offset + page_length) > PAGE_SIZE)
328 page_length = PAGE_SIZE - data_page_offset; 399 page_length = PAGE_SIZE - data_page_offset;
329 400
330 ret = slow_shmem_copy(user_pages[data_page_index], 401 if (do_bit17_swizzling) {
331 data_page_offset, 402 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
332 obj_priv->pages[shmem_page_index], 403 shmem_page_offset,
333 shmem_page_offset, 404 user_pages[data_page_index],
334 page_length); 405 data_page_offset,
406 page_length,
407 1);
408 } else {
409 ret = slow_shmem_copy(user_pages[data_page_index],
410 data_page_offset,
411 obj_priv->pages[shmem_page_index],
412 shmem_page_offset,
413 page_length);
414 }
335 if (ret) 415 if (ret)
336 goto fail_put_pages; 416 goto fail_put_pages;
337 417
@@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
383 return -EINVAL; 463 return -EINVAL;
384 } 464 }
385 465
386 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); 466 if (i915_gem_object_needs_bit17_swizzle(obj)) {
387 if (ret != 0)
388 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); 467 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
468 } else {
469 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
470 if (ret != 0)
471 ret = i915_gem_shmem_pread_slow(dev, obj, args,
472 file_priv);
473 }
389 474
390 drm_gem_object_unreference(obj); 475 drm_gem_object_unreference(obj);
391 476
@@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
727 int page_length; 812 int page_length;
728 int ret; 813 int ret;
729 uint64_t data_ptr = args->data_ptr; 814 uint64_t data_ptr = args->data_ptr;
815 int do_bit17_swizzling;
730 816
731 remain = args->size; 817 remain = args->size;
732 818
@@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
751 goto fail_put_user_pages; 837 goto fail_put_user_pages;
752 } 838 }
753 839
840 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
841
754 mutex_lock(&dev->struct_mutex); 842 mutex_lock(&dev->struct_mutex);
755 843
756 ret = i915_gem_object_get_pages(obj); 844 ret = i915_gem_object_get_pages(obj);
@@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
785 if ((data_page_offset + page_length) > PAGE_SIZE) 873 if ((data_page_offset + page_length) > PAGE_SIZE)
786 page_length = PAGE_SIZE - data_page_offset; 874 page_length = PAGE_SIZE - data_page_offset;
787 875
788 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], 876 if (do_bit17_swizzling) {
789 shmem_page_offset, 877 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
790 user_pages[data_page_index], 878 shmem_page_offset,
791 data_page_offset, 879 user_pages[data_page_index],
792 page_length); 880 data_page_offset,
881 page_length,
882 0);
883 } else {
884 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
885 shmem_page_offset,
886 user_pages[data_page_index],
887 data_page_offset,
888 page_length);
889 }
793 if (ret) 890 if (ret)
794 goto fail_put_pages; 891 goto fail_put_pages;
795 892
@@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
854 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, 951 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
855 file_priv); 952 file_priv);
856 } 953 }
954 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
955 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
857 } else { 956 } else {
858 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); 957 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
859 if (ret == -EFAULT) { 958 if (ret == -EFAULT) {
@@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1285 return 0; 1384 return 0;
1286} 1385}
1287 1386
1288static void 1387void
1289i915_gem_object_put_pages(struct drm_gem_object *obj) 1388i915_gem_object_put_pages(struct drm_gem_object *obj)
1290{ 1389{
1291 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1390 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1297 if (--obj_priv->pages_refcount != 0) 1396 if (--obj_priv->pages_refcount != 0)
1298 return; 1397 return;
1299 1398
1399 if (obj_priv->tiling_mode != I915_TILING_NONE)
1400 i915_gem_object_save_bit_17_swizzle(obj);
1401
1300 for (i = 0; i < page_count; i++) 1402 for (i = 0; i < page_count; i++)
1301 if (obj_priv->pages[i] != NULL) { 1403 if (obj_priv->pages[i] != NULL) {
1302 if (obj_priv->dirty) 1404 if (obj_priv->dirty)
@@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev,
1494 1596
1495 if (obj->write_domain != 0) 1597 if (obj->write_domain != 0)
1496 i915_gem_object_move_to_flushing(obj); 1598 i915_gem_object_move_to_flushing(obj);
1497 else 1599 else {
1600 /* Take a reference on the object so it won't be
1601 * freed while the spinlock is held. The list
1602 * protection for this spinlock is safe when breaking
1603 * the lock like this since the next thing we do
1604 * is just get the head of the list again.
1605 */
1606 drm_gem_object_reference(obj);
1498 i915_gem_object_move_to_inactive(obj); 1607 i915_gem_object_move_to_inactive(obj);
1608 spin_unlock(&dev_priv->mm.active_list_lock);
1609 drm_gem_object_unreference(obj);
1610 spin_lock(&dev_priv->mm.active_list_lock);
1611 }
1499 } 1612 }
1500out: 1613out:
1501 spin_unlock(&dev_priv->mm.active_list_lock); 1614 spin_unlock(&dev_priv->mm.active_list_lock);
@@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev)
1884 return ret; 1997 return ret;
1885} 1998}
1886 1999
1887static int 2000int
1888i915_gem_object_get_pages(struct drm_gem_object *obj) 2001i915_gem_object_get_pages(struct drm_gem_object *obj)
1889{ 2002{
1890 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2003 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
1922 } 2035 }
1923 obj_priv->pages[i] = page; 2036 obj_priv->pages[i] = page;
1924 } 2037 }
2038
2039 if (obj_priv->tiling_mode != I915_TILING_NONE)
2040 i915_gem_object_do_bit_17_swizzle(obj);
2041
1925 return 0; 2042 return 0;
1926} 2043}
1927 2044
@@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3002 drm_free(*relocs, reloc_count * sizeof(**relocs), 3119 drm_free(*relocs, reloc_count * sizeof(**relocs),
3003 DRM_MEM_DRIVER); 3120 DRM_MEM_DRIVER);
3004 *relocs = NULL; 3121 *relocs = NULL;
3005 return ret; 3122 return -EFAULT;
3006 } 3123 }
3007 3124
3008 reloc_index += exec_list[i].relocation_count; 3125 reloc_index += exec_list[i].relocation_count;
3009 } 3126 }
3010 3127
3011 return ret; 3128 return 0;
3012} 3129}
3013 3130
3014static int 3131static int
@@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3017 struct drm_i915_gem_relocation_entry *relocs) 3134 struct drm_i915_gem_relocation_entry *relocs)
3018{ 3135{
3019 uint32_t reloc_count = 0, i; 3136 uint32_t reloc_count = 0, i;
3020 int ret; 3137 int ret = 0;
3021 3138
3022 for (i = 0; i < buffer_count; i++) { 3139 for (i = 0; i < buffer_count; i++) {
3023 struct drm_i915_gem_relocation_entry __user *user_relocs; 3140 struct drm_i915_gem_relocation_entry __user *user_relocs;
3141 int unwritten;
3024 3142
3025 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; 3143 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3026 3144
3027 if (ret == 0) { 3145 unwritten = copy_to_user(user_relocs,
3028 ret = copy_to_user(user_relocs, 3146 &relocs[reloc_count],
3029 &relocs[reloc_count], 3147 exec_list[i].relocation_count *
3030 exec_list[i].relocation_count * 3148 sizeof(*relocs));
3031 sizeof(*relocs)); 3149
3150 if (unwritten) {
3151 ret = -EFAULT;
3152 goto err;
3032 } 3153 }
3033 3154
3034 reloc_count += exec_list[i].relocation_count; 3155 reloc_count += exec_list[i].relocation_count;
3035 } 3156 }
3036 3157
3158err:
3037 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); 3159 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
3038 3160
3039 return ret; 3161 return ret;
@@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3243 exec_offset = exec_list[args->buffer_count - 1].offset; 3365 exec_offset = exec_list[args->buffer_count - 1].offset;
3244 3366
3245#if WATCH_EXEC 3367#if WATCH_EXEC
3246 i915_gem_dump_object(object_list[args->buffer_count - 1], 3368 i915_gem_dump_object(batch_obj,
3247 args->batch_len, 3369 args->batch_len,
3248 __func__, 3370 __func__,
3249 ~0); 3371 ~0);
@@ -3308,10 +3430,12 @@ err:
3308 (uintptr_t) args->buffers_ptr, 3430 (uintptr_t) args->buffers_ptr,
3309 exec_list, 3431 exec_list,
3310 sizeof(*exec_list) * args->buffer_count); 3432 sizeof(*exec_list) * args->buffer_count);
3311 if (ret) 3433 if (ret) {
3434 ret = -EFAULT;
3312 DRM_ERROR("failed to copy %d exec entries " 3435 DRM_ERROR("failed to copy %d exec entries "
3313 "back to user (%d)\n", 3436 "back to user (%d)\n",
3314 args->buffer_count, ret); 3437 args->buffer_count, ret);
3438 }
3315 } 3439 }
3316 3440
3317 /* Copy the updated relocations out regardless of current error 3441 /* Copy the updated relocations out regardless of current error
@@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
3593 i915_gem_free_mmap_offset(obj); 3717 i915_gem_free_mmap_offset(obj);
3594 3718
3595 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 3719 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
3720 kfree(obj_priv->bit_17);
3596 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 3721 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3597} 3722}
3598 3723
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index a1ac0c5e7307..986f1082c596 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data)
234 return 0; 234 return 0;
235} 235}
236 236
237static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
238{
239 int page, i;
240 uint32_t *mem;
241
242 for (page = 0; page < page_count; page++) {
243 mem = kmap(pages[page]);
244 for (i = 0; i < PAGE_SIZE; i += 4)
245 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
246 kunmap(pages[page]);
247 }
248}
249
250static int i915_batchbuffer_info(struct seq_file *m, void *data)
251{
252 struct drm_info_node *node = (struct drm_info_node *) m->private;
253 struct drm_device *dev = node->minor->dev;
254 drm_i915_private_t *dev_priv = dev->dev_private;
255 struct drm_gem_object *obj;
256 struct drm_i915_gem_object *obj_priv;
257 int ret;
258
259 spin_lock(&dev_priv->mm.active_list_lock);
260
261 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
262 obj = obj_priv->obj;
263 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
264 ret = i915_gem_object_get_pages(obj);
265 if (ret) {
266 DRM_ERROR("Failed to get pages: %d\n", ret);
267 spin_unlock(&dev_priv->mm.active_list_lock);
268 return ret;
269 }
270
271 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
272 i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
273
274 i915_gem_object_put_pages(obj);
275 }
276 }
277
278 spin_unlock(&dev_priv->mm.active_list_lock);
279
280 return 0;
281}
282
283static int i915_ringbuffer_data(struct seq_file *m, void *data)
284{
285 struct drm_info_node *node = (struct drm_info_node *) m->private;
286 struct drm_device *dev = node->minor->dev;
287 drm_i915_private_t *dev_priv = dev->dev_private;
288 u8 *virt;
289 uint32_t *ptr, off;
290
291 if (!dev_priv->ring.ring_obj) {
292 seq_printf(m, "No ringbuffer setup\n");
293 return 0;
294 }
295
296 virt = dev_priv->ring.virtual_start;
297
298 for (off = 0; off < dev_priv->ring.Size; off += 4) {
299 ptr = (uint32_t *)(virt + off);
300 seq_printf(m, "%08x : %08x\n", off, *ptr);
301 }
302
303 return 0;
304}
305
306static int i915_ringbuffer_info(struct seq_file *m, void *data)
307{
308 struct drm_info_node *node = (struct drm_info_node *) m->private;
309 struct drm_device *dev = node->minor->dev;
310 drm_i915_private_t *dev_priv = dev->dev_private;
311 unsigned int head, tail, mask;
312
313 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
314 tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
315 mask = dev_priv->ring.tail_mask;
316
317 seq_printf(m, "RingHead : %08x\n", head);
318 seq_printf(m, "RingTail : %08x\n", tail);
319 seq_printf(m, "RingMask : %08x\n", mask);
320 seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
321 seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
322
323 return 0;
324}
325
326
237static struct drm_info_list i915_gem_debugfs_list[] = { 327static struct drm_info_list i915_gem_debugfs_list[] = {
238 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 328 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
239 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 329 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
@@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
243 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 333 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
244 {"i915_gem_interrupt", i915_interrupt_info, 0}, 334 {"i915_gem_interrupt", i915_interrupt_info, 0},
245 {"i915_gem_hws", i915_hws_info, 0}, 335 {"i915_gem_hws", i915_hws_info, 0},
336 {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
337 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
338 {"i915_batchbuffers", i915_batchbuffer_info, 0},
246}; 339};
247#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) 340#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
248 341
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6be3f927c86a..f27e523c764f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include "linux/string.h"
29#include "linux/bitops.h"
28#include "drmP.h" 30#include "drmP.h"
29#include "drm.h" 31#include "drm.h"
30#include "i915_drm.h" 32#include "i915_drm.h"
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
127 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 129 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
128 } else { 130 } else {
129 /* Bit 17 swizzling by the CPU in addition. */ 131 /* Bit 17 swizzling by the CPU in addition. */
130 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 132 swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
131 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 133 swizzle_y = I915_BIT_6_SWIZZLE_9_17;
132 } 134 }
133 break; 135 break;
134 } 136 }
@@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
288 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 290 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
289 else 291 else
290 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 292 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
293
294 /* Hide bit 17 swizzling from the user. This prevents old Mesa
295 * from aborting the application on sw fallbacks to bit 17,
296 * and we use the pread/pwrite bit17 paths to swizzle for it.
297 * If there was a user that was relying on the swizzle
298 * information for drm_intel_bo_map()ed reads/writes this would
299 * break it, but we don't have any of those.
300 */
301 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
302 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
303 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
304 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
305
291 /* If we can't handle the swizzling, make it untiled. */ 306 /* If we can't handle the swizzling, make it untiled. */
292 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { 307 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
293 args->tiling_mode = I915_TILING_NONE; 308 args->tiling_mode = I915_TILING_NONE;
@@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
354 DRM_ERROR("unknown tiling mode\n"); 369 DRM_ERROR("unknown tiling mode\n");
355 } 370 }
356 371
372 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
373 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
374 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
375 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
376 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
377
357 drm_gem_object_unreference(obj); 378 drm_gem_object_unreference(obj);
358 mutex_unlock(&dev->struct_mutex); 379 mutex_unlock(&dev->struct_mutex);
359 380
360 return 0; 381 return 0;
361} 382}
383
384/**
385 * Swap every 64 bytes of this page around, to account for it having a new
386 * bit 17 of its physical address and therefore being interpreted differently
387 * by the GPU.
388 */
389static int
390i915_gem_swizzle_page(struct page *page)
391{
392 char *vaddr;
393 int i;
394 char temp[64];
395
396 vaddr = kmap(page);
397 if (vaddr == NULL)
398 return -ENOMEM;
399
400 for (i = 0; i < PAGE_SIZE; i += 128) {
401 memcpy(temp, &vaddr[i], 64);
402 memcpy(&vaddr[i], &vaddr[i + 64], 64);
403 memcpy(&vaddr[i + 64], temp, 64);
404 }
405
406 kunmap(page);
407
408 return 0;
409}
410
411void
412i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
413{
414 struct drm_device *dev = obj->dev;
415 drm_i915_private_t *dev_priv = dev->dev_private;
416 struct drm_i915_gem_object *obj_priv = obj->driver_private;
417 int page_count = obj->size >> PAGE_SHIFT;
418 int i;
419
420 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
421 return;
422
423 if (obj_priv->bit_17 == NULL)
424 return;
425
426 for (i = 0; i < page_count; i++) {
427 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
428 if ((new_bit_17 & 0x1) !=
429 (test_bit(i, obj_priv->bit_17) != 0)) {
430 int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
431 if (ret != 0) {
432 DRM_ERROR("Failed to swizzle page\n");
433 return;
434 }
435 set_page_dirty(obj_priv->pages[i]);
436 }
437 }
438}
439
440void
441i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
442{
443 struct drm_device *dev = obj->dev;
444 drm_i915_private_t *dev_priv = dev->dev_private;
445 struct drm_i915_gem_object *obj_priv = obj->driver_private;
446 int page_count = obj->size >> PAGE_SHIFT;
447 int i;
448
449 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
450 return;
451
452 if (obj_priv->bit_17 == NULL) {
453 obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
454 sizeof(long), GFP_KERNEL);
455 if (obj_priv->bit_17 == NULL) {
456 DRM_ERROR("Failed to allocate memory for bit 17 "
457 "record\n");
458 return;
459 }
460 }
461
462 for (i = 0; i < page_count; i++) {
463 if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
464 __set_bit(i, obj_priv->bit_17);
465 else
466 __clear_bit(i, obj_priv->bit_17);
467 }
468}
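
i915_gem_swizzle_page() above boils down to swapping the two 64-byte halves of every 128-byte block in a page, so data laid out for one value of physical address bit 17 still lands where the GPU's A6-XOR-A17 addressing expects it after the bit flips. Below is a standalone sketch of just that transformation; SKETCH_PAGE_SIZE and the plain byte buffer are stand-ins for a kmap'd struct page, not the driver code.

#include <stdio.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

static void swizzle_page(unsigned char *page)
{
	unsigned char temp[64];
	int i;

	/* Swap the 64-byte halves of each 128-byte block. */
	for (i = 0; i < SKETCH_PAGE_SIZE; i += 128) {
		memcpy(temp, &page[i], 64);
		memcpy(&page[i], &page[i + 64], 64);
		memcpy(&page[i + 64], temp, 64);
	}
}

int main(void)
{
	static unsigned char page[SKETCH_PAGE_SIZE];
	int i;

	for (i = 0; i < SKETCH_PAGE_SIZE; i++)
		page[i] = (unsigned char)i;

	swizzle_page(page);
	printf("page[0] after one swizzle:  %u (was at offset 64)\n", page[0]);

	swizzle_page(page);	/* the operation is its own inverse */
	printf("page[0] after two swizzles: %u (back to original)\n", page[0]);
	return 0;
}

Note that i915_gem_object_do_bit_17_swizzle() only applies this when a page's current bit 17 differs from the value recorded in the bit_17 bitmap saved at put_pages time, which is exactly what that bitmap exists for.
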
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 64773ce52964..c2c8e95ff14d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = {
367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
370 .find_pll = intel_find_best_PLL,
370 }, 371 },
371 { /* INTEL_LIMIT_IGD_LVDS */ 372 { /* INTEL_LIMIT_IGD_LVDS */
372 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 373 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = {
380 /* IGD only supports single-channel mode. */ 381 /* IGD only supports single-channel mode. */
381 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 382 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
382 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 383 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
384 .find_pll = intel_find_best_PLL,
383 }, 385 },
384 386
385}; 387};
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b7f0ebe9f810..3e094beecb99 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
864 864
865static struct sysrq_key_op sysrq_intelfb_restore_op = { 865static struct sysrq_key_op sysrq_intelfb_restore_op = {
866 .handler = intelfb_sysrq, 866 .handler = intelfb_sysrq,
867 .help_msg = "force fb", 867 .help_msg = "force-fb(G)",
868 .action_msg = "force restore of fb console", 868 .action_msg = "Restore framebuffer console",
869}; 869};
870 870
871int intelfb_probe(struct drm_device *dev) 871int intelfb_probe(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b06a4a3ff08d..550374225388 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -38,7 +38,7 @@
38struct intel_hdmi_priv { 38struct intel_hdmi_priv {
39 u32 sdvox_reg; 39 u32 sdvox_reg;
40 u32 save_SDVOX; 40 u32 save_SDVOX;
41 int has_hdmi_sink; 41 bool has_hdmi_sink;
42}; 42};
43 43
44static void intel_hdmi_mode_set(struct drm_encoder *encoder, 44static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
128 return true; 128 return true;
129} 129}
130 130
131static void
132intel_hdmi_sink_detect(struct drm_connector *connector)
133{
134 struct intel_output *intel_output = to_intel_output(connector);
135 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
136 struct edid *edid = NULL;
137
138 edid = drm_get_edid(&intel_output->base,
139 &intel_output->ddc_bus->adapter);
140 if (edid != NULL) {
141 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
142 kfree(edid);
143 intel_output->base.display_info.raw_edid = NULL;
144 }
145}
146
131static enum drm_connector_status 147static enum drm_connector_status
132intel_hdmi_detect(struct drm_connector *connector) 148intel_hdmi_detect(struct drm_connector *connector)
133{ 149{
@@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector)
158 return connector_status_unknown; 174 return connector_status_unknown;
159 } 175 }
160 176
161 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) 177 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
178 intel_hdmi_sink_detect(connector);
162 return connector_status_connected; 179 return connector_status_connected;
163 else 180 } else
164 return connector_status_disconnected; 181 return connector_status_disconnected;
165} 182}
166 183
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7b31f55f55c8..9913651c1e17 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1357 intel_sdvo_read_response(intel_output, &response, 2); 1357 intel_sdvo_read_response(intel_output, &response, 2);
1358} 1358}
1359 1359
1360static void
1361intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1362{
1363 struct intel_output *intel_output = to_intel_output(connector);
1364 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1365 struct edid *edid = NULL;
1366
1367 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1368 edid = drm_get_edid(&intel_output->base,
1369 &intel_output->ddc_bus->adapter);
1370 if (edid != NULL) {
1371 sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
1372 kfree(edid);
1373 intel_output->base.display_info.raw_edid = NULL;
1374 }
1375}
1376
1360static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1377static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
1361{ 1378{
1362 u8 response[2]; 1379 u8 response[2];
@@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1371 if (status != SDVO_CMD_STATUS_SUCCESS) 1388 if (status != SDVO_CMD_STATUS_SUCCESS)
1372 return connector_status_unknown; 1389 return connector_status_unknown;
1373 1390
1374 if ((response[0] != 0) || (response[1] != 0)) 1391 if ((response[0] != 0) || (response[1] != 0)) {
1392 intel_sdvo_hdmi_sink_detect(connector);
1375 return connector_status_connected; 1393 return connector_status_connected;
1376 else 1394 } else
1377 return connector_status_disconnected; 1395 return connector_status_disconnected;
1378} 1396}
1379 1397
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
deleted file mode 100644
index 345098b4ca77..000000000000
--- a/drivers/md/dm-bio-list.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright (C) 2004 Red Hat UK Ltd.
3 *
4 * This file is released under the GPL.
5 */
6
7#ifndef DM_BIO_LIST_H
8#define DM_BIO_LIST_H
9
10#include <linux/bio.h>
11
12#ifdef CONFIG_BLOCK
13
14struct bio_list {
15 struct bio *head;
16 struct bio *tail;
17};
18
19static inline int bio_list_empty(const struct bio_list *bl)
20{
21 return bl->head == NULL;
22}
23
24static inline void bio_list_init(struct bio_list *bl)
25{
26 bl->head = bl->tail = NULL;
27}
28
29#define bio_list_for_each(bio, bl) \
30 for (bio = (bl)->head; bio; bio = bio->bi_next)
31
32static inline unsigned bio_list_size(const struct bio_list *bl)
33{
34 unsigned sz = 0;
35 struct bio *bio;
36
37 bio_list_for_each(bio, bl)
38 sz++;
39
40 return sz;
41}
42
43static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
44{
45 bio->bi_next = NULL;
46
47 if (bl->tail)
48 bl->tail->bi_next = bio;
49 else
50 bl->head = bio;
51
52 bl->tail = bio;
53}
54
55static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
56{
57 bio->bi_next = bl->head;
58
59 bl->head = bio;
60
61 if (!bl->tail)
62 bl->tail = bio;
63}
64
65static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
66{
67 if (!bl2->head)
68 return;
69
70 if (bl->tail)
71 bl->tail->bi_next = bl2->head;
72 else
73 bl->head = bl2->head;
74
75 bl->tail = bl2->tail;
76}
77
78static inline void bio_list_merge_head(struct bio_list *bl,
79 struct bio_list *bl2)
80{
81 if (!bl2->head)
82 return;
83
84 if (bl->head)
85 bl2->tail->bi_next = bl->head;
86 else
87 bl->tail = bl2->tail;
88
89 bl->head = bl2->head;
90}
91
92static inline struct bio *bio_list_pop(struct bio_list *bl)
93{
94 struct bio *bio = bl->head;
95
96 if (bio) {
97 bl->head = bl->head->bi_next;
98 if (!bl->head)
99 bl->tail = NULL;
100
101 bio->bi_next = NULL;
102 }
103
104 return bio;
105}
106
107static inline struct bio *bio_list_get(struct bio_list *bl)
108{
109 struct bio *bio = bl->head;
110
111 bl->head = bl->tail = NULL;
112
113 return bio;
114}
115
116#endif /* CONFIG_BLOCK */
117#endif
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 59ee1b015d2d..559dbb52bc85 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -15,8 +15,6 @@
15 15
16#include <linux/device-mapper.h> 16#include <linux/device-mapper.h>
17 17
18#include "dm-bio-list.h"
19
20#define DM_MSG_PREFIX "delay" 18#define DM_MSG_PREFIX "delay"
21 19
22struct delay_c { 20struct delay_c {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 095f77bf9681..6a386ab4f7eb 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -8,7 +8,6 @@
8#include <linux/device-mapper.h> 8#include <linux/device-mapper.h>
9 9
10#include "dm-path-selector.h" 10#include "dm-path-selector.h"
11#include "dm-bio-list.h"
12#include "dm-bio-record.h" 11#include "dm-bio-record.h"
13#include "dm-uevent.h" 12#include "dm-uevent.h"
14 13
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 536ef0bef154..076fbb4e967a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -5,7 +5,6 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include "dm-bio-list.h"
9#include "dm-bio-record.h" 8#include "dm-bio-record.h"
10 9
11#include <linux/init.h> 10#include <linux/init.h>
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 59f8d9df9e1a..7b899be0b087 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -14,7 +14,6 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15 15
16#include "dm.h" 16#include "dm.h"
17#include "dm-bio-list.h"
18 17
19#define DM_MSG_PREFIX "region hash" 18#define DM_MSG_PREFIX "region hash"
20 19
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 981a0413068f..d73f17fc7778 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -22,7 +22,6 @@
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23 23
24#include "dm-exception-store.h" 24#include "dm-exception-store.h"
25#include "dm-bio-list.h"
26 25
27#define DM_MSG_PREFIX "snapshots" 26#define DM_MSG_PREFIX "snapshots"
28 27
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a994be035ba..424f7b048c30 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -6,7 +6,6 @@
6 */ 6 */
7 7
8#include "dm.h" 8#include "dm.h"
9#include "dm-bio-list.h"
10#include "dm-uevent.h" 9#include "dm-uevent.h"
11 10
12#include <linux/init.h> 11#include <linux/init.h>
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 274b491a11c1..36df9109cde1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -35,7 +35,6 @@
35#include <linux/blkdev.h> 35#include <linux/blkdev.h>
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include "md.h" 37#include "md.h"
38#include "dm-bio-list.h"
39#include "raid1.h" 38#include "raid1.h"
40#include "bitmap.h" 39#include "bitmap.h"
41 40
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e293d92641ac..81a54f17417e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -22,7 +22,6 @@
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/seq_file.h> 23#include <linux/seq_file.h>
24#include "md.h" 24#include "md.h"
25#include "dm-bio-list.h"
26#include "raid10.h" 25#include "raid10.h"
27#include "bitmap.h" 26#include "bitmap.h"
28 27
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 4fa3bb2ddfe4..33e5ade774ca 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -434,7 +434,8 @@ static void __init superio_parport_init(void)
434 0 /*base_hi*/, 434 0 /*base_hi*/,
435 PAR_IRQ, 435 PAR_IRQ,
436 PARPORT_DMA_NONE /* dma */, 436 PARPORT_DMA_NONE /* dma */,
437 NULL /*struct pci_dev* */) ) 437 NULL /*struct pci_dev* */),
438 0 /* shared irq flags */ )
438 439
439 printk(KERN_WARNING PFX "Probing parallel port failed.\n"); 440 printk(KERN_WARNING PFX "Probing parallel port failed.\n");
440#endif /* CONFIG_PARPORT_PC */ 441#endif /* CONFIG_PARPORT_PC */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b1bd3fc7bae8..36fd2e75da1c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1394,7 +1394,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1394 */ 1394 */
1395 cmd->sense_buffer[8] = 0; /* Information */ 1395 cmd->sense_buffer[8] = 0; /* Information */
1396 cmd->sense_buffer[9] = 0xa; /* Add. length */ 1396 cmd->sense_buffer[9] = 0xa; /* Add. length */
1397 do_div(bghm, cmd->device->sector_size); 1397 bghm /= cmd->device->sector_size;
1398 1398
1399 failing_sector = scsi_get_lba(cmd); 1399 failing_sector = scsi_get_lba(cmd);
1400 failing_sector += bghm; 1400 failing_sector += bghm;
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 7fb9b5c4669a..12d13d99b6f0 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -44,6 +44,7 @@ struct intc_handle_int {
44struct intc_desc_int { 44struct intc_desc_int {
45 struct list_head list; 45 struct list_head list;
46 struct sys_device sysdev; 46 struct sys_device sysdev;
47 pm_message_t state;
47 unsigned long *reg; 48 unsigned long *reg;
48#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
49 unsigned long *smp; 50 unsigned long *smp;
@@ -786,18 +787,44 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
786 /* get intc controller associated with this sysdev */ 787 /* get intc controller associated with this sysdev */
787 d = container_of(dev, struct intc_desc_int, sysdev); 788 d = container_of(dev, struct intc_desc_int, sysdev);
788 789
789 /* enable wakeup irqs belonging to this intc controller */ 790 switch (state.event) {
790 for_each_irq_desc(irq, desc) { 791 case PM_EVENT_ON:
791 if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip)) 792 if (d->state.event != PM_EVENT_FREEZE)
792 intc_enable(irq); 793 break;
794 for_each_irq_desc(irq, desc) {
795 if (desc->chip != &d->chip)
796 continue;
797 if (desc->status & IRQ_DISABLED)
798 intc_disable(irq);
799 else
800 intc_enable(irq);
801 }
802 break;
803 case PM_EVENT_FREEZE:
804 /* nothing has to be done */
805 break;
806 case PM_EVENT_SUSPEND:
807 /* enable wakeup irqs belonging to this intc controller */
808 for_each_irq_desc(irq, desc) {
809 if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
810 intc_enable(irq);
811 }
812 break;
793 } 813 }
814 d->state = state;
794 815
795 return 0; 816 return 0;
796} 817}
797 818
819static int intc_resume(struct sys_device *dev)
820{
821 return intc_suspend(dev, PMSG_ON);
822}
823
798static struct sysdev_class intc_sysdev_class = { 824static struct sysdev_class intc_sysdev_class = {
799 .name = "intc", 825 .name = "intc",
800 .suspend = intc_suspend, 826 .suspend = intc_suspend,
827 .resume = intc_resume,
801}; 828};
802 829
803/* register this intc as sysdev to allow suspend/resume */ 830/* register this intc as sysdev to allow suspend/resume */

diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 869d47cb6db3..0a69c0977e3f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -546,10 +546,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
546 tty->driver_data = acm; 546 tty->driver_data = acm;
547 acm->tty = tty; 547 acm->tty = tty;
548 548
549 /* force low_latency on so that our tty_push actually forces the data through,
550 otherwise it is scheduled, and with high data rates data can get lost. */
551 tty->low_latency = 1;
552
553 if (usb_autopm_get_interface(acm->control) < 0) 549 if (usb_autopm_get_interface(acm->control) < 0)
554 goto early_bail; 550 goto early_bail;
555 else 551 else
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 2620bf6fe5e1..9c4c700c7cc6 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb)
1215 } 1215 }
1216 1216
1217 tty = tty_port_tty_get(&port->port); 1217 tty = tty_port_tty_get(&port->port);
1218 if (tty && urb->actual_length) { 1218 if (tty) {
1219 usb_serial_debug_data(debug, dev, __func__, 1219 if (urb->actual_length) {
1220 urb->actual_length, urb->transfer_buffer); 1220 usb_serial_debug_data(debug, dev, __func__,
1221 1221 urb->actual_length, urb->transfer_buffer);
1222 if (!tport->tp_is_open) 1222
1223 dbg("%s - port closed, dropping data", __func__); 1223 if (!tport->tp_is_open)
1224 else 1224 dbg("%s - port closed, dropping data",
1225 ti_recv(&urb->dev->dev, tty, 1225 __func__);
1226 else
1227 ti_recv(&urb->dev->dev, tty,
1226 urb->transfer_buffer, 1228 urb->transfer_buffer,
1227 urb->actual_length); 1229 urb->actual_length);
1228 1230 spin_lock(&tport->tp_lock);
1229 spin_lock(&tport->tp_lock); 1231 tport->tp_icount.rx += urb->actual_length;
1230 tport->tp_icount.rx += urb->actual_length; 1232 spin_unlock(&tport->tp_lock);
1231 spin_unlock(&tport->tp_lock); 1233 }
1232 tty_kref_put(tty); 1234 tty_kref_put(tty);
1233 } 1235 }
1234 1236
diff --git a/fs/bio.c b/fs/bio.c
index e0c9e545bbfa..cd42bb882f30 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -348,6 +348,24 @@ err:
348 return NULL; 348 return NULL;
349} 349}
350 350
351/**
352 * bio_alloc - allocate a bio for I/O
353 * @gfp_mask: the GFP_ mask given to the slab allocator
354 * @nr_iovecs: number of iovecs to pre-allocate
355 *
356 * Description:
357 * bio_alloc will allocate a bio and associated bio_vec array that can hold
358 * at least @nr_iovecs entries. Allocations will be done from the
359 * fs_bio_set. Also see @bio_alloc_bioset.
360 *
361 * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
362 * a bio. This is due to the mempool guarantees. To make this work, callers
363 * must never allocate more than 1 bio at the time from this pool. Callers
364 * that need to allocate more than 1 bio must always submit the previously
365 * allocate bio for IO before attempting to allocate a new one. Failure to
366 * do so can cause livelocks under memory pressure.
367 *
368 **/
351struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) 369struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
352{ 370{
353 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 371 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
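
The new bio_alloc() comment above rests on one invariant: a mempool with a small reserve can only guarantee forward progress if each caller holds at most one unsubmitted bio from it at a time. The toy userspace model below illustrates that reasoning with a one-object "reserve"; pool_take() and pool_put() are invented stand-ins for fs_bio_set and submission, not the kernel mempool API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool reserve_in_use;	/* models the single emergency reserve object */

/* Take an object; returns -1 where the real pool would wait forever for
 * an object that only this same caller could ever give back. */
static int pool_take(void)
{
	if (reserve_in_use)
		return -1;
	reserve_in_use = true;
	return 0;
}

static void pool_put(void)	/* models submitting the bio for IO */
{
	reserve_in_use = false;
}

int main(void)
{
	int i;

	/* Correct discipline: submit each bio before allocating the next. */
	for (i = 0; i < 3; i++) {
		assert(pool_take() == 0);
		pool_put();
	}
	printf("allocate-submit-allocate: always succeeds\n");

	/* Holding two at once exhausts a reserve of one: livelock. */
	assert(pool_take() == 0);
	printf("second allocation while holding one: %s\n",
	       pool_take() == 0 ? "ok" : "would livelock");
	return 0;
}
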
diff --git a/fs/buffer.c b/fs/buffer.c
index 13edf7ad3ff1..ff8bb1f2333a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -547,7 +547,7 @@ repeat:
547 return err; 547 return err;
548} 548}
549 549
550void do_thaw_all(unsigned long unused) 550void do_thaw_all(struct work_struct *work)
551{ 551{
552 struct super_block *sb; 552 struct super_block *sb;
553 char b[BDEVNAME_SIZE]; 553 char b[BDEVNAME_SIZE];
@@ -567,6 +567,7 @@ restart:
567 goto restart; 567 goto restart;
568 } 568 }
569 spin_unlock(&sb_lock); 569 spin_unlock(&sb_lock);
570 kfree(work);
570 printk(KERN_WARNING "Emergency Thaw complete\n"); 571 printk(KERN_WARNING "Emergency Thaw complete\n");
571} 572}
572 573
@@ -577,7 +578,13 @@ restart:
577 */ 578 */
578void emergency_thaw_all(void) 579void emergency_thaw_all(void)
579{ 580{
580 pdflush_operation(do_thaw_all, 0); 581 struct work_struct *work;
582
583 work = kmalloc(sizeof(*work), GFP_ATOMIC);
584 if (work) {
585 INIT_WORK(work, do_thaw_all);
586 schedule_work(work);
587 }
581} 588}
582 589
583/** 590/**
diff --git a/fs/direct-io.c b/fs/direct-io.c
index da258e7249cc..05763bbc2050 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -307,8 +307,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
307 struct bio *bio; 307 struct bio *bio;
308 308
309 bio = bio_alloc(GFP_KERNEL, nr_vecs); 309 bio = bio_alloc(GFP_KERNEL, nr_vecs);
310 if (bio == NULL)
311 return -ENOMEM;
312 310
313 bio->bi_bdev = bdev; 311 bio->bi_bdev = bdev;
314 bio->bi_sector = first_sector; 312 bio->bi_sector = first_sector;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 6132353dcf62..2a1cb0979768 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2416,8 +2416,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2416 len = ee_len; 2416 len = ee_len;
2417 2417
2418 bio = bio_alloc(GFP_NOIO, len); 2418 bio = bio_alloc(GFP_NOIO, len);
2419 if (!bio)
2420 return -ENOMEM;
2421 bio->bi_sector = ee_pblock; 2419 bio->bi_sector = ee_pblock;
2422 bio->bi_bdev = inode->i_sb->s_bdev; 2420 bio->bi_bdev = inode->i_sb->s_bdev;
2423 2421
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 2b25133524a3..06f30e965676 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -938,9 +938,9 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
938} 938}
939 939
940static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf, 940static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
941 unsigned *nbytesp, int write) 941 size_t *nbytesp, int write)
942{ 942{
943 unsigned nbytes = *nbytesp; 943 size_t nbytes = *nbytesp;
944 unsigned long user_addr = (unsigned long) buf; 944 unsigned long user_addr = (unsigned long) buf;
945 unsigned offset = user_addr & ~PAGE_MASK; 945 unsigned offset = user_addr & ~PAGE_MASK;
946 int npages; 946 int npages;
@@ -955,7 +955,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
955 return 0; 955 return 0;
956 } 956 }
957 957
958 nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); 958 nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
959 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 959 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
960 npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ); 960 npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
961 down_read(&current->mm->mmap_sem); 961 down_read(&current->mm->mmap_sem);
@@ -1298,6 +1298,8 @@ static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
1298 if (vma->vm_flags & VM_MAYSHARE) 1298 if (vma->vm_flags & VM_MAYSHARE)
1299 return -ENODEV; 1299 return -ENODEV;
1300 1300
1301 invalidate_inode_pages2(file->f_mapping);
1302
1301 return generic_file_mmap(file, vma); 1303 return generic_file_mmap(file, vma);
1302} 1304}
1303 1305
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3984e47d1d33..1afd9f26bcb1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -597,7 +597,6 @@ __acquires(&gl->gl_spin)
597 597
598 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 598 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
599 599
600 down_read(&gfs2_umount_flush_sem);
601 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 600 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
602 gl->gl_demote_state != gl->gl_state) { 601 gl->gl_demote_state != gl->gl_state) {
603 if (find_first_holder(gl)) 602 if (find_first_holder(gl))
@@ -614,15 +613,14 @@ __acquires(&gl->gl_spin)
614 if (ret == 0) 613 if (ret == 0)
615 goto out_unlock; 614 goto out_unlock;
616 if (ret == 2) 615 if (ret == 2)
617 goto out_sem; 616 goto out;
618 gh = find_first_waiter(gl); 617 gh = find_first_waiter(gl);
619 gl->gl_target = gh->gh_state; 618 gl->gl_target = gh->gh_state;
620 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 619 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
621 do_error(gl, 0); /* Fail queued try locks */ 620 do_error(gl, 0); /* Fail queued try locks */
622 } 621 }
623 do_xmote(gl, gh, gl->gl_target); 622 do_xmote(gl, gh, gl->gl_target);
624out_sem: 623out:
625 up_read(&gfs2_umount_flush_sem);
626 return; 624 return;
627 625
628out_sched: 626out_sched:
@@ -631,7 +629,7 @@ out_sched:
631 gfs2_glock_put(gl); 629 gfs2_glock_put(gl);
632out_unlock: 630out_unlock:
633 clear_bit(GLF_LOCK, &gl->gl_flags); 631 clear_bit(GLF_LOCK, &gl->gl_flags);
634 goto out_sem; 632 goto out;
635} 633}
636 634
637static void glock_work_func(struct work_struct *work) 635static void glock_work_func(struct work_struct *work)
@@ -641,6 +639,7 @@ static void glock_work_func(struct work_struct *work)
641 639
642 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 640 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
643 finish_xmote(gl, gl->gl_reply); 641 finish_xmote(gl, gl->gl_reply);
642 down_read(&gfs2_umount_flush_sem);
644 spin_lock(&gl->gl_spin); 643 spin_lock(&gl->gl_spin);
645 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 644 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
646 gl->gl_state != LM_ST_UNLOCKED && 645 gl->gl_state != LM_ST_UNLOCKED &&
@@ -653,6 +652,7 @@ static void glock_work_func(struct work_struct *work)
653 } 652 }
654 run_queue(gl, 0); 653 run_queue(gl, 0);
655 spin_unlock(&gl->gl_spin); 654 spin_unlock(&gl->gl_spin);
655 up_read(&gfs2_umount_flush_sem);
656 if (!delay || 656 if (!delay ||
657 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 657 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
658 gfs2_glock_put(gl); 658 gfs2_glock_put(gl);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 7b277d449155..5a31d426116f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -137,15 +137,15 @@ void gfs2_set_iop(struct inode *inode)
137 if (S_ISREG(mode)) { 137 if (S_ISREG(mode)) {
138 inode->i_op = &gfs2_file_iops; 138 inode->i_op = &gfs2_file_iops;
139 if (gfs2_localflocks(sdp)) 139 if (gfs2_localflocks(sdp))
140 inode->i_fop = gfs2_file_fops_nolock; 140 inode->i_fop = &gfs2_file_fops_nolock;
141 else 141 else
142 inode->i_fop = gfs2_file_fops; 142 inode->i_fop = &gfs2_file_fops;
143 } else if (S_ISDIR(mode)) { 143 } else if (S_ISDIR(mode)) {
144 inode->i_op = &gfs2_dir_iops; 144 inode->i_op = &gfs2_dir_iops;
145 if (gfs2_localflocks(sdp)) 145 if (gfs2_localflocks(sdp))
146 inode->i_fop = gfs2_dir_fops_nolock; 146 inode->i_fop = &gfs2_dir_fops_nolock;
147 else 147 else
148 inode->i_fop = gfs2_dir_fops; 148 inode->i_fop = &gfs2_dir_fops;
149 } else if (S_ISLNK(mode)) { 149 } else if (S_ISLNK(mode)) {
150 inode->i_op = &gfs2_symlink_iops; 150 inode->i_op = &gfs2_symlink_iops;
151 } else { 151 } else {
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index dca4fee3078b..c30be2b66580 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -101,21 +101,23 @@ void gfs2_dinode_print(const struct gfs2_inode *ip);
101extern const struct inode_operations gfs2_file_iops; 101extern const struct inode_operations gfs2_file_iops;
102extern const struct inode_operations gfs2_dir_iops; 102extern const struct inode_operations gfs2_dir_iops;
103extern const struct inode_operations gfs2_symlink_iops; 103extern const struct inode_operations gfs2_symlink_iops;
104extern const struct file_operations *gfs2_file_fops_nolock; 104extern const struct file_operations gfs2_file_fops_nolock;
105extern const struct file_operations *gfs2_dir_fops_nolock; 105extern const struct file_operations gfs2_dir_fops_nolock;
106 106
107extern void gfs2_set_inode_flags(struct inode *inode); 107extern void gfs2_set_inode_flags(struct inode *inode);
108 108
109#ifdef CONFIG_GFS2_FS_LOCKING_DLM 109#ifdef CONFIG_GFS2_FS_LOCKING_DLM
110extern const struct file_operations *gfs2_file_fops; 110extern const struct file_operations gfs2_file_fops;
111extern const struct file_operations *gfs2_dir_fops; 111extern const struct file_operations gfs2_dir_fops;
112
112static inline int gfs2_localflocks(const struct gfs2_sbd *sdp) 113static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
113{ 114{
114 return sdp->sd_args.ar_localflocks; 115 return sdp->sd_args.ar_localflocks;
115} 116}
116#else /* Single node only */ 117#else /* Single node only */
117#define gfs2_file_fops NULL 118#define gfs2_file_fops gfs2_file_fops_nolock
118#define gfs2_dir_fops NULL 119#define gfs2_dir_fops gfs2_dir_fops_nolock
120
119static inline int gfs2_localflocks(const struct gfs2_sbd *sdp) 121static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
120{ 122{
121 return 1; 123 return 1;
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 70b9b8548945..101caf3ee861 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -705,7 +705,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
705 } 705 }
706} 706}
707 707
708const struct file_operations *gfs2_file_fops = &(const struct file_operations){ 708const struct file_operations gfs2_file_fops = {
709 .llseek = gfs2_llseek, 709 .llseek = gfs2_llseek,
710 .read = do_sync_read, 710 .read = do_sync_read,
711 .aio_read = generic_file_aio_read, 711 .aio_read = generic_file_aio_read,
@@ -723,7 +723,7 @@ const struct file_operations *gfs2_file_fops = &(const struct file_operations){
723 .setlease = gfs2_setlease, 723 .setlease = gfs2_setlease,
724}; 724};
725 725
726const struct file_operations *gfs2_dir_fops = &(const struct file_operations){ 726const struct file_operations gfs2_dir_fops = {
727 .readdir = gfs2_readdir, 727 .readdir = gfs2_readdir,
728 .unlocked_ioctl = gfs2_ioctl, 728 .unlocked_ioctl = gfs2_ioctl,
729 .open = gfs2_open, 729 .open = gfs2_open,
@@ -735,7 +735,7 @@ const struct file_operations *gfs2_dir_fops = &(const struct file_operations){
735 735
736#endif /* CONFIG_GFS2_FS_LOCKING_DLM */ 736#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
737 737
738const struct file_operations *gfs2_file_fops_nolock = &(const struct file_operations){ 738const struct file_operations gfs2_file_fops_nolock = {
739 .llseek = gfs2_llseek, 739 .llseek = gfs2_llseek,
740 .read = do_sync_read, 740 .read = do_sync_read,
741 .aio_read = generic_file_aio_read, 741 .aio_read = generic_file_aio_read,
@@ -751,7 +751,7 @@ const struct file_operations *gfs2_file_fops_nolock = &(const struct file_operat
751 .setlease = generic_setlease, 751 .setlease = generic_setlease,
752}; 752};
753 753
754const struct file_operations *gfs2_dir_fops_nolock = &(const struct file_operations){ 754const struct file_operations gfs2_dir_fops_nolock = {
755 .readdir = gfs2_readdir, 755 .readdir = gfs2_readdir,
756 .unlocked_ioctl = gfs2_ioctl, 756 .unlocked_ioctl = gfs2_ioctl,
757 .open = gfs2_open, 757 .open = gfs2_open,
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 51883b3ad89c..650a730707b7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -272,11 +272,6 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
272 lock_page(page); 272 lock_page(page);
273 273
274 bio = bio_alloc(GFP_NOFS, 1); 274 bio = bio_alloc(GFP_NOFS, 1);
275 if (unlikely(!bio)) {
276 __free_page(page);
277 return -ENOBUFS;
278 }
279
280 bio->bi_sector = sector * (sb->s_blocksize >> 9); 275 bio->bi_sector = sector * (sb->s_blocksize >> 9);
281 bio->bi_bdev = sb->s_bdev; 276 bio->bi_bdev = sb->s_bdev;
282 bio_add_page(bio, page, PAGE_SIZE, 0); 277 bio_add_page(bio, page, PAGE_SIZE, 0);
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index abd5429ae285..1c70fa5168d6 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -371,6 +371,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
371 ip = ghs[1].gh_gl->gl_object; 371 ip = ghs[1].gh_gl->gl_object;
372 372
373 ip->i_disksize = size; 373 ip->i_disksize = size;
374 i_size_write(inode, size);
374 375
375 error = gfs2_meta_inode_buffer(ip, &dibh); 376 error = gfs2_meta_inode_buffer(ip, &dibh);
376 377
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 8d53f66b5bcc..152e6c4a0dca 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -81,7 +81,7 @@ struct gfs2_quota_change_host {
81 81
82static LIST_HEAD(qd_lru_list); 82static LIST_HEAD(qd_lru_list);
83static atomic_t qd_lru_count = ATOMIC_INIT(0); 83static atomic_t qd_lru_count = ATOMIC_INIT(0);
84static spinlock_t qd_lru_lock = SPIN_LOCK_UNLOCKED; 84static DEFINE_SPINLOCK(qd_lru_lock);
85 85
86int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask) 86int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
87{ 87{
@@ -1364,7 +1364,7 @@ int gfs2_quotad(void *data)
1364 refrigerator(); 1364 refrigerator();
1365 t = min(quotad_timeo, statfs_timeo); 1365 t = min(quotad_timeo, statfs_timeo);
1366 1366
1367 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE); 1367 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1368 spin_lock(&sdp->sd_trunc_lock); 1368 spin_lock(&sdp->sd_trunc_lock);
1369 empty = list_empty(&sdp->sd_trunc_list); 1369 empty = list_empty(&sdp->sd_trunc_list);
1370 spin_unlock(&sdp->sd_trunc_lock); 1370 spin_unlock(&sdp->sd_trunc_lock);
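The quota.c hunk above replaces the deprecated SPIN_LOCK_UNLOCKED initializer with DEFINE_SPINLOCK(), which gives the static lock its own lockdep key, and puts quotad's wait into TASK_INTERRUPTIBLE (an uninterruptible sleeper would, among other things, count toward load average). A minimal sketch of the static-spinlock idiom with hypothetical demo_* names:

#include <linux/spinlock.h>
#include <linux/list.h>

/* DEFINE_SPINLOCK() initializes the lock and registers a lockdep key. */
static DEFINE_SPINLOCK(demo_lru_lock);
static LIST_HEAD(demo_lru_list);

static void demo_lru_add(struct list_head *entry)
{
        spin_lock(&demo_lru_lock);
        list_add_tail(entry, &demo_lru_list);
        spin_unlock(&demo_lru_lock);
}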
diff --git a/fs/inode.c b/fs/inode.c
index d06d6d268de9..6ad14a1cd8c9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1470,42 +1470,6 @@ static void __wait_on_freeing_inode(struct inode *inode)
1470 spin_lock(&inode_lock); 1470 spin_lock(&inode_lock);
1471} 1471}
1472 1472
1473/*
1474 * We rarely want to lock two inodes that do not have a parent/child
1475 * relationship (such as directory, child inode) simultaneously. The
1476 * vast majority of file systems should be able to get along fine
1477 * without this. Do not use these functions except as a last resort.
1478 */
1479void inode_double_lock(struct inode *inode1, struct inode *inode2)
1480{
1481 if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
1482 if (inode1)
1483 mutex_lock(&inode1->i_mutex);
1484 else if (inode2)
1485 mutex_lock(&inode2->i_mutex);
1486 return;
1487 }
1488
1489 if (inode1 < inode2) {
1490 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
1491 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
1492 } else {
1493 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
1494 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
1495 }
1496}
1497EXPORT_SYMBOL(inode_double_lock);
1498
1499void inode_double_unlock(struct inode *inode1, struct inode *inode2)
1500{
1501 if (inode1)
1502 mutex_unlock(&inode1->i_mutex);
1503
1504 if (inode2 && inode2 != inode1)
1505 mutex_unlock(&inode2->i_mutex);
1506}
1507EXPORT_SYMBOL(inode_double_unlock);
1508
1509static __initdata unsigned long ihash_entries; 1473static __initdata unsigned long ihash_entries;
1510static int __init set_ihash_entries(char *str) 1474static int __init set_ihash_entries(char *str)
1511{ 1475{
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 24638e059bf3..064279e33bbb 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -688,6 +688,8 @@ static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_gc = {
688 .bpop_translate = NULL, 688 .bpop_translate = NULL,
689}; 689};
690 690
691static struct lock_class_key nilfs_bmap_dat_lock_key;
692
691/** 693/**
692 * nilfs_bmap_read - read a bmap from an inode 694 * nilfs_bmap_read - read a bmap from an inode
693 * @bmap: bmap 695 * @bmap: bmap
@@ -715,6 +717,7 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
715 bmap->b_pops = &nilfs_bmap_ptr_ops_p; 717 bmap->b_pops = &nilfs_bmap_ptr_ops_p;
716 bmap->b_last_allocated_key = 0; /* XXX: use macro */ 718 bmap->b_last_allocated_key = 0; /* XXX: use macro */
717 bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; 719 bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
720 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
718 break; 721 break;
719 case NILFS_CPFILE_INO: 722 case NILFS_CPFILE_INO:
720 case NILFS_SUFILE_INO: 723 case NILFS_SUFILE_INO:
@@ -772,6 +775,7 @@ void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
772{ 775{
773 memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union)); 776 memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union));
774 init_rwsem(&gcbmap->b_sem); 777 init_rwsem(&gcbmap->b_sem);
778 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
775 gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode; 779 gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode;
776} 780}
777 781
@@ -779,5 +783,6 @@ void nilfs_bmap_commit_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
779{ 783{
780 memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union)); 784 memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union));
781 init_rwsem(&bmap->b_sem); 785 init_rwsem(&bmap->b_sem);
786 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
782 bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; 787 bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
783} 788}
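The bmap.c hunks above give the DAT bmap's rw_semaphore a dedicated lock class via lockdep_set_class(), so that nesting it against other bmap semaphores does not trip lockdep's recursion checks. A minimal sketch of the idiom with a hypothetical demo_* structure (not NILFS code):

#include <linux/rwsem.h>
#include <linux/lockdep.h>

/* One static key per distinct locking role. */
static struct lock_class_key demo_dat_lock_key;

struct demo_bmap {
        struct rw_semaphore b_sem;
};

static void demo_bmap_init(struct demo_bmap *bmap, int is_dat)
{
        init_rwsem(&bmap->b_sem);
        /* Re-key after init so DAT bmaps form their own lockdep class. */
        if (is_dat)
                lockdep_set_class(&bmap->b_sem, &demo_dat_lock_key);
}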
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 7558c977db02..3d0c18a16db1 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -35,11 +35,6 @@
35#include "bmap_union.h" 35#include "bmap_union.h"
36 36
37/* 37/*
38 * NILFS filesystem version
39 */
40#define NILFS_VERSION "2.0.5"
41
42/*
43 * nilfs inode data in memory 38 * nilfs inode data in memory
44 */ 39 */
45struct nilfs_inode_info { 40struct nilfs_inode_info {
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 6ade0963fc1d..4fc081e47d70 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -413,7 +413,6 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
413 struct nilfs_segment_entry *ent, *n; 413 struct nilfs_segment_entry *ent, *n;
414 struct inode *sufile = nilfs->ns_sufile; 414 struct inode *sufile = nilfs->ns_sufile;
415 __u64 segnum[4]; 415 __u64 segnum[4];
416 time_t mtime;
417 int err; 416 int err;
418 int i; 417 int i;
419 418
@@ -442,24 +441,13 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
442 * Collecting segments written after the latest super root. 441 * Collecting segments written after the latest super root.
443 * These are marked dirty to avoid being reallocated in the next write. 442 * These are marked dirty to avoid being reallocated in the next write.
444 */ 443 */
445 mtime = get_seconds();
446 list_for_each_entry_safe(ent, n, head, list) { 444 list_for_each_entry_safe(ent, n, head, list) {
447 if (ent->segnum == segnum[0]) { 445 if (ent->segnum != segnum[0]) {
448 list_del(&ent->list); 446 err = nilfs_sufile_scrap(sufile, ent->segnum);
449 nilfs_free_segment_entry(ent); 447 if (unlikely(err))
450 continue; 448 goto failed;
451 }
452 err = nilfs_open_segment_entry(ent, sufile);
453 if (unlikely(err))
454 goto failed;
455 if (!nilfs_segment_usage_dirty(ent->raw_su)) {
456 /* make the segment garbage */
457 ent->raw_su->su_nblocks = cpu_to_le32(0);
458 ent->raw_su->su_lastmod = cpu_to_le32(mtime);
459 nilfs_segment_usage_set_dirty(ent->raw_su);
460 } 449 }
461 list_del(&ent->list); 450 list_del(&ent->list);
462 nilfs_close_segment_entry(ent, sufile);
463 nilfs_free_segment_entry(ent); 451 nilfs_free_segment_entry(ent);
464 } 452 }
465 453
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index c774cf397e2f..98e68677f045 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -93,6 +93,52 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
93 create, NULL, bhp); 93 create, NULL, bhp);
94} 94}
95 95
96static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
97 u64 ncleanadd, u64 ndirtyadd)
98{
99 struct nilfs_sufile_header *header;
100 void *kaddr;
101
102 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
103 header = kaddr + bh_offset(header_bh);
104 le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
105 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
106 kunmap_atomic(kaddr, KM_USER0);
107
108 nilfs_mdt_mark_buffer_dirty(header_bh);
109}
110
111int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
112 void (*dofunc)(struct inode *, __u64,
113 struct buffer_head *,
114 struct buffer_head *))
115{
116 struct buffer_head *header_bh, *bh;
117 int ret;
118
119 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
120 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
121 __func__, (unsigned long long)segnum);
122 return -EINVAL;
123 }
124 down_write(&NILFS_MDT(sufile)->mi_sem);
125
126 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
127 if (ret < 0)
128 goto out_sem;
129
130 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
131 if (!ret) {
132 dofunc(sufile, segnum, header_bh, bh);
133 brelse(bh);
134 }
135 brelse(header_bh);
136
137 out_sem:
138 up_write(&NILFS_MDT(sufile)->mi_sem);
139 return ret;
140}
141
96/** 142/**
97 * nilfs_sufile_alloc - allocate a segment 143 * nilfs_sufile_alloc - allocate a segment
98 * @sufile: inode of segment usage file 144 * @sufile: inode of segment usage file
@@ -113,7 +159,6 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
113int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) 159int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
114{ 160{
115 struct buffer_head *header_bh, *su_bh; 161 struct buffer_head *header_bh, *su_bh;
116 struct the_nilfs *nilfs;
117 struct nilfs_sufile_header *header; 162 struct nilfs_sufile_header *header;
118 struct nilfs_segment_usage *su; 163 struct nilfs_segment_usage *su;
119 size_t susz = NILFS_MDT(sufile)->mi_entry_size; 164 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
@@ -124,8 +169,6 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
124 169
125 down_write(&NILFS_MDT(sufile)->mi_sem); 170 down_write(&NILFS_MDT(sufile)->mi_sem);
126 171
127 nilfs = NILFS_MDT(sufile)->mi_nilfs;
128
129 ret = nilfs_sufile_get_header_block(sufile, &header_bh); 172 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
130 if (ret < 0) 173 if (ret < 0)
131 goto out_sem; 174 goto out_sem;
@@ -192,165 +235,84 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
192 return ret; 235 return ret;
193} 236}
194 237
195/** 238void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
196 * nilfs_sufile_cancel_free - 239 struct buffer_head *header_bh,
197 * @sufile: inode of segment usage file 240 struct buffer_head *su_bh)
198 * @segnum: segment number
199 *
200 * Description:
201 *
202 * Return Value: On success, 0 is returned. On error, one of the following
203 * negative error codes is returned.
204 *
205 * %-EIO - I/O error.
206 *
207 * %-ENOMEM - Insufficient amount of memory available.
208 */
209int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
210{ 241{
211 struct buffer_head *header_bh, *su_bh;
212 struct the_nilfs *nilfs;
213 struct nilfs_sufile_header *header;
214 struct nilfs_segment_usage *su; 242 struct nilfs_segment_usage *su;
215 void *kaddr; 243 void *kaddr;
216 int ret;
217
218 down_write(&NILFS_MDT(sufile)->mi_sem);
219
220 nilfs = NILFS_MDT(sufile)->mi_nilfs;
221
222 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
223 if (ret < 0)
224 goto out_sem;
225
226 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
227 if (ret < 0)
228 goto out_header;
229 244
230 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 245 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
231 su = nilfs_sufile_block_get_segment_usage( 246 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
232 sufile, segnum, su_bh, kaddr);
233 if (unlikely(!nilfs_segment_usage_clean(su))) { 247 if (unlikely(!nilfs_segment_usage_clean(su))) {
234 printk(KERN_WARNING "%s: segment %llu must be clean\n", 248 printk(KERN_WARNING "%s: segment %llu must be clean\n",
235 __func__, (unsigned long long)segnum); 249 __func__, (unsigned long long)segnum);
236 kunmap_atomic(kaddr, KM_USER0); 250 kunmap_atomic(kaddr, KM_USER0);
237 goto out_su_bh; 251 return;
238 } 252 }
239 nilfs_segment_usage_set_dirty(su); 253 nilfs_segment_usage_set_dirty(su);
240 kunmap_atomic(kaddr, KM_USER0); 254 kunmap_atomic(kaddr, KM_USER0);
241 255
242 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 256 nilfs_sufile_mod_counter(header_bh, -1, 1);
243 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
244 le64_add_cpu(&header->sh_ncleansegs, -1);
245 le64_add_cpu(&header->sh_ndirtysegs, 1);
246 kunmap_atomic(kaddr, KM_USER0);
247
248 nilfs_mdt_mark_buffer_dirty(header_bh);
249 nilfs_mdt_mark_buffer_dirty(su_bh); 257 nilfs_mdt_mark_buffer_dirty(su_bh);
250 nilfs_mdt_mark_dirty(sufile); 258 nilfs_mdt_mark_dirty(sufile);
251
252 out_su_bh:
253 brelse(su_bh);
254 out_header:
255 brelse(header_bh);
256 out_sem:
257 up_write(&NILFS_MDT(sufile)->mi_sem);
258 return ret;
259} 259}
260 260
261/** 261void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
262 * nilfs_sufile_freev - free segments 262 struct buffer_head *header_bh,
263 * @sufile: inode of segment usage file 263 struct buffer_head *su_bh)
264 * @segnum: array of segment numbers
265 * @nsegs: number of segments
266 *
267 * Description: nilfs_sufile_freev() frees segments specified by @segnum and
268 * @nsegs, which must have been returned by a previous call to
269 * nilfs_sufile_alloc().
270 *
271 * Return Value: On success, 0 is returned. On error, one of the following
272 * negative error codes is returned.
273 *
274 * %-EIO - I/O error.
275 *
276 * %-ENOMEM - Insufficient amount of memory available.
277 */
278#define NILFS_SUFILE_FREEV_PREALLOC 16
279int nilfs_sufile_freev(struct inode *sufile, __u64 *segnum, size_t nsegs)
280{ 264{
281 struct buffer_head *header_bh, **su_bh,
282 *su_bh_prealloc[NILFS_SUFILE_FREEV_PREALLOC];
283 struct the_nilfs *nilfs;
284 struct nilfs_sufile_header *header;
285 struct nilfs_segment_usage *su; 265 struct nilfs_segment_usage *su;
286 void *kaddr; 266 void *kaddr;
287 int ret, i; 267 int clean, dirty;
288 268
289 down_write(&NILFS_MDT(sufile)->mi_sem); 269 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
290 270 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
291 nilfs = NILFS_MDT(sufile)->mi_nilfs; 271 if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
292 272 su->su_nblocks == cpu_to_le32(0)) {
293 /* prepare resources */
294 if (nsegs <= NILFS_SUFILE_FREEV_PREALLOC)
295 su_bh = su_bh_prealloc;
296 else {
297 su_bh = kmalloc(sizeof(*su_bh) * nsegs, GFP_NOFS);
298 if (su_bh == NULL) {
299 ret = -ENOMEM;
300 goto out_sem;
301 }
302 }
303
304 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
305 if (ret < 0)
306 goto out_su_bh;
307 for (i = 0; i < nsegs; i++) {
308 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum[i],
309 0, &su_bh[i]);
310 if (ret < 0)
311 goto out_bh;
312 }
313
314 /* free segments */
315 for (i = 0; i < nsegs; i++) {
316 kaddr = kmap_atomic(su_bh[i]->b_page, KM_USER0);
317 su = nilfs_sufile_block_get_segment_usage(
318 sufile, segnum[i], su_bh[i], kaddr);
319 WARN_ON(nilfs_segment_usage_error(su));
320 nilfs_segment_usage_set_clean(su);
321 kunmap_atomic(kaddr, KM_USER0); 273 kunmap_atomic(kaddr, KM_USER0);
322 nilfs_mdt_mark_buffer_dirty(su_bh[i]); 274 return;
323 } 275 }
324 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 276 clean = nilfs_segment_usage_clean(su);
325 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr); 277 dirty = nilfs_segment_usage_dirty(su);
326 le64_add_cpu(&header->sh_ncleansegs, nsegs); 278
327 le64_add_cpu(&header->sh_ndirtysegs, -(u64)nsegs); 279 /* make the segment garbage */
280 su->su_lastmod = cpu_to_le64(0);
281 su->su_nblocks = cpu_to_le32(0);
282 su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
328 kunmap_atomic(kaddr, KM_USER0); 283 kunmap_atomic(kaddr, KM_USER0);
329 nilfs_mdt_mark_buffer_dirty(header_bh); 284
285 nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
286 nilfs_mdt_mark_buffer_dirty(su_bh);
330 nilfs_mdt_mark_dirty(sufile); 287 nilfs_mdt_mark_dirty(sufile);
288}
331 289
332 out_bh: 290void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
333 for (i--; i >= 0; i--) 291 struct buffer_head *header_bh,
334 brelse(su_bh[i]); 292 struct buffer_head *su_bh)
335 brelse(header_bh); 293{
294 struct nilfs_segment_usage *su;
295 void *kaddr;
296 int sudirty;
336 297
337 out_su_bh: 298 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
338 if (su_bh != su_bh_prealloc) 299 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
339 kfree(su_bh); 300 if (nilfs_segment_usage_clean(su)) {
301 printk(KERN_WARNING "%s: segment %llu is already clean\n",
302 __func__, (unsigned long long)segnum);
303 kunmap_atomic(kaddr, KM_USER0);
304 return;
305 }
306 WARN_ON(nilfs_segment_usage_error(su));
307 WARN_ON(!nilfs_segment_usage_dirty(su));
340 308
341 out_sem: 309 sudirty = nilfs_segment_usage_dirty(su);
342 up_write(&NILFS_MDT(sufile)->mi_sem); 310 nilfs_segment_usage_set_clean(su);
343 return ret; 311 kunmap_atomic(kaddr, KM_USER0);
344} 312 nilfs_mdt_mark_buffer_dirty(su_bh);
345 313
346/** 314 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
347 * nilfs_sufile_free - 315 nilfs_mdt_mark_dirty(sufile);
348 * @sufile:
349 * @segnum:
350 */
351int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
352{
353 return nilfs_sufile_freev(sufile, &segnum, 1);
354} 316}
355 317
356/** 318/**
@@ -500,72 +462,28 @@ int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
500 return ret; 462 return ret;
501} 463}
502 464
503/** 465void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
504 * nilfs_sufile_set_error - mark a segment as erroneous 466 struct buffer_head *header_bh,
505 * @sufile: inode of segment usage file 467 struct buffer_head *su_bh)
506 * @segnum: segment number
507 *
508 * Description: nilfs_sufile_set_error() marks the segment specified by
509 * @segnum as erroneous. The error segment will never be used again.
510 *
511 * Return Value: On success, 0 is returned. On error, one of the following
512 * negative error codes is returned.
513 *
514 * %-EIO - I/O error.
515 *
516 * %-ENOMEM - Insufficient amount of memory available.
517 *
518 * %-EINVAL - Invalid segment usage number.
519 */
520int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
521{ 468{
522 struct buffer_head *header_bh, *su_bh;
523 struct nilfs_segment_usage *su; 469 struct nilfs_segment_usage *su;
524 struct nilfs_sufile_header *header;
525 void *kaddr; 470 void *kaddr;
526 int ret; 471 int suclean;
527
528 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
529 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
530 __func__, (unsigned long long)segnum);
531 return -EINVAL;
532 }
533 down_write(&NILFS_MDT(sufile)->mi_sem);
534
535 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
536 if (ret < 0)
537 goto out_sem;
538 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
539 if (ret < 0)
540 goto out_header;
541 472
542 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 473 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
543 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 474 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
544 if (nilfs_segment_usage_error(su)) { 475 if (nilfs_segment_usage_error(su)) {
545 kunmap_atomic(kaddr, KM_USER0); 476 kunmap_atomic(kaddr, KM_USER0);
546 brelse(su_bh); 477 return;
547 goto out_header;
548 } 478 }
549 479 suclean = nilfs_segment_usage_clean(su);
550 nilfs_segment_usage_set_error(su); 480 nilfs_segment_usage_set_error(su);
551 kunmap_atomic(kaddr, KM_USER0); 481 kunmap_atomic(kaddr, KM_USER0);
552 brelse(su_bh);
553 482
554 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 483 if (suclean)
555 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr); 484 nilfs_sufile_mod_counter(header_bh, -1, 0);
556 le64_add_cpu(&header->sh_ndirtysegs, -1);
557 kunmap_atomic(kaddr, KM_USER0);
558 nilfs_mdt_mark_buffer_dirty(header_bh);
559 nilfs_mdt_mark_buffer_dirty(su_bh); 485 nilfs_mdt_mark_buffer_dirty(su_bh);
560 nilfs_mdt_mark_dirty(sufile); 486 nilfs_mdt_mark_dirty(sufile);
561 brelse(su_bh);
562
563 out_header:
564 brelse(header_bh);
565
566 out_sem:
567 up_write(&NILFS_MDT(sufile)->mi_sem);
568 return ret;
569} 487}
570 488
571/** 489/**
@@ -625,7 +543,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
625 si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks); 543 si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
626 si[i + j].sui_flags = le32_to_cpu(su->su_flags) & 544 si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
627 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); 545 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
628 if (nilfs_segment_is_active(nilfs, segnum + i + j)) 546 if (nilfs_segment_is_active(nilfs, segnum + j))
629 si[i + j].sui_flags |= 547 si[i + j].sui_flags |=
630 (1UL << NILFS_SEGMENT_USAGE_ACTIVE); 548 (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
631 } 549 }
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index d595f33a768d..a2e2efd4ade1 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -36,9 +36,6 @@ static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
36} 36}
37 37
38int nilfs_sufile_alloc(struct inode *, __u64 *); 38int nilfs_sufile_alloc(struct inode *, __u64 *);
39int nilfs_sufile_cancel_free(struct inode *, __u64);
40int nilfs_sufile_freev(struct inode *, __u64 *, size_t);
41int nilfs_sufile_free(struct inode *, __u64);
42int nilfs_sufile_get_segment_usage(struct inode *, __u64, 39int nilfs_sufile_get_segment_usage(struct inode *, __u64,
43 struct nilfs_segment_usage **, 40 struct nilfs_segment_usage **,
44 struct buffer_head **); 41 struct buffer_head **);
@@ -46,9 +43,83 @@ void nilfs_sufile_put_segment_usage(struct inode *, __u64,
46 struct buffer_head *); 43 struct buffer_head *);
47int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); 44int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
48int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *); 45int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *);
49int nilfs_sufile_set_error(struct inode *, __u64);
50ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, struct nilfs_suinfo *, 46ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, struct nilfs_suinfo *,
51 size_t); 47 size_t);
52 48
49int nilfs_sufile_update(struct inode *, __u64, int,
50 void (*dofunc)(struct inode *, __u64,
51 struct buffer_head *,
52 struct buffer_head *));
53void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
54 struct buffer_head *);
55void nilfs_sufile_do_scrap(struct inode *, __u64, struct buffer_head *,
56 struct buffer_head *);
57void nilfs_sufile_do_free(struct inode *, __u64, struct buffer_head *,
58 struct buffer_head *);
59void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
60 struct buffer_head *);
61
62/**
63 * nilfs_sufile_cancel_free -
64 * @sufile: inode of segment usage file
65 * @segnum: segment number
66 *
67 * Description:
68 *
69 * Return Value: On success, 0 is returned. On error, one of the following
70 * negative error codes is returned.
71 *
72 * %-EIO - I/O error.
73 *
74 * %-ENOMEM - Insufficient amount of memory available.
75 */
76static inline int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
77{
78 return nilfs_sufile_update(sufile, segnum, 0,
79 nilfs_sufile_do_cancel_free);
80}
81
82/**
83 * nilfs_sufile_scrap - make a segment garbage
84 * @sufile: inode of segment usage file
85 * @segnum: segment number to be freed
86 */
87static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
88{
89 return nilfs_sufile_update(sufile, segnum, 1, nilfs_sufile_do_scrap);
90}
91
92/**
93 * nilfs_sufile_free - free segment
94 * @sufile: inode of segment usage file
95 * @segnum: segment number to be freed
96 */
97static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
98{
99 return nilfs_sufile_update(sufile, segnum, 0, nilfs_sufile_do_free);
100}
101
102/**
103 * nilfs_sufile_set_error - mark a segment as erroneous
104 * @sufile: inode of segment usage file
105 * @segnum: segment number
106 *
107 * Description: nilfs_sufile_set_error() marks the segment specified by
108 * @segnum as erroneous. The error segment will never be used again.
109 *
110 * Return Value: On success, 0 is returned. On error, one of the following
111 * negative error codes is returned.
112 *
113 * %-EIO - I/O error.
114 *
115 * %-ENOMEM - Insufficient amount of memory available.
116 *
117 * %-EINVAL - Invalid segment usage number.
118 */
119static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
120{
121 return nilfs_sufile_update(sufile, segnum, 0,
122 nilfs_sufile_do_set_error);
123}
53 124
54#endif /* _NILFS_SUFILE_H */ 125#endif /* _NILFS_SUFILE_H */
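The sufile rework above funnels cancel_free, scrap, free and set_error through a single nilfs_sufile_update() entry point, which looks up the header and segment-usage blocks under mi_sem and applies the given dofunc callback; the new inline wrappers are the intended call sites. A minimal sketch of a caller, assuming a sufile inode and segment number are already at hand (demo_* is hypothetical):

#include "sufile.h"   /* fs/nilfs2/sufile.h as patched above */

/* Mark a segment as garbage during recovery, as recovery.c now does. */
static int demo_scrap_segment(struct inode *sufile, __u64 segnum)
{
        return nilfs_sufile_scrap(sufile, segnum);
}

/* The wrapper above is just an explicit callback dispatch. */
static int demo_scrap_segment_explicit(struct inode *sufile, __u64 segnum)
{
        /* "1" asks nilfs_sufile_update() to create the block if missing. */
        return nilfs_sufile_update(sufile, segnum, 1, nilfs_sufile_do_scrap);
}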
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index e117e1ea9bff..6989b03e97ab 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -63,7 +63,6 @@
63MODULE_AUTHOR("NTT Corp."); 63MODULE_AUTHOR("NTT Corp.");
64MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem " 64MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
65 "(NILFS)"); 65 "(NILFS)");
66MODULE_VERSION(NILFS_VERSION);
67MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
68 67
69static int nilfs_remount(struct super_block *sb, int *flags, char *data); 68static int nilfs_remount(struct super_block *sb, int *flags, char *data);
@@ -476,11 +475,12 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
476{ 475{
477 struct super_block *sb = dentry->d_sb; 476 struct super_block *sb = dentry->d_sb;
478 struct nilfs_sb_info *sbi = NILFS_SB(sb); 477 struct nilfs_sb_info *sbi = NILFS_SB(sb);
478 struct the_nilfs *nilfs = sbi->s_nilfs;
479 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
479 unsigned long long blocks; 480 unsigned long long blocks;
480 unsigned long overhead; 481 unsigned long overhead;
481 unsigned long nrsvblocks; 482 unsigned long nrsvblocks;
482 sector_t nfreeblocks; 483 sector_t nfreeblocks;
483 struct the_nilfs *nilfs = sbi->s_nilfs;
484 int err; 484 int err;
485 485
486 /* 486 /*
@@ -514,6 +514,9 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
514 buf->f_files = atomic_read(&sbi->s_inodes_count); 514 buf->f_files = atomic_read(&sbi->s_inodes_count);
515 buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */ 515 buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */
516 buf->f_namelen = NILFS_NAME_LEN; 516 buf->f_namelen = NILFS_NAME_LEN;
517 buf->f_fsid.val[0] = (u32)id;
518 buf->f_fsid.val[1] = (u32)(id >> 32);
519
517 return 0; 520 return 0;
518} 521}
519 522
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 33400cf0bbe2..7f65b3be4aa9 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -115,6 +115,7 @@ void put_nilfs(struct the_nilfs *nilfs)
115static int nilfs_load_super_root(struct the_nilfs *nilfs, 115static int nilfs_load_super_root(struct the_nilfs *nilfs,
116 struct nilfs_sb_info *sbi, sector_t sr_block) 116 struct nilfs_sb_info *sbi, sector_t sr_block)
117{ 117{
118 static struct lock_class_key dat_lock_key;
118 struct buffer_head *bh_sr; 119 struct buffer_head *bh_sr;
119 struct nilfs_super_root *raw_sr; 120 struct nilfs_super_root *raw_sr;
120 struct nilfs_super_block **sbp = nilfs->ns_sbp; 121 struct nilfs_super_block **sbp = nilfs->ns_sbp;
@@ -163,6 +164,9 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs,
163 if (unlikely(err)) 164 if (unlikely(err))
164 goto failed_sufile; 165 goto failed_sufile;
165 166
167 lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
168 lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);
169
166 nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat); 170 nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
167 nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size, 171 nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
168 sizeof(struct nilfs_cpfile_header)); 172 sizeof(struct nilfs_cpfile_header));
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8672b9536039..c2a87c885b73 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1912,6 +1912,22 @@ out_sems:
1912 return written ? written : ret; 1912 return written ? written : ret;
1913} 1913}
1914 1914
1915static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
1916 struct file *out,
1917 struct splice_desc *sd)
1918{
1919 int ret;
1920
1921 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos,
1922 sd->total_len, 0, NULL);
1923 if (ret < 0) {
1924 mlog_errno(ret);
1925 return ret;
1926 }
1927
1928 return splice_from_pipe_feed(pipe, sd, pipe_to_file);
1929}
1930
1915static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, 1931static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1916 struct file *out, 1932 struct file *out,
1917 loff_t *ppos, 1933 loff_t *ppos,
@@ -1919,38 +1935,76 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1919 unsigned int flags) 1935 unsigned int flags)
1920{ 1936{
1921 int ret; 1937 int ret;
1922 struct inode *inode = out->f_path.dentry->d_inode; 1938 struct address_space *mapping = out->f_mapping;
1939 struct inode *inode = mapping->host;
1940 struct splice_desc sd = {
1941 .total_len = len,
1942 .flags = flags,
1943 .pos = *ppos,
1944 .u.file = out,
1945 };
1923 1946
1924 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe, 1947 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
1925 (unsigned int)len, 1948 (unsigned int)len,
1926 out->f_path.dentry->d_name.len, 1949 out->f_path.dentry->d_name.len,
1927 out->f_path.dentry->d_name.name); 1950 out->f_path.dentry->d_name.name);
1928 1951
1929 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); 1952 if (pipe->inode)
1953 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
1930 1954
1931 ret = ocfs2_rw_lock(inode, 1); 1955 splice_from_pipe_begin(&sd);
1932 if (ret < 0) { 1956 do {
1933 mlog_errno(ret); 1957 ret = splice_from_pipe_next(pipe, &sd);
1934 goto out; 1958 if (ret <= 0)
1935 } 1959 break;
1936 1960
1937 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0, 1961 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
1938 NULL); 1962 ret = ocfs2_rw_lock(inode, 1);
1939 if (ret < 0) { 1963 if (ret < 0)
1940 mlog_errno(ret); 1964 mlog_errno(ret);
1941 goto out_unlock; 1965 else {
1942 } 1966 ret = ocfs2_splice_to_file(pipe, out, &sd);
1967 ocfs2_rw_unlock(inode, 1);
1968 }
1969 mutex_unlock(&inode->i_mutex);
1970 } while (ret > 0);
1971 splice_from_pipe_end(pipe, &sd);
1943 1972
1944 if (pipe->inode) 1973 if (pipe->inode)
1945 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
1946 ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
1947 if (pipe->inode)
1948 mutex_unlock(&pipe->inode->i_mutex); 1974 mutex_unlock(&pipe->inode->i_mutex);
1949 1975
1950out_unlock: 1976 if (sd.num_spliced)
1951 ocfs2_rw_unlock(inode, 1); 1977 ret = sd.num_spliced;
1952out: 1978
1953 mutex_unlock(&inode->i_mutex); 1979 if (ret > 0) {
1980 unsigned long nr_pages;
1981
1982 *ppos += ret;
1983 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1984
1985 /*
1986 * If file or inode is SYNC and we actually wrote some data,
1987 * sync it.
1988 */
1989 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
1990 int err;
1991
1992 mutex_lock(&inode->i_mutex);
1993 err = ocfs2_rw_lock(inode, 1);
1994 if (err < 0) {
1995 mlog_errno(err);
1996 } else {
1997 err = generic_osync_inode(inode, mapping,
1998 OSYNC_METADATA|OSYNC_DATA);
1999 ocfs2_rw_unlock(inode, 1);
2000 }
2001 mutex_unlock(&inode->i_mutex);
2002
2003 if (err)
2004 ret = err;
2005 }
2006 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2007 }
1954 2008
1955 mlog_exit(ret); 2009 mlog_exit(ret);
1956 return ret; 2010 return ret;
diff --git a/fs/pipe.c b/fs/pipe.c
index 4af7aa521813..13414ec45b8d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -37,6 +37,42 @@
37 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09 37 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
38 */ 38 */
39 39
40static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
41{
42 if (pipe->inode)
43 mutex_lock_nested(&pipe->inode->i_mutex, subclass);
44}
45
46void pipe_lock(struct pipe_inode_info *pipe)
47{
48 /*
49 * pipe_lock() nests non-pipe inode locks (for writing to a file)
50 */
51 pipe_lock_nested(pipe, I_MUTEX_PARENT);
52}
53EXPORT_SYMBOL(pipe_lock);
54
55void pipe_unlock(struct pipe_inode_info *pipe)
56{
57 if (pipe->inode)
58 mutex_unlock(&pipe->inode->i_mutex);
59}
60EXPORT_SYMBOL(pipe_unlock);
61
62void pipe_double_lock(struct pipe_inode_info *pipe1,
63 struct pipe_inode_info *pipe2)
64{
65 BUG_ON(pipe1 == pipe2);
66
67 if (pipe1 < pipe2) {
68 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
69 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
70 } else {
71 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
72 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
73 }
74}
75
40/* Drop the inode semaphore and wait for a pipe event, atomically */ 76/* Drop the inode semaphore and wait for a pipe event, atomically */
41void pipe_wait(struct pipe_inode_info *pipe) 77void pipe_wait(struct pipe_inode_info *pipe)
42{ 78{
@@ -47,12 +83,10 @@ void pipe_wait(struct pipe_inode_info *pipe)
47 * is considered a noninteractive wait: 83 * is considered a noninteractive wait:
48 */ 84 */
49 prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE); 85 prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
50 if (pipe->inode) 86 pipe_unlock(pipe);
51 mutex_unlock(&pipe->inode->i_mutex);
52 schedule(); 87 schedule();
53 finish_wait(&pipe->wait, &wait); 88 finish_wait(&pipe->wait, &wait);
54 if (pipe->inode) 89 pipe_lock(pipe);
55 mutex_lock(&pipe->inode->i_mutex);
56} 90}
57 91
58static int 92static int
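pipe_lock(), pipe_unlock() and pipe_double_lock() above wrap the pipe->inode->i_mutex handling that splice.c used to open-code, including the address-ordered double lock that link_pipe() needs to avoid ABBA deadlocks. A minimal sketch of the calling pattern (demo_* names are placeholders):

#include <linux/pipe_fs_i.h>
#include <linux/sched.h>

/* Inspect one pipe's buffer state under the pipe lock. */
static void demo_wait_for_data(struct pipe_inode_info *pipe)
{
        pipe_lock(pipe);
        while (!pipe->nrbufs && pipe->writers) {
                if (signal_pending(current))
                        break;
                pipe_wait(pipe);        /* drops and retakes the pipe lock */
        }
        pipe_unlock(pipe);
}

/* Hold two pipes at once, e.g. for tee(2). */
static void demo_with_both(struct pipe_inode_info *ipipe,
                           struct pipe_inode_info *opipe)
{
        pipe_double_lock(ipipe, opipe); /* ordered by address internally */
        /* ... move or link buffers between ipipe and opipe ... */
        pipe_unlock(ipipe);
        pipe_unlock(opipe);
}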
diff --git a/fs/splice.c b/fs/splice.c
index c18aa7e03e2b..5384a90665d0 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -182,8 +182,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
182 do_wakeup = 0; 182 do_wakeup = 0;
183 page_nr = 0; 183 page_nr = 0;
184 184
185 if (pipe->inode) 185 pipe_lock(pipe);
186 mutex_lock(&pipe->inode->i_mutex);
187 186
188 for (;;) { 187 for (;;) {
189 if (!pipe->readers) { 188 if (!pipe->readers) {
@@ -245,15 +244,13 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
245 pipe->waiting_writers--; 244 pipe->waiting_writers--;
246 } 245 }
247 246
248 if (pipe->inode) { 247 pipe_unlock(pipe);
249 mutex_unlock(&pipe->inode->i_mutex);
250 248
251 if (do_wakeup) { 249 if (do_wakeup) {
252 smp_mb(); 250 smp_mb();
253 if (waitqueue_active(&pipe->wait)) 251 if (waitqueue_active(&pipe->wait))
254 wake_up_interruptible(&pipe->wait); 252 wake_up_interruptible(&pipe->wait);
255 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 253 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
256 }
257 } 254 }
258 255
259 while (page_nr < spd_pages) 256 while (page_nr < spd_pages)
@@ -555,8 +552,8 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
555 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create 552 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
556 * a new page in the output file page cache and fill/dirty that. 553 * a new page in the output file page cache and fill/dirty that.
557 */ 554 */
558static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, 555int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
559 struct splice_desc *sd) 556 struct splice_desc *sd)
560{ 557{
561 struct file *file = sd->u.file; 558 struct file *file = sd->u.file;
562 struct address_space *mapping = file->f_mapping; 559 struct address_space *mapping = file->f_mapping;
@@ -600,108 +597,178 @@ static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
600out: 597out:
601 return ret; 598 return ret;
602} 599}
600EXPORT_SYMBOL(pipe_to_file);
601
602static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
603{
604 smp_mb();
605 if (waitqueue_active(&pipe->wait))
606 wake_up_interruptible(&pipe->wait);
607 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
608}
603 609
604/** 610/**
605 * __splice_from_pipe - splice data from a pipe to given actor 611 * splice_from_pipe_feed - feed available data from a pipe to a file
606 * @pipe: pipe to splice from 612 * @pipe: pipe to splice from
607 * @sd: information to @actor 613 * @sd: information to @actor
608 * @actor: handler that splices the data 614 * @actor: handler that splices the data
609 * 615 *
610 * Description: 616 * Description:
611 * This function does little more than loop over the pipe and call 617
612 * @actor to do the actual moving of a single struct pipe_buffer to 618 * This function loops over the pipe and calls @actor to do the
613 * the desired destination. See pipe_to_file, pipe_to_sendpage, or 619 * actual moving of a single struct pipe_buffer to the desired
614 * pipe_to_user. 620 * destination. It returns when there's no more buffers left in
621 * the pipe or if the requested number of bytes (@sd->total_len)
622 * have been copied. It returns a positive number (one) if the
623 * pipe needs to be filled with more data, zero if the required
624 * number of bytes have been copied and -errno on error.
615 * 625 *
626 * This, together with splice_from_pipe_{begin,end,next}, may be
627 * used to implement the functionality of __splice_from_pipe() when
628 * locking is required around copying the pipe buffers to the
629 * destination.
616 */ 630 */
617ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, 631int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
618 splice_actor *actor) 632 splice_actor *actor)
619{ 633{
620 int ret, do_wakeup, err; 634 int ret;
621
622 ret = 0;
623 do_wakeup = 0;
624
625 for (;;) {
626 if (pipe->nrbufs) {
627 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
628 const struct pipe_buf_operations *ops = buf->ops;
629 635
630 sd->len = buf->len; 636 while (pipe->nrbufs) {
631 if (sd->len > sd->total_len) 637 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
632 sd->len = sd->total_len; 638 const struct pipe_buf_operations *ops = buf->ops;
633 639
634 err = actor(pipe, buf, sd); 640 sd->len = buf->len;
635 if (err <= 0) { 641 if (sd->len > sd->total_len)
636 if (!ret && err != -ENODATA) 642 sd->len = sd->total_len;
637 ret = err;
638 643
639 break; 644 ret = actor(pipe, buf, sd);
640 } 645 if (ret <= 0) {
646 if (ret == -ENODATA)
647 ret = 0;
648 return ret;
649 }
650 buf->offset += ret;
651 buf->len -= ret;
641 652
642 ret += err; 653 sd->num_spliced += ret;
643 buf->offset += err; 654 sd->len -= ret;
644 buf->len -= err; 655 sd->pos += ret;
656 sd->total_len -= ret;
645 657
646 sd->len -= err; 658 if (!buf->len) {
647 sd->pos += err; 659 buf->ops = NULL;
648 sd->total_len -= err; 660 ops->release(pipe, buf);
649 if (sd->len) 661 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
650 continue; 662 pipe->nrbufs--;
663 if (pipe->inode)
664 sd->need_wakeup = true;
665 }
651 666
652 if (!buf->len) { 667 if (!sd->total_len)
653 buf->ops = NULL; 668 return 0;
654 ops->release(pipe, buf); 669 }
655 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
656 pipe->nrbufs--;
657 if (pipe->inode)
658 do_wakeup = 1;
659 }
660 670
661 if (!sd->total_len) 671 return 1;
662 break; 672}
663 } 673EXPORT_SYMBOL(splice_from_pipe_feed);
664 674
665 if (pipe->nrbufs) 675/**
666 continue; 676 * splice_from_pipe_next - wait for some data to splice from
677 * @pipe: pipe to splice from
678 * @sd: information about the splice operation
679 *
680 * Description:
681 * This function will wait for some data and return a positive
682 * value (one) if pipe buffers are available. It will return zero
683 * or -errno if no more data needs to be spliced.
684 */
685int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
686{
687 while (!pipe->nrbufs) {
667 if (!pipe->writers) 688 if (!pipe->writers)
668 break; 689 return 0;
669 if (!pipe->waiting_writers) {
670 if (ret)
671 break;
672 }
673 690
674 if (sd->flags & SPLICE_F_NONBLOCK) { 691 if (!pipe->waiting_writers && sd->num_spliced)
675 if (!ret) 692 return 0;
676 ret = -EAGAIN;
677 break;
678 }
679 693
680 if (signal_pending(current)) { 694 if (sd->flags & SPLICE_F_NONBLOCK)
681 if (!ret) 695 return -EAGAIN;
682 ret = -ERESTARTSYS;
683 break;
684 }
685 696
686 if (do_wakeup) { 697 if (signal_pending(current))
687 smp_mb(); 698 return -ERESTARTSYS;
688 if (waitqueue_active(&pipe->wait)) 699
689 wake_up_interruptible_sync(&pipe->wait); 700 if (sd->need_wakeup) {
690 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 701 wakeup_pipe_writers(pipe);
691 do_wakeup = 0; 702 sd->need_wakeup = false;
692 } 703 }
693 704
694 pipe_wait(pipe); 705 pipe_wait(pipe);
695 } 706 }
696 707
697 if (do_wakeup) { 708 return 1;
698 smp_mb(); 709}
699 if (waitqueue_active(&pipe->wait)) 710EXPORT_SYMBOL(splice_from_pipe_next);
700 wake_up_interruptible(&pipe->wait);
701 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
702 }
703 711
704 return ret; 712/**
713 * splice_from_pipe_begin - start splicing from pipe
714 * @pipe: pipe to splice from
715 *
716 * Description:
717 * This function should be called before a loop containing
718 * splice_from_pipe_next() and splice_from_pipe_feed() to
719 * initialize the necessary fields of @sd.
720 */
721void splice_from_pipe_begin(struct splice_desc *sd)
722{
723 sd->num_spliced = 0;
724 sd->need_wakeup = false;
725}
726EXPORT_SYMBOL(splice_from_pipe_begin);
727
728/**
729 * splice_from_pipe_end - finish splicing from pipe
730 * @pipe: pipe to splice from
731 * @sd: information about the splice operation
732 *
733 * Description:
734 * This function will wake up pipe writers if necessary. It should
735 * be called after a loop containing splice_from_pipe_next() and
736 * splice_from_pipe_feed().
737 */
738void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
739{
740 if (sd->need_wakeup)
741 wakeup_pipe_writers(pipe);
742}
743EXPORT_SYMBOL(splice_from_pipe_end);
744
745/**
746 * __splice_from_pipe - splice data from a pipe to given actor
747 * @pipe: pipe to splice from
748 * @sd: information to @actor
749 * @actor: handler that splices the data
750 *
751 * Description:
752 * This function does little more than loop over the pipe and call
753 * @actor to do the actual moving of a single struct pipe_buffer to
754 * the desired destination. See pipe_to_file, pipe_to_sendpage, or
755 * pipe_to_user.
756 *
757 */
758ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
759 splice_actor *actor)
760{
761 int ret;
762
763 splice_from_pipe_begin(sd);
764 do {
765 ret = splice_from_pipe_next(pipe, sd);
766 if (ret > 0)
767 ret = splice_from_pipe_feed(pipe, sd, actor);
768 } while (ret > 0);
769 splice_from_pipe_end(pipe, sd);
770
771 return sd->num_spliced ? sd->num_spliced : ret;
705} 772}
706EXPORT_SYMBOL(__splice_from_pipe); 773EXPORT_SYMBOL(__splice_from_pipe);
707 774
@@ -715,7 +782,7 @@ EXPORT_SYMBOL(__splice_from_pipe);
715 * @actor: handler that splices the data 782 * @actor: handler that splices the data
716 * 783 *
717 * Description: 784 * Description:
718 * See __splice_from_pipe. This function locks the input and output inodes, 785 * See __splice_from_pipe. This function locks the pipe inode,
719 * otherwise it's identical to __splice_from_pipe(). 786 * otherwise it's identical to __splice_from_pipe().
720 * 787 *
721 */ 788 */
@@ -724,7 +791,6 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
724 splice_actor *actor) 791 splice_actor *actor)
725{ 792{
726 ssize_t ret; 793 ssize_t ret;
727 struct inode *inode = out->f_mapping->host;
728 struct splice_desc sd = { 794 struct splice_desc sd = {
729 .total_len = len, 795 .total_len = len,
730 .flags = flags, 796 .flags = flags,
@@ -732,30 +798,15 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
732 .u.file = out, 798 .u.file = out,
733 }; 799 };
734 800
735 /* 801 pipe_lock(pipe);
736 * The actor worker might be calling ->write_begin and
737 * ->write_end. Most of the time, these expect i_mutex to
738 * be held. Since this may result in an ABBA deadlock with
739 * pipe->inode, we have to order lock acquiry here.
740 *
741 * Outer lock must be inode->i_mutex, as pipe_wait() will
742 * release and reacquire pipe->inode->i_mutex, AND inode must
743 * never be a pipe.
744 */
745 WARN_ON(S_ISFIFO(inode->i_mode));
746 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
747 if (pipe->inode)
748 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
749 ret = __splice_from_pipe(pipe, &sd, actor); 802 ret = __splice_from_pipe(pipe, &sd, actor);
750 if (pipe->inode) 803 pipe_unlock(pipe);
751 mutex_unlock(&pipe->inode->i_mutex);
752 mutex_unlock(&inode->i_mutex);
753 804
754 return ret; 805 return ret;
755} 806}
756 807
757/** 808/**
758 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes 809 * generic_file_splice_write - splice data from a pipe to a file
759 * @pipe: pipe info 810 * @pipe: pipe info
760 * @out: file to write to 811 * @out: file to write to
761 * @ppos: position in @out 812 * @ppos: position in @out
@@ -764,13 +815,12 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
764 * 815 *
765 * Description: 816 * Description:
766 * Will either move or copy pages (determined by @flags options) from 817 * Will either move or copy pages (determined by @flags options) from
767 * the given pipe inode to the given file. The caller is responsible 818 * the given pipe inode to the given file.
768 * for acquiring i_mutex on both inodes.
769 * 819 *
770 */ 820 */
771ssize_t 821ssize_t
772generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out, 822generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
773 loff_t *ppos, size_t len, unsigned int flags) 823 loff_t *ppos, size_t len, unsigned int flags)
774{ 824{
775 struct address_space *mapping = out->f_mapping; 825 struct address_space *mapping = out->f_mapping;
776 struct inode *inode = mapping->host; 826 struct inode *inode = mapping->host;
@@ -781,76 +831,28 @@ generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
781 .u.file = out, 831 .u.file = out,
782 }; 832 };
783 ssize_t ret; 833 ssize_t ret;
784 int err;
785
786 err = file_remove_suid(out);
787 if (unlikely(err))
788 return err;
789
790 ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
791 if (ret > 0) {
792 unsigned long nr_pages;
793 834
794 *ppos += ret; 835 pipe_lock(pipe);
795 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
796 836
797 /* 837 splice_from_pipe_begin(&sd);
798 * If file or inode is SYNC and we actually wrote some data, 838 do {
799 * sync it. 839 ret = splice_from_pipe_next(pipe, &sd);
800 */ 840 if (ret <= 0)
801 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { 841 break;
802 err = generic_osync_inode(inode, mapping,
803 OSYNC_METADATA|OSYNC_DATA);
804
805 if (err)
806 ret = err;
807 }
808 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
809 }
810 842
811 return ret; 843 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
812} 844 ret = file_remove_suid(out);
845 if (!ret)
846 ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
847 mutex_unlock(&inode->i_mutex);
848 } while (ret > 0);
849 splice_from_pipe_end(pipe, &sd);
813 850
814EXPORT_SYMBOL(generic_file_splice_write_nolock); 851 pipe_unlock(pipe);
815 852
816/** 853 if (sd.num_spliced)
817 * generic_file_splice_write - splice data from a pipe to a file 854 ret = sd.num_spliced;
818 * @pipe: pipe info
819 * @out: file to write to
820 * @ppos: position in @out
821 * @len: number of bytes to splice
822 * @flags: splice modifier flags
823 *
824 * Description:
825 * Will either move or copy pages (determined by @flags options) from
826 * the given pipe inode to the given file.
827 *
828 */
829ssize_t
830generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
831 loff_t *ppos, size_t len, unsigned int flags)
832{
833 struct address_space *mapping = out->f_mapping;
834 struct inode *inode = mapping->host;
835 struct splice_desc sd = {
836 .total_len = len,
837 .flags = flags,
838 .pos = *ppos,
839 .u.file = out,
840 };
841 ssize_t ret;
842 855
843 WARN_ON(S_ISFIFO(inode->i_mode));
844 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
845 ret = file_remove_suid(out);
846 if (likely(!ret)) {
847 if (pipe->inode)
848 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
849 ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
850 if (pipe->inode)
851 mutex_unlock(&pipe->inode->i_mutex);
852 }
853 mutex_unlock(&inode->i_mutex);
854 if (ret > 0) { 856 if (ret > 0) {
855 unsigned long nr_pages; 857 unsigned long nr_pages;
856 858
@@ -1339,8 +1341,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1339 if (!pipe) 1341 if (!pipe)
1340 return -EBADF; 1342 return -EBADF;
1341 1343
1342 if (pipe->inode) 1344 pipe_lock(pipe);
1343 mutex_lock(&pipe->inode->i_mutex);
1344 1345
1345 error = ret = 0; 1346 error = ret = 0;
1346 while (nr_segs) { 1347 while (nr_segs) {
@@ -1395,8 +1396,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1395 iov++; 1396 iov++;
1396 } 1397 }
1397 1398
1398 if (pipe->inode) 1399 pipe_unlock(pipe);
1399 mutex_unlock(&pipe->inode->i_mutex);
1400 1400
1401 if (!ret) 1401 if (!ret)
1402 ret = error; 1402 ret = error;
@@ -1524,7 +1524,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1524 return 0; 1524 return 0;
1525 1525
1526 ret = 0; 1526 ret = 0;
1527 mutex_lock(&pipe->inode->i_mutex); 1527 pipe_lock(pipe);
1528 1528
1529 while (!pipe->nrbufs) { 1529 while (!pipe->nrbufs) {
1530 if (signal_pending(current)) { 1530 if (signal_pending(current)) {
@@ -1542,7 +1542,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1542 pipe_wait(pipe); 1542 pipe_wait(pipe);
1543 } 1543 }
1544 1544
1545 mutex_unlock(&pipe->inode->i_mutex); 1545 pipe_unlock(pipe);
1546 return ret; 1546 return ret;
1547} 1547}
1548 1548
@@ -1562,7 +1562,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1562 return 0; 1562 return 0;
1563 1563
1564 ret = 0; 1564 ret = 0;
1565 mutex_lock(&pipe->inode->i_mutex); 1565 pipe_lock(pipe);
1566 1566
1567 while (pipe->nrbufs >= PIPE_BUFFERS) { 1567 while (pipe->nrbufs >= PIPE_BUFFERS) {
1568 if (!pipe->readers) { 1568 if (!pipe->readers) {
@@ -1583,7 +1583,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1583 pipe->waiting_writers--; 1583 pipe->waiting_writers--;
1584 } 1584 }
1585 1585
1586 mutex_unlock(&pipe->inode->i_mutex); 1586 pipe_unlock(pipe);
1587 return ret; 1587 return ret;
1588} 1588}
1589 1589
@@ -1599,10 +1599,10 @@ static int link_pipe(struct pipe_inode_info *ipipe,
1599 1599
1600 /* 1600 /*
1601 * Potential ABBA deadlock, work around it by ordering lock 1601 * Potential ABBA deadlock, work around it by ordering lock
1602 * grabbing by inode address. Otherwise two different processes 1602 * grabbing by pipe info address. Otherwise two different processes
1603 * could deadlock (one doing tee from A -> B, the other from B -> A). 1603 * could deadlock (one doing tee from A -> B, the other from B -> A).
1604 */ 1604 */
1605 inode_double_lock(ipipe->inode, opipe->inode); 1605 pipe_double_lock(ipipe, opipe);
1606 1606
1607 do { 1607 do {
1608 if (!opipe->readers) { 1608 if (!opipe->readers) {
@@ -1653,7 +1653,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
1653 if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) 1653 if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
1654 ret = -EAGAIN; 1654 ret = -EAGAIN;
1655 1655
1656 inode_double_unlock(ipipe->inode, opipe->inode); 1656 pipe_unlock(ipipe);
1657 pipe_unlock(opipe);
1657 1658
1658 /* 1659 /*
1659 * If we put data in the output pipe, wakeup any potential readers. 1660 * If we put data in the output pipe, wakeup any potential readers.
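For reference, the new pipe_double_lock() helper only has to pick a globally consistent order; the sketch below is an illustration of the address-ordering idea described in the comment above, not necessarily the exact implementation (the real helper almost certainly adds lockdep nesting annotations, which are omitted here, and the name is hypothetical):

#include <linux/kernel.h>
#include <linux/pipe_fs_i.h>

/* Sketch: take both pipe mutexes in a consistent, address-based order.
 * Two tasks doing tee(A -> B) and tee(B -> A) then agree on which pipe
 * to lock first, so the ABBA deadlock cannot happen. */
static void pipe_double_lock_sketch(struct pipe_inode_info *a,
				    struct pipe_inode_info *b)
{
	BUG_ON(a == b);

	if (a < b) {
		pipe_lock(a);	/* lower address first ... */
		pipe_lock(b);	/* ... higher address second */
	} else {
		pipe_lock(b);
		pipe_lock(a);
	}
}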
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 67e3353a56d6..95962fa8398a 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -594,6 +594,9 @@ struct drm_i915_gem_busy {
594#define I915_BIT_6_SWIZZLE_9_10_11 4 594#define I915_BIT_6_SWIZZLE_9_10_11 4
595/* Not seen by userland */ 595/* Not seen by userland */
596#define I915_BIT_6_SWIZZLE_UNKNOWN 5 596#define I915_BIT_6_SWIZZLE_UNKNOWN 5
597/* Seen by userland. */
598#define I915_BIT_6_SWIZZLE_9_17 6
599#define I915_BIT_6_SWIZZLE_9_10_17 7
597 600
598struct drm_i915_gem_set_tiling { 601struct drm_i915_gem_set_tiling {
599 /** Handle of the buffer to have its tiling state updated */ 602 /** Handle of the buffer to have its tiling state updated */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b900d2c67d29..b89cf2d82898 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -504,6 +504,115 @@ static inline int bio_has_data(struct bio *bio)
504 return bio && bio->bi_io_vec != NULL; 504 return bio && bio->bi_io_vec != NULL;
505} 505}
506 506
507/*
508 * BIO list management for use by remapping drivers (e.g. DM or MD).
509 *
510 * A bio_list anchors a singly-linked list of bios chained through the bi_next
511 * member of the bio. The bio_list also caches the last list member to allow
512 * fast access to the tail.
513 */
514struct bio_list {
515 struct bio *head;
516 struct bio *tail;
517};
518
519static inline int bio_list_empty(const struct bio_list *bl)
520{
521 return bl->head == NULL;
522}
523
524static inline void bio_list_init(struct bio_list *bl)
525{
526 bl->head = bl->tail = NULL;
527}
528
529#define bio_list_for_each(bio, bl) \
530 for (bio = (bl)->head; bio; bio = bio->bi_next)
531
532static inline unsigned bio_list_size(const struct bio_list *bl)
533{
534 unsigned sz = 0;
535 struct bio *bio;
536
537 bio_list_for_each(bio, bl)
538 sz++;
539
540 return sz;
541}
542
543static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
544{
545 bio->bi_next = NULL;
546
547 if (bl->tail)
548 bl->tail->bi_next = bio;
549 else
550 bl->head = bio;
551
552 bl->tail = bio;
553}
554
555static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
556{
557 bio->bi_next = bl->head;
558
559 bl->head = bio;
560
561 if (!bl->tail)
562 bl->tail = bio;
563}
564
565static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
566{
567 if (!bl2->head)
568 return;
569
570 if (bl->tail)
571 bl->tail->bi_next = bl2->head;
572 else
573 bl->head = bl2->head;
574
575 bl->tail = bl2->tail;
576}
577
578static inline void bio_list_merge_head(struct bio_list *bl,
579 struct bio_list *bl2)
580{
581 if (!bl2->head)
582 return;
583
584 if (bl->head)
585 bl2->tail->bi_next = bl->head;
586 else
587 bl->tail = bl2->tail;
588
589 bl->head = bl2->head;
590}
591
592static inline struct bio *bio_list_pop(struct bio_list *bl)
593{
594 struct bio *bio = bl->head;
595
596 if (bio) {
597 bl->head = bl->head->bi_next;
598 if (!bl->head)
599 bl->tail = NULL;
600
601 bio->bi_next = NULL;
602 }
603
604 return bio;
605}
606
607static inline struct bio *bio_list_get(struct bio_list *bl)
608{
609 struct bio *bio = bl->head;
610
611 bl->head = bl->tail = NULL;
612
613 return bio;
614}
615
507#if defined(CONFIG_BLK_DEV_INTEGRITY) 616#if defined(CONFIG_BLK_DEV_INTEGRITY)
508 617
509#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) 618#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
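As a quick illustration of the bio_list helpers that now live in bio.h (hypothetical driver-side code, not part of the patch), a remapping driver can park incoming bios and later replay them in FIFO order:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical example: defer bios on a bio_list and resubmit later.
 * Locking around the shared list is omitted for brevity. */
static struct bio_list example_deferred_bios;

static void example_setup(void)
{
	bio_list_init(&example_deferred_bios);
}

static void example_defer_bio(struct bio *bio)
{
	bio_list_add(&example_deferred_bios, bio);	/* append at the tail */
}

static void example_flush_deferred(void)
{
	struct bio *bio;

	/* Pop from the head so bios are resubmitted in arrival order. */
	while ((bio = bio_list_pop(&example_deferred_bios)) != NULL)
		generic_make_request(bio);
}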
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 493dedb7a67b..29b3ce3f2a1d 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <asm/system.h>
6 7
7struct task_struct; 8struct task_struct;
8 9
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 562d2855cf30..e766be0d4329 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -87,6 +87,60 @@ struct inodes_stat_t {
87 */ 87 */
88#define FMODE_NOCMTIME ((__force fmode_t)2048) 88#define FMODE_NOCMTIME ((__force fmode_t)2048)
89 89
90/*
91 * The below are the various read and write types that we support. Some of
92 * them include behavioral modifiers that send information down to the
93 * block layer and IO scheduler. Terminology:
94 *
95 * The block layer uses device plugging to defer IO a little bit, in
96 * the hope that we will see more IO very shortly. This increases
97 * coalescing of adjacent IO and thus reduces the number of IOs we
98 * have to send to the device. It also allows for better queuing,
99 * if the IO isn't mergeable. If the caller is going to be waiting
100 * for the IO, then he must ensure that the device is unplugged so
101 * that the IO is dispatched to the driver.
102 *
103 * All IO is handled async in Linux. This is fine for background
104 * writes, but for reads or writes that someone waits for completion
105 * on, we want to notify the block layer and IO scheduler so that they
106 * know about it. That allows them to make better scheduling
107 * decisions. So when the below references 'sync' and 'async', it
108 * is referencing this priority hint.
109 *
110 * With that in mind, the available types are:
111 *
112 * READ A normal read operation. Device will be plugged.
113 * READ_SYNC A synchronous read. Device is not plugged, caller can
114 * immediately wait on this read without caring about
115 * unplugging.
116 * READA Used for read-ahead operations. Lower priority, and the
117 * block layer could (in theory) choose to ignore this
118 * request if it runs into resource problems.
119 * WRITE A normal async write. Device will be plugged.
120 * SWRITE Like WRITE, but a special case for ll_rw_block() that
121 * tells it to lock the buffer first. Normally a buffer
122 * must be locked before doing IO.
123 * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down
124 * the hint that someone will be waiting on this IO
125 * shortly. The device must still be unplugged explicitly,
126 * WRITE_SYNC_PLUG does not do this as we could be
127 * submitting more writes before we actually wait on any
128 * of them.
129 * WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
130 * immediately after submission. The write equivalent
131 * of READ_SYNC.
132 * WRITE_ODIRECT Special case write for O_DIRECT only.
133 * SWRITE_SYNC
134 * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
135 * See SWRITE.
136 * WRITE_BARRIER Like WRITE, but tells the block layer that all
137 * previously submitted writes must be safely on storage
138 * before this one is started. Also guarantees that when
139 * this write is complete, it itself is also safely on
140 * storage. Prevents reordering of writes on both sides
141 * of this IO.
142 *
143 */
90#define RW_MASK 1 144#define RW_MASK 1
91#define RWA_MASK 2 145#define RWA_MASK 2
92#define READ 0 146#define READ 0
@@ -102,6 +156,11 @@ struct inodes_stat_t {
102 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) 156 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
103#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) 157#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
104#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) 158#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
159
160/*
161 * These aren't really reads or writes; they pass down information about
162 * parts of the device that are now unused by the file system.
163 */
105#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) 164#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
106#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) 165#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
107 166
@@ -738,9 +797,6 @@ enum inode_i_mutex_lock_class
738 I_MUTEX_QUOTA 797 I_MUTEX_QUOTA
739}; 798};
740 799
741extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
742extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
743
744/* 800/*
745 * NOTE: in a 32bit arch with a preemptable kernel and 801 * NOTE: in a 32bit arch with a preemptable kernel and
746 * an UP compile the i_size_read/write must be atomic 802 * an UP compile the i_size_read/write must be atomic
@@ -2150,8 +2206,6 @@ extern ssize_t generic_file_splice_read(struct file *, loff_t *,
2150 struct pipe_inode_info *, size_t, unsigned int); 2206 struct pipe_inode_info *, size_t, unsigned int);
2151extern ssize_t generic_file_splice_write(struct pipe_inode_info *, 2207extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
2152 struct file *, loff_t *, size_t, unsigned int); 2208 struct file *, loff_t *, size_t, unsigned int);
2153extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *,
2154 struct file *, loff_t *, size_t, unsigned int);
2155extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2209extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2156 struct file *out, loff_t *, size_t len, unsigned int flags); 2210 struct file *out, loff_t *, size_t len, unsigned int flags);
2157extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 2211extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
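To make the new comment block in fs.h concrete, here is a small hedged example (hypothetical helper, not part of the patch) of picking between the plain and sync write hints when submitting a bio; the assumption is simply that the caller knows whether it will block on completion:

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/bio.h>

/* Hypothetical helper: choose the rw hint for one write bio.
 * WRITE leaves the queue plugged (pure async); WRITE_SYNC marks the
 * IO as synchronous and unplugs immediately, because the caller is
 * about to wait for it. */
static void example_submit_write(struct bio *bio, bool will_wait)
{
	if (will_wait)
		submit_bio(WRITE_SYNC, bio);
	else
		submit_bio(WRITE, bio);
}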
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index f2a78b5e8b55..43fc95d822d5 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -43,10 +43,6 @@
43 * 43 *
44 */ 44 */
45 45
46/* Flags related to I2C device features */
47#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
48#define FSL_I2C_DEV_CLOCK_5200 0x00000002
49
50enum fsl_usb2_operating_modes { 46enum fsl_usb2_operating_modes {
51 FSL_USB2_MPH_HOST, 47 FSL_USB2_MPH_HOST,
52 FSL_USB2_DR_HOST, 48 FSL_USB2_DR_HOST,
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 8e4120285f72..c8f038554e80 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -134,6 +134,11 @@ struct pipe_buf_operations {
134 memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ 134 memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
135#define PIPE_SIZE PAGE_SIZE 135#define PIPE_SIZE PAGE_SIZE
136 136
137/* Pipe lock and unlock operations */
138void pipe_lock(struct pipe_inode_info *);
139void pipe_unlock(struct pipe_inode_info *);
140void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
141
137/* Drop the inode semaphore and wait for a pipe event, atomically */ 142/* Drop the inode semaphore and wait for a pipe event, atomically */
138void pipe_wait(struct pipe_inode_info *pipe); 143void pipe_wait(struct pipe_inode_info *pipe);
139 144
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 528dcb93c2f2..5f3faa9d15ae 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -36,6 +36,8 @@ struct splice_desc {
36 void *data; /* cookie */ 36 void *data; /* cookie */
37 } u; 37 } u;
38 loff_t pos; /* file position */ 38 loff_t pos; /* file position */
39 size_t num_spliced; /* number of bytes already spliced */
40 bool need_wakeup; /* need to wake up writer */
39}; 41};
40 42
41struct partial_page { 43struct partial_page {
@@ -66,6 +68,16 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
66 splice_actor *); 68 splice_actor *);
67extern ssize_t __splice_from_pipe(struct pipe_inode_info *, 69extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
68 struct splice_desc *, splice_actor *); 70 struct splice_desc *, splice_actor *);
71extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *,
72 splice_actor *);
73extern int splice_from_pipe_next(struct pipe_inode_info *,
74 struct splice_desc *);
75extern void splice_from_pipe_begin(struct splice_desc *);
76extern void splice_from_pipe_end(struct pipe_inode_info *,
77 struct splice_desc *);
78extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *,
79 struct splice_desc *);
80
69extern ssize_t splice_to_pipe(struct pipe_inode_info *, 81extern ssize_t splice_to_pipe(struct pipe_inode_info *,
70 struct splice_pipe_desc *); 82 struct splice_pipe_desc *);
71extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, 83extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
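The newly exported splice_from_pipe_begin/next/feed/end helpers let a filesystem open-code the splice-write loop while taking its own locks only around the feed step. Below is a rough sketch of the intended calling pattern (simplified, with error handling and syncing trimmed; the real generic_file_splice_write() is the authoritative user, and the exact return-value conventions are an assumption here):

#include <linux/fs.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>

/* Sketch of a filesystem ->splice_write() built on the new helpers. */
static ssize_t example_splice_write(struct pipe_inode_info *pipe,
				    struct file *out, loff_t *ppos,
				    size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.total_len = len,
		.flags	   = flags,
		.pos	   = *ppos,
		.u.file	   = out,
	};
	ssize_t ret;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		/* Wait until the pipe has data, or no writers remain. */
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		/* A real filesystem takes its own locks around this. */
		ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;
	if (ret > 0)
		*ppos += ret;

	return ret;
}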
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index b95842542590..625e9e4639c6 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -29,7 +29,7 @@
29/** 29/**
30 * usb_serial_port: structure for the specific ports of a device. 30 * usb_serial_port: structure for the specific ports of a device.
31 * @serial: pointer back to the struct usb_serial owner of this port. 31 * @serial: pointer back to the struct usb_serial owner of this port.
32 * @tty: pointer to the corresponding tty for this port. 32 * @port: pointer to the corresponding tty_port for this port.
33 * @lock: spinlock to grab when updating portions of this structure. 33 * @lock: spinlock to grab when updating portions of this structure.
34 * @mutex: mutex used to synchronize serial_open() and serial_close() 34 * @mutex: mutex used to synchronize serial_open() and serial_close()
35 * access for this port. 35 * access for this port.
@@ -44,19 +44,22 @@
44 * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe 44 * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
45 * for this port. 45 * for this port.
46 * @bulk_in_buffer: pointer to the bulk in buffer for this port. 46 * @bulk_in_buffer: pointer to the bulk in buffer for this port.
47 * @bulk_in_size: the size of the bulk_in_buffer, in bytes.
47 * @read_urb: pointer to the bulk in struct urb for this port. 48 * @read_urb: pointer to the bulk in struct urb for this port.
48 * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this 49 * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
49 * port. 50 * port.
50 * @bulk_out_buffer: pointer to the bulk out buffer for this port. 51 * @bulk_out_buffer: pointer to the bulk out buffer for this port.
51 * @bulk_out_size: the size of the bulk_out_buffer, in bytes. 52 * @bulk_out_size: the size of the bulk_out_buffer, in bytes.
52 * @write_urb: pointer to the bulk out struct urb for this port. 53 * @write_urb: pointer to the bulk out struct urb for this port.
54 * @write_urb_busy: the port's write status
53 * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this 55 * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
54 * port. 56 * port.
55 * @write_wait: a wait_queue_head_t used by the port. 57 * @write_wait: a wait_queue_head_t used by the port.
56 * @work: work queue entry for the line discipline waking up. 58 * @work: work queue entry for the line discipline waking up.
57 * @open_count: number of times this port has been opened.
58 * @throttled: nonzero if the read urb is inactive to throttle the device 59 * @throttled: nonzero if the read urb is inactive to throttle the device
59 * @throttle_req: nonzero if the tty wants to throttle us 60 * @throttle_req: nonzero if the tty wants to throttle us
61 * @console: attached usb serial console
62 * @dev: pointer to the serial device
60 * 63 *
61 * This structure is used by the usb-serial core and drivers for the specific 64 * This structure is used by the usb-serial core and drivers for the specific
62 * ports of a device. 65 * ports of a device.
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 6b013c6f6a04..f236e426a706 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -50,6 +50,8 @@ struct snd_jack {
50 int type; 50 int type;
51 const char *id; 51 const char *id;
52 char name[100]; 52 char name[100];
53 void *private_data;
54 void (*private_free)(struct snd_jack *);
53}; 55};
54 56
55#ifdef CONFIG_SND_JACK 57#ifdef CONFIG_SND_JACK
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8904b1900d7f..c17296891617 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -268,7 +268,8 @@ struct snd_pcm_runtime {
268 int overrange; 268 int overrange;
269 snd_pcm_uframes_t avail_max; 269 snd_pcm_uframes_t avail_max;
270 snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */ 270 snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
271 snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time*/ 271 snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */
272 unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */
272 273
273 /* -- HW params -- */ 274 /* -- HW params -- */
274 snd_pcm_access_t access; /* access mode */ 275 snd_pcm_access_t access; /* access mode */
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319e489c..8ba052c86d48 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
64 struct bio *bio; 64 struct bio *bio;
65 65
66 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); 66 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
67 if (!bio)
68 return -ENOMEM;
69 bio->bi_sector = page_off * (PAGE_SIZE >> 9); 67 bio->bi_sector = page_off * (PAGE_SIZE >> 9);
70 bio->bi_bdev = resume_bdev; 68 bio->bi_bdev = resume_bdev;
71 bio->bi_end_io = end_swap_bio_read; 69 bio->bi_end_io = end_swap_bio_read;
diff --git a/sound/core/control.c b/sound/core/control.c
index 4b20fa2b7e6d..17b8d47a5cd0 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -723,14 +723,11 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
723{ 723{
724 struct snd_ctl_elem_value *control; 724 struct snd_ctl_elem_value *control;
725 int result; 725 int result;
726 726
727 control = kmalloc(sizeof(*control), GFP_KERNEL); 727 control = memdup_user(_control, sizeof(*control));
728 if (control == NULL) 728 if (IS_ERR(control))
729 return -ENOMEM; 729 return PTR_ERR(control);
730 if (copy_from_user(control, _control, sizeof(*control))) { 730
731 kfree(control);
732 return -EFAULT;
733 }
734 snd_power_lock(card); 731 snd_power_lock(card);
735 result = snd_power_wait(card, SNDRV_CTL_POWER_D0); 732 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
736 if (result >= 0) 733 if (result >= 0)
@@ -784,13 +781,10 @@ static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
784 struct snd_card *card; 781 struct snd_card *card;
785 int result; 782 int result;
786 783
787 control = kmalloc(sizeof(*control), GFP_KERNEL); 784 control = memdup_user(_control, sizeof(*control));
788 if (control == NULL) 785 if (IS_ERR(control))
789 return -ENOMEM; 786 return PTR_ERR(control);
790 if (copy_from_user(control, _control, sizeof(*control))) { 787
791 kfree(control);
792 return -EFAULT;
793 }
794 card = file->card; 788 card = file->card;
795 snd_power_lock(card); 789 snd_power_lock(card);
796 result = snd_power_wait(card, SNDRV_CTL_POWER_D0); 790 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
@@ -916,13 +910,10 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
916 if (op_flag > 0) { 910 if (op_flag > 0) {
917 if (size > 1024 * 128) /* sane value */ 911 if (size > 1024 * 128) /* sane value */
918 return -EINVAL; 912 return -EINVAL;
919 new_data = kmalloc(size, GFP_KERNEL); 913
920 if (new_data == NULL) 914 new_data = memdup_user(tlv, size);
921 return -ENOMEM; 915 if (IS_ERR(new_data))
922 if (copy_from_user(new_data, tlv, size)) { 916 return PTR_ERR(new_data);
923 kfree(new_data);
924 return -EFAULT;
925 }
926 change = ue->tlv_data_size != size; 917 change = ue->tlv_data_size != size;
927 if (!change) 918 if (!change)
928 change = memcmp(ue->tlv_data, new_data, size); 919 change = memcmp(ue->tlv_data, new_data, size);
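The same conversion repeats throughout the ALSA changes below; in generic form (example code, not tied to any particular ioctl), memdup_user() replaces the kmalloc()/copy_from_user() pair and reports failure as an ERR_PTR instead of NULL:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Generic shape of the memdup_user() conversion pattern. */
static int example_copy_in(void __user *uarg, size_t size)
{
	void *karg;
	int err = 0;

	karg = memdup_user(uarg, size);	/* kmalloc + copy_from_user in one */
	if (IS_ERR(karg))
		return PTR_ERR(karg);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel copy here ... */

	kfree(karg);
	return err;
}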
diff --git a/sound/core/jack.c b/sound/core/jack.c
index c8254c667c62..d54d1a05fe65 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -35,6 +35,9 @@ static int snd_jack_dev_free(struct snd_device *device)
35{ 35{
36 struct snd_jack *jack = device->device_data; 36 struct snd_jack *jack = device->device_data;
37 37
38 if (jack->private_free)
39 jack->private_free(jack);
40
38 /* If the input device is registered with the input subsystem 41 /* If the input device is registered with the input subsystem
39 * then we need to use a different deallocator. */ 42 * then we need to use a different deallocator. */
40 if (jack->registered) 43 if (jack->registered)
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 36d7a5998234..08bfed594a83 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -232,14 +232,11 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
232 if (! (runtime = substream->runtime)) 232 if (! (runtime = substream->runtime))
233 return -ENOTTY; 233 return -ENOTTY;
234 234
235 data = kmalloc(sizeof(*data), GFP_KERNEL);
236 if (data == NULL)
237 return -ENOMEM;
238 /* only fifo_size is different, so just copy all */ 235 /* only fifo_size is different, so just copy all */
239 if (copy_from_user(data, data32, sizeof(*data32))) { 236 data = memdup_user(data32, sizeof(*data32));
240 err = -EFAULT; 237 if (IS_ERR(data))
241 goto error; 238 return PTR_ERR(data);
242 } 239
243 if (refine) 240 if (refine)
244 err = snd_pcm_hw_refine(substream, data); 241 err = snd_pcm_hw_refine(substream, data);
245 else 242 else
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index fbb2e391591e..63d088f2265f 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -209,9 +209,11 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
209{ 209{
210 struct snd_pcm_runtime *runtime = substream->runtime; 210 struct snd_pcm_runtime *runtime = substream->runtime;
211 snd_pcm_uframes_t pos; 211 snd_pcm_uframes_t pos;
212 snd_pcm_uframes_t new_hw_ptr, hw_ptr_interrupt, hw_base; 212 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_ptr_interrupt, hw_base;
213 snd_pcm_sframes_t delta; 213 snd_pcm_sframes_t hdelta, delta;
214 unsigned long jdelta;
214 215
216 old_hw_ptr = runtime->status->hw_ptr;
215 pos = snd_pcm_update_hw_ptr_pos(substream, runtime); 217 pos = snd_pcm_update_hw_ptr_pos(substream, runtime);
216 if (pos == SNDRV_PCM_POS_XRUN) { 218 if (pos == SNDRV_PCM_POS_XRUN) {
217 xrun(substream); 219 xrun(substream);
@@ -247,7 +249,30 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
247 new_hw_ptr = hw_base + pos; 249 new_hw_ptr = hw_base + pos;
248 } 250 }
249 } 251 }
250 if (delta > runtime->period_size) { 252 hdelta = new_hw_ptr - old_hw_ptr;
253 jdelta = jiffies - runtime->hw_ptr_jiffies;
254 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
255 delta = jdelta /
256 (((runtime->period_size * HZ) / runtime->rate)
257 + HZ/100);
258 hw_ptr_error(substream,
259 "hw_ptr skipping! [Q] "
260 "(pos=%ld, delta=%ld, period=%ld, "
261 "jdelta=%lu/%lu/%lu)\n",
262 (long)pos, (long)hdelta,
263 (long)runtime->period_size, jdelta,
264 ((hdelta * HZ) / runtime->rate), delta);
265 hw_ptr_interrupt = runtime->hw_ptr_interrupt +
266 runtime->period_size * delta;
267 if (hw_ptr_interrupt >= runtime->boundary)
268 hw_ptr_interrupt -= runtime->boundary;
269 /* rebase to interrupt position */
270 hw_base = new_hw_ptr = hw_ptr_interrupt;
271 /* align hw_base to buffer_size */
272 hw_base -= hw_base % runtime->buffer_size;
273 delta = 0;
274 }
275 if (delta > runtime->period_size + runtime->period_size / 2) {
251 hw_ptr_error(substream, 276 hw_ptr_error(substream,
252 "Lost interrupts? " 277 "Lost interrupts? "
253 "(stream=%i, delta=%ld, intr_ptr=%ld)\n", 278 "(stream=%i, delta=%ld, intr_ptr=%ld)\n",
@@ -263,6 +288,7 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
263 288
264 runtime->hw_ptr_base = hw_base; 289 runtime->hw_ptr_base = hw_base;
265 runtime->status->hw_ptr = new_hw_ptr; 290 runtime->status->hw_ptr = new_hw_ptr;
291 runtime->hw_ptr_jiffies = jiffies;
266 runtime->hw_ptr_interrupt = hw_ptr_interrupt; 292 runtime->hw_ptr_interrupt = hw_ptr_interrupt;
267 293
268 return snd_pcm_update_hw_ptr_post(substream, runtime); 294 return snd_pcm_update_hw_ptr_post(substream, runtime);
@@ -275,6 +301,7 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
275 snd_pcm_uframes_t pos; 301 snd_pcm_uframes_t pos;
276 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base; 302 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
277 snd_pcm_sframes_t delta; 303 snd_pcm_sframes_t delta;
304 unsigned long jdelta;
278 305
279 old_hw_ptr = runtime->status->hw_ptr; 306 old_hw_ptr = runtime->status->hw_ptr;
280 pos = snd_pcm_update_hw_ptr_pos(substream, runtime); 307 pos = snd_pcm_update_hw_ptr_pos(substream, runtime);
@@ -286,14 +313,15 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
286 new_hw_ptr = hw_base + pos; 313 new_hw_ptr = hw_base + pos;
287 314
288 delta = new_hw_ptr - old_hw_ptr; 315 delta = new_hw_ptr - old_hw_ptr;
316 jdelta = jiffies - runtime->hw_ptr_jiffies;
289 if (delta < 0) { 317 if (delta < 0) {
290 delta += runtime->buffer_size; 318 delta += runtime->buffer_size;
291 if (delta < 0) { 319 if (delta < 0) {
292 hw_ptr_error(substream, 320 hw_ptr_error(substream,
293 "Unexpected hw_pointer value [2] " 321 "Unexpected hw_pointer value [2] "
294 "(stream=%i, pos=%ld, old_ptr=%ld)\n", 322 "(stream=%i, pos=%ld, old_ptr=%ld, jdelta=%li)\n",
295 substream->stream, (long)pos, 323 substream->stream, (long)pos,
296 (long)old_hw_ptr); 324 (long)old_hw_ptr, jdelta);
297 return 0; 325 return 0;
298 } 326 }
299 hw_base += runtime->buffer_size; 327 hw_base += runtime->buffer_size;
@@ -301,12 +329,13 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
301 hw_base = 0; 329 hw_base = 0;
302 new_hw_ptr = hw_base + pos; 330 new_hw_ptr = hw_base + pos;
303 } 331 }
304 if (delta > runtime->period_size && runtime->periods > 1) { 332 if (((delta * HZ) / runtime->rate) > jdelta + HZ/100) {
305 hw_ptr_error(substream, 333 hw_ptr_error(substream,
306 "hw_ptr skipping! " 334 "hw_ptr skipping! "
307 "(pos=%ld, delta=%ld, period=%ld)\n", 335 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu)\n",
308 (long)pos, (long)delta, 336 (long)pos, (long)delta,
309 (long)runtime->period_size); 337 (long)runtime->period_size, jdelta,
338 ((delta * HZ) / runtime->rate));
310 return 0; 339 return 0;
311 } 340 }
312 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && 341 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -315,6 +344,7 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
315 344
316 runtime->hw_ptr_base = hw_base; 345 runtime->hw_ptr_base = hw_base;
317 runtime->status->hw_ptr = new_hw_ptr; 346 runtime->status->hw_ptr = new_hw_ptr;
347 runtime->hw_ptr_jiffies = jiffies;
318 348
319 return snd_pcm_update_hw_ptr_post(substream, runtime); 349 return snd_pcm_update_hw_ptr_post(substream, runtime);
320} 350}
@@ -1441,6 +1471,7 @@ static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1441 runtime->status->hw_ptr %= runtime->buffer_size; 1471 runtime->status->hw_ptr %= runtime->buffer_size;
1442 else 1472 else
1443 runtime->status->hw_ptr = 0; 1473 runtime->status->hw_ptr = 0;
1474 runtime->hw_ptr_jiffies = jiffies;
1444 snd_pcm_stream_unlock_irqrestore(substream, flags); 1475 snd_pcm_stream_unlock_irqrestore(substream, flags);
1445 return 0; 1476 return 0;
1446} 1477}
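To make the new jiffies-based hw_ptr sanity check concrete, here is a worked example with illustrative numbers (chosen for this note, not taken from the patch):

/* Worked example (illustrative values only):
 *   rate = 48000 Hz, HZ = 1000, period_size = 1024 frames
 *   hdelta = 4800 frames reported since the last update
 *
 *   (hdelta * HZ) / rate = (4800 * 1000) / 48000 = 100 jiffies of audio
 *
 * If only jdelta = 20 jiffies of wall-clock time have actually passed,
 *   100 > 20 + HZ/100 (= 30)
 * so the jump cannot be real: the code logs "hw_ptr skipping! [Q]" and
 * rebases new_hw_ptr onto hw_ptr_interrupt advanced by however many
 * whole periods the elapsed time can account for (zero in this case),
 * then continues from there. */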
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a151fb01ba82..fc6f98e257df 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -327,21 +327,16 @@ static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
327 struct snd_pcm_hw_params *params; 327 struct snd_pcm_hw_params *params;
328 int err; 328 int err;
329 329
330 params = kmalloc(sizeof(*params), GFP_KERNEL); 330 params = memdup_user(_params, sizeof(*params));
331 if (!params) { 331 if (IS_ERR(params))
332 err = -ENOMEM; 332 return PTR_ERR(params);
333 goto out; 333
334 }
335 if (copy_from_user(params, _params, sizeof(*params))) {
336 err = -EFAULT;
337 goto out;
338 }
339 err = snd_pcm_hw_refine(substream, params); 334 err = snd_pcm_hw_refine(substream, params);
340 if (copy_to_user(_params, params, sizeof(*params))) { 335 if (copy_to_user(_params, params, sizeof(*params))) {
341 if (!err) 336 if (!err)
342 err = -EFAULT; 337 err = -EFAULT;
343 } 338 }
344out: 339
345 kfree(params); 340 kfree(params);
346 return err; 341 return err;
347} 342}
@@ -465,21 +460,16 @@ static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
465 struct snd_pcm_hw_params *params; 460 struct snd_pcm_hw_params *params;
466 int err; 461 int err;
467 462
468 params = kmalloc(sizeof(*params), GFP_KERNEL); 463 params = memdup_user(_params, sizeof(*params));
469 if (!params) { 464 if (IS_ERR(params))
470 err = -ENOMEM; 465 return PTR_ERR(params);
471 goto out; 466
472 }
473 if (copy_from_user(params, _params, sizeof(*params))) {
474 err = -EFAULT;
475 goto out;
476 }
477 err = snd_pcm_hw_params(substream, params); 467 err = snd_pcm_hw_params(substream, params);
478 if (copy_to_user(_params, params, sizeof(*params))) { 468 if (copy_to_user(_params, params, sizeof(*params))) {
479 if (!err) 469 if (!err)
480 err = -EFAULT; 470 err = -EFAULT;
481 } 471 }
482out: 472
483 kfree(params); 473 kfree(params);
484 return err; 474 return err;
485} 475}
@@ -2593,13 +2583,11 @@ static int snd_pcm_playback_ioctl1(struct file *file,
2593 return -EFAULT; 2583 return -EFAULT;
2594 if (copy_from_user(&xfern, _xfern, sizeof(xfern))) 2584 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2595 return -EFAULT; 2585 return -EFAULT;
2596 bufs = kmalloc(sizeof(void *) * runtime->channels, GFP_KERNEL); 2586
2597 if (bufs == NULL) 2587 bufs = memdup_user(xfern.bufs,
2598 return -ENOMEM; 2588 sizeof(void *) * runtime->channels);
2599 if (copy_from_user(bufs, xfern.bufs, sizeof(void *) * runtime->channels)) { 2589 if (IS_ERR(bufs))
2600 kfree(bufs); 2590 return PTR_ERR(bufs);
2601 return -EFAULT;
2602 }
2603 result = snd_pcm_lib_writev(substream, bufs, xfern.frames); 2591 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2604 kfree(bufs); 2592 kfree(bufs);
2605 __put_user(result, &_xfern->result); 2593 __put_user(result, &_xfern->result);
@@ -2675,13 +2663,11 @@ static int snd_pcm_capture_ioctl1(struct file *file,
2675 return -EFAULT; 2663 return -EFAULT;
2676 if (copy_from_user(&xfern, _xfern, sizeof(xfern))) 2664 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2677 return -EFAULT; 2665 return -EFAULT;
2678 bufs = kmalloc(sizeof(void *) * runtime->channels, GFP_KERNEL); 2666
2679 if (bufs == NULL) 2667 bufs = memdup_user(xfern.bufs,
2680 return -ENOMEM; 2668 sizeof(void *) * runtime->channels);
2681 if (copy_from_user(bufs, xfern.bufs, sizeof(void *) * runtime->channels)) { 2669 if (IS_ERR(bufs))
2682 kfree(bufs); 2670 return PTR_ERR(bufs);
2683 return -EFAULT;
2684 }
2685 result = snd_pcm_lib_readv(substream, bufs, xfern.frames); 2671 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2686 kfree(bufs); 2672 kfree(bufs);
2687 __put_user(result, &_xfern->result); 2673 __put_user(result, &_xfern->result);
@@ -3312,18 +3298,12 @@ static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3312 int err; 3298 int err;
3313 3299
3314 params = kmalloc(sizeof(*params), GFP_KERNEL); 3300 params = kmalloc(sizeof(*params), GFP_KERNEL);
3315 if (!params) { 3301 if (!params)
3316 err = -ENOMEM; 3302 return -ENOMEM;
3317 goto out;
3318 }
3319 oparams = kmalloc(sizeof(*oparams), GFP_KERNEL);
3320 if (!oparams) {
3321 err = -ENOMEM;
3322 goto out;
3323 }
3324 3303
3325 if (copy_from_user(oparams, _oparams, sizeof(*oparams))) { 3304 oparams = memdup_user(_oparams, sizeof(*oparams));
3326 err = -EFAULT; 3305 if (IS_ERR(oparams)) {
3306 err = PTR_ERR(oparams);
3327 goto out; 3307 goto out;
3328 } 3308 }
3329 snd_pcm_hw_convert_from_old_params(params, oparams); 3309 snd_pcm_hw_convert_from_old_params(params, oparams);
@@ -3333,9 +3313,10 @@ static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3333 if (!err) 3313 if (!err)
3334 err = -EFAULT; 3314 err = -EFAULT;
3335 } 3315 }
3316
3317 kfree(oparams);
3336out: 3318out:
3337 kfree(params); 3319 kfree(params);
3338 kfree(oparams);
3339 return err; 3320 return err;
3340} 3321}
3341 3322
@@ -3347,17 +3328,12 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3347 int err; 3328 int err;
3348 3329
3349 params = kmalloc(sizeof(*params), GFP_KERNEL); 3330 params = kmalloc(sizeof(*params), GFP_KERNEL);
3350 if (!params) { 3331 if (!params)
3351 err = -ENOMEM; 3332 return -ENOMEM;
3352 goto out; 3333
3353 } 3334 oparams = memdup_user(_oparams, sizeof(*oparams));
3354 oparams = kmalloc(sizeof(*oparams), GFP_KERNEL); 3335 if (IS_ERR(oparams)) {
3355 if (!oparams) { 3336 err = PTR_ERR(oparams);
3356 err = -ENOMEM;
3357 goto out;
3358 }
3359 if (copy_from_user(oparams, _oparams, sizeof(*oparams))) {
3360 err = -EFAULT;
3361 goto out; 3337 goto out;
3362 } 3338 }
3363 snd_pcm_hw_convert_from_old_params(params, oparams); 3339 snd_pcm_hw_convert_from_old_params(params, oparams);
@@ -3367,9 +3343,10 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3367 if (!err) 3343 if (!err)
3368 err = -EFAULT; 3344 err = -EFAULT;
3369 } 3345 }
3346
3347 kfree(oparams);
3370out: 3348out:
3371 kfree(params); 3349 kfree(params);
3372 kfree(oparams);
3373 return err; 3350 return err;
3374} 3351}
3375#endif /* CONFIG_SND_SUPPORT_OLD_API */ 3352#endif /* CONFIG_SND_SUPPORT_OLD_API */
diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
index 38693f47c262..c956fe462569 100644
--- a/sound/core/seq/seq_compat.c
+++ b/sound/core/seq/seq_compat.c
@@ -48,12 +48,11 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
48 struct snd_seq_port_info *data; 48 struct snd_seq_port_info *data;
49 mm_segment_t fs; 49 mm_segment_t fs;
50 50
51 data = kmalloc(sizeof(*data), GFP_KERNEL); 51 data = memdup_user(data32, sizeof(*data32));
52 if (! data) 52 if (IS_ERR(data))
53 return -ENOMEM; 53 return PTR_ERR(data);
54 54
55 if (copy_from_user(data, data32, sizeof(*data32)) || 55 if (get_user(data->flags, &data32->flags) ||
56 get_user(data->flags, &data32->flags) ||
57 get_user(data->time_queue, &data32->time_queue)) 56 get_user(data->time_queue, &data32->time_queue))
58 goto error; 57 goto error;
59 data->kernel = NULL; 58 data->kernel = NULL;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 3f0050d0b71e..8f8b17ac074d 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1395,13 +1395,10 @@ static int snd_timer_user_ginfo(struct file *file,
1395 struct list_head *p; 1395 struct list_head *p;
1396 int err = 0; 1396 int err = 0;
1397 1397
1398 ginfo = kmalloc(sizeof(*ginfo), GFP_KERNEL); 1398 ginfo = memdup_user(_ginfo, sizeof(*ginfo));
1399 if (! ginfo) 1399 if (IS_ERR(ginfo))
1400 return -ENOMEM; 1400 return PTR_ERR(ginfo);
1401 if (copy_from_user(ginfo, _ginfo, sizeof(*ginfo))) { 1401
1402 kfree(ginfo);
1403 return -EFAULT;
1404 }
1405 tid = ginfo->tid; 1402 tid = ginfo->tid;
1406 memset(ginfo, 0, sizeof(*ginfo)); 1403 memset(ginfo, 0, sizeof(*ginfo));
1407 ginfo->tid = tid; 1404 ginfo->tid = tid;
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 49037d074c71..bdc8dde4e4a2 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -684,15 +684,16 @@ static int snd_sb_csp_load(struct snd_sb_csp * p, const unsigned char *buf, int
684 684
685static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags) 685static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags)
686{ 686{
687 int err = -ENOMEM; 687 int err;
688 unsigned char *kbuf = kmalloc(size, GFP_KERNEL); 688 unsigned char *kbuf;
689 if (kbuf) { 689
690 if (copy_from_user(kbuf, buf, size)) 690 kbuf = memdup_user(buf, size);
691 err = -EFAULT; 691 if (IS_ERR(kbuf))
692 else 692 return PTR_ERR(kbuf);
693 err = snd_sb_csp_load(p, kbuf, size, load_flags); 693
694 kfree(kbuf); 694 err = snd_sb_csp_load(p, kbuf, size, load_flags);
695 } 695
696 kfree(kbuf);
696 return err; 697 return err;
697} 698}
698 699
diff --git a/sound/isa/wavefront/wavefront_fx.c b/sound/isa/wavefront/wavefront_fx.c
index a4345fc07561..2bb1cee09255 100644
--- a/sound/isa/wavefront/wavefront_fx.c
+++ b/sound/isa/wavefront/wavefront_fx.c
@@ -202,15 +202,11 @@ snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file,
202 "> 512 bytes to FX\n"); 202 "> 512 bytes to FX\n");
203 return -EIO; 203 return -EIO;
204 } 204 }
205 page_data = kmalloc(r.data[2] * sizeof(short), GFP_KERNEL); 205 page_data = memdup_user((unsigned char __user *)
206 if (!page_data) 206 r.data[3],
207 return -ENOMEM; 207 r.data[2] * sizeof(short));
208 if (copy_from_user (page_data, 208 if (IS_ERR(page_data))
209 (unsigned char __user *) r.data[3], 209 return PTR_ERR(page_data);
210 r.data[2] * sizeof(short))) {
211 kfree(page_data);
212 return -EFAULT;
213 }
214 pd = page_data; 210 pd = page_data;
215 } 211 }
216 212
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index beb312cca75b..5d4ff48c4345 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -1664,12 +1664,11 @@ snd_wavefront_synth_ioctl (struct snd_hwdep *hw, struct file *file,
1664 break; 1664 break;
1665 1665
1666 case WFCTL_WFCMD: 1666 case WFCTL_WFCMD:
1667 wc = kmalloc(sizeof(*wc), GFP_KERNEL); 1667 wc = memdup_user(argp, sizeof(*wc));
1668 if (! wc) 1668 if (IS_ERR(wc))
1669 return -ENOMEM; 1669 return PTR_ERR(wc);
1670 if (copy_from_user (wc, argp, sizeof (*wc))) 1670
1671 err = -EFAULT; 1671 if (wavefront_synth_control (acard, wc) < 0)
1672 else if (wavefront_synth_control (acard, wc) < 0)
1673 err = -EIO; 1672 err = -EIO;
1674 else if (copy_to_user (argp, wc, sizeof (*wc))) 1673 else if (copy_to_user (argp, wc, sizeof (*wc)))
1675 err = -EFAULT; 1674 err = -EFAULT;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 191e1cd9997d..4b302d86f5f2 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2493,24 +2493,17 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
2493 case SNDRV_EMU10K1_IOCTL_CODE_POKE: 2493 case SNDRV_EMU10K1_IOCTL_CODE_POKE:
2494 if (!capable(CAP_SYS_ADMIN)) 2494 if (!capable(CAP_SYS_ADMIN))
2495 return -EPERM; 2495 return -EPERM;
2496 icode = kmalloc(sizeof(*icode), GFP_KERNEL); 2496
2497 if (icode == NULL) 2497 icode = memdup_user(argp, sizeof(*icode));
2498 return -ENOMEM; 2498 if (IS_ERR(icode))
2499 if (copy_from_user(icode, argp, sizeof(*icode))) { 2499 return PTR_ERR(icode);
2500 kfree(icode);
2501 return -EFAULT;
2502 }
2503 res = snd_emu10k1_icode_poke(emu, icode); 2500 res = snd_emu10k1_icode_poke(emu, icode);
2504 kfree(icode); 2501 kfree(icode);
2505 return res; 2502 return res;
2506 case SNDRV_EMU10K1_IOCTL_CODE_PEEK: 2503 case SNDRV_EMU10K1_IOCTL_CODE_PEEK:
2507 icode = kmalloc(sizeof(*icode), GFP_KERNEL); 2504 icode = memdup_user(argp, sizeof(*icode));
2508 if (icode == NULL) 2505 if (IS_ERR(icode))
2509 return -ENOMEM; 2506 return PTR_ERR(icode);
2510 if (copy_from_user(icode, argp, sizeof(*icode))) {
2511 kfree(icode);
2512 return -EFAULT;
2513 }
2514 res = snd_emu10k1_icode_peek(emu, icode); 2507 res = snd_emu10k1_icode_peek(emu, icode);
2515 if (res == 0 && copy_to_user(argp, icode, sizeof(*icode))) { 2508 if (res == 0 && copy_to_user(argp, icode, sizeof(*icode))) {
2516 kfree(icode); 2509 kfree(icode);
@@ -2519,24 +2512,16 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
2519 kfree(icode); 2512 kfree(icode);
2520 return res; 2513 return res;
2521 case SNDRV_EMU10K1_IOCTL_PCM_POKE: 2514 case SNDRV_EMU10K1_IOCTL_PCM_POKE:
2522 ipcm = kmalloc(sizeof(*ipcm), GFP_KERNEL); 2515 ipcm = memdup_user(argp, sizeof(*ipcm));
2523 if (ipcm == NULL) 2516 if (IS_ERR(ipcm))
2524 return -ENOMEM; 2517 return PTR_ERR(ipcm);
2525 if (copy_from_user(ipcm, argp, sizeof(*ipcm))) {
2526 kfree(ipcm);
2527 return -EFAULT;
2528 }
2529 res = snd_emu10k1_ipcm_poke(emu, ipcm); 2518 res = snd_emu10k1_ipcm_poke(emu, ipcm);
2530 kfree(ipcm); 2519 kfree(ipcm);
2531 return res; 2520 return res;
2532 case SNDRV_EMU10K1_IOCTL_PCM_PEEK: 2521 case SNDRV_EMU10K1_IOCTL_PCM_PEEK:
2533 ipcm = kzalloc(sizeof(*ipcm), GFP_KERNEL); 2522 ipcm = memdup_user(argp, sizeof(*ipcm));
2534 if (ipcm == NULL) 2523 if (IS_ERR(ipcm))
2535 return -ENOMEM; 2524 return PTR_ERR(ipcm);
2536 if (copy_from_user(ipcm, argp, sizeof(*ipcm))) {
2537 kfree(ipcm);
2538 return -EFAULT;
2539 }
2540 res = snd_emu10k1_ipcm_peek(emu, ipcm); 2525 res = snd_emu10k1_ipcm_peek(emu, ipcm);
2541 if (res == 0 && copy_to_user(argp, ipcm, sizeof(*ipcm))) { 2526 if (res == 0 && copy_to_user(argp, ipcm, sizeof(*ipcm))) {
2542 kfree(ipcm); 2527 kfree(ipcm);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a4e5e5952115..fd6e6f337d10 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2250,7 +2250,11 @@ int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
2250 err = bus->ops.command(bus, res); 2250 err = bus->ops.command(bus, res);
2251 if (!err) { 2251 if (!err) {
2252 struct hda_cache_head *c; 2252 struct hda_cache_head *c;
2253 u32 key = build_cmd_cache_key(nid, verb); 2253 u32 key;
2254 /* parm may contain the verb stuff for get/set amp */
2255 verb = verb | (parm >> 8);
2256 parm &= 0xff;
2257 key = build_cmd_cache_key(nid, verb);
2254 c = get_alloc_hash(&codec->cmd_cache, key); 2258 c = get_alloc_hash(&codec->cmd_cache, key);
2255 if (c) 2259 if (c)
2256 c->val = parm; 2260 c->val = parm;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7ba8db5d4c42..bc882f8f163c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -312,6 +312,9 @@ struct azx_dev {
312 unsigned int period_bytes; /* size of the period in bytes */ 312 unsigned int period_bytes; /* size of the period in bytes */
313 unsigned int frags; /* number for period in the play buffer */ 313 unsigned int frags; /* number for period in the play buffer */
314 unsigned int fifo_size; /* FIFO size */ 314 unsigned int fifo_size; /* FIFO size */
315 unsigned int start_flag: 1; /* stream full start flag */
316 unsigned long start_jiffies; /* start + minimum jiffies */
317 unsigned long min_jiffies; /* minimum jiffies before position is valid */
315 318
316 void __iomem *sd_addr; /* stream descriptor pointer */ 319 void __iomem *sd_addr; /* stream descriptor pointer */
317 320
@@ -330,7 +333,6 @@ struct azx_dev {
330 unsigned int opened :1; 333 unsigned int opened :1;
331 unsigned int running :1; 334 unsigned int running :1;
332 unsigned int irq_pending :1; 335 unsigned int irq_pending :1;
333 unsigned int irq_ignore :1;
334 /* 336 /*
335 * For VIA: 337 * For VIA:
336 * A flag to ensure DMA position is 0 338 * A flag to ensure DMA position is 0
@@ -975,7 +977,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
975 struct azx *chip = dev_id; 977 struct azx *chip = dev_id;
976 struct azx_dev *azx_dev; 978 struct azx_dev *azx_dev;
977 u32 status; 979 u32 status;
978 int i; 980 int i, ok;
979 981
980 spin_lock(&chip->reg_lock); 982 spin_lock(&chip->reg_lock);
981 983
@@ -991,18 +993,14 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
991 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK); 993 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
992 if (!azx_dev->substream || !azx_dev->running) 994 if (!azx_dev->substream || !azx_dev->running)
993 continue; 995 continue;
994 /* ignore the first dummy IRQ (due to pos_adj) */
995 if (azx_dev->irq_ignore) {
996 azx_dev->irq_ignore = 0;
997 continue;
998 }
999 /* check whether this IRQ is really acceptable */ 996 /* check whether this IRQ is really acceptable */
1000 if (azx_position_ok(chip, azx_dev)) { 997 ok = azx_position_ok(chip, azx_dev);
998 if (ok == 1) {
1001 azx_dev->irq_pending = 0; 999 azx_dev->irq_pending = 0;
1002 spin_unlock(&chip->reg_lock); 1000 spin_unlock(&chip->reg_lock);
1003 snd_pcm_period_elapsed(azx_dev->substream); 1001 snd_pcm_period_elapsed(azx_dev->substream);
1004 spin_lock(&chip->reg_lock); 1002 spin_lock(&chip->reg_lock);
1005 } else if (chip->bus && chip->bus->workq) { 1003 } else if (ok == 0 && chip->bus && chip->bus->workq) {
1006 /* bogus IRQ, process it later */ 1004 /* bogus IRQ, process it later */
1007 azx_dev->irq_pending = 1; 1005 azx_dev->irq_pending = 1;
1008 queue_work(chip->bus->workq, 1006 queue_work(chip->bus->workq,
@@ -1088,7 +1086,6 @@ static int azx_setup_periods(struct azx *chip,
1088 bdl = (u32 *)azx_dev->bdl.area; 1086 bdl = (u32 *)azx_dev->bdl.area;
1089 ofs = 0; 1087 ofs = 0;
1090 azx_dev->frags = 0; 1088 azx_dev->frags = 0;
1091 azx_dev->irq_ignore = 0;
1092 pos_adj = bdl_pos_adj[chip->dev_index]; 1089 pos_adj = bdl_pos_adj[chip->dev_index];
1093 if (pos_adj > 0) { 1090 if (pos_adj > 0) {
1094 struct snd_pcm_runtime *runtime = substream->runtime; 1091 struct snd_pcm_runtime *runtime = substream->runtime;
@@ -1109,7 +1106,6 @@ static int azx_setup_periods(struct azx *chip,
1109 &bdl, ofs, pos_adj, 1); 1106 &bdl, ofs, pos_adj, 1);
1110 if (ofs < 0) 1107 if (ofs < 0)
1111 goto error; 1108 goto error;
1112 azx_dev->irq_ignore = 1;
1113 } 1109 }
1114 } else 1110 } else
1115 pos_adj = 0; 1111 pos_adj = 0;
@@ -1155,6 +1151,9 @@ static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
1155 while (((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) && 1151 while (((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) &&
1156 --timeout) 1152 --timeout)
1157 ; 1153 ;
1154
1155 /* reset first position - may not be synced with hw at this time */
1156 *azx_dev->posbuf = 0;
1158} 1157}
1159 1158
1160/* 1159/*
@@ -1409,7 +1408,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
1409 snd_pcm_set_sync(substream); 1408 snd_pcm_set_sync(substream);
1410 mutex_unlock(&chip->open_mutex); 1409 mutex_unlock(&chip->open_mutex);
1411 1410
1412 azx_stream_reset(chip, azx_dev);
1413 return 0; 1411 return 0;
1414} 1412}
1415 1413
@@ -1474,6 +1472,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
1474 unsigned int bufsize, period_bytes, format_val; 1472 unsigned int bufsize, period_bytes, format_val;
1475 int err; 1473 int err;
1476 1474
1475 azx_stream_reset(chip, azx_dev);
1477 format_val = snd_hda_calc_stream_format(runtime->rate, 1476 format_val = snd_hda_calc_stream_format(runtime->rate,
1478 runtime->channels, 1477 runtime->channels,
1479 runtime->format, 1478 runtime->format,
@@ -1502,6 +1501,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
1502 return err; 1501 return err;
1503 } 1502 }
1504 1503
1504 azx_dev->min_jiffies = (runtime->period_size * HZ) /
1505 (runtime->rate * 2);
1505 azx_setup_controller(chip, azx_dev); 1506 azx_setup_controller(chip, azx_dev);
1506 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 1507 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
1507 azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1; 1508 azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1;
@@ -1518,13 +1519,14 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
1518 struct azx *chip = apcm->chip; 1519 struct azx *chip = apcm->chip;
1519 struct azx_dev *azx_dev; 1520 struct azx_dev *azx_dev;
1520 struct snd_pcm_substream *s; 1521 struct snd_pcm_substream *s;
1521 int start, nsync = 0, sbits = 0; 1522 int rstart = 0, start, nsync = 0, sbits = 0;
1522 int nwait, timeout; 1523 int nwait, timeout;
1523 1524
1524 switch (cmd) { 1525 switch (cmd) {
1526 case SNDRV_PCM_TRIGGER_START:
1527 rstart = 1;
1525 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 1528 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1526 case SNDRV_PCM_TRIGGER_RESUME: 1529 case SNDRV_PCM_TRIGGER_RESUME:
1527 case SNDRV_PCM_TRIGGER_START:
1528 start = 1; 1530 start = 1;
1529 break; 1531 break;
1530 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 1532 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
@@ -1554,6 +1556,10 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
1554 if (s->pcm->card != substream->pcm->card) 1556 if (s->pcm->card != substream->pcm->card)
1555 continue; 1557 continue;
1556 azx_dev = get_azx_dev(s); 1558 azx_dev = get_azx_dev(s);
1559 if (rstart) {
1560 azx_dev->start_flag = 1;
1561 azx_dev->start_jiffies = jiffies + azx_dev->min_jiffies;
1562 }
1557 if (start) 1563 if (start)
1558 azx_stream_start(chip, azx_dev); 1564 azx_stream_start(chip, azx_dev);
1559 else 1565 else
@@ -1703,6 +1709,11 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
1703{ 1709{
1704 unsigned int pos; 1710 unsigned int pos;
1705 1711
1712 if (azx_dev->start_flag &&
1713 time_before_eq(jiffies, azx_dev->start_jiffies))
1714 return -1; /* bogus (too early) interrupt */
1715 azx_dev->start_flag = 0;
1716
1706 pos = azx_get_position(chip, azx_dev); 1717 pos = azx_get_position(chip, azx_dev);
1707 if (chip->position_fix == POS_FIX_AUTO) { 1718 if (chip->position_fix == POS_FIX_AUTO) {
1708 if (!pos) { 1719 if (!pos) {
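A quick sanity check on the new start_flag/min_jiffies logic, again with illustrative numbers rather than values from the patch:

/* Illustrative numbers only:
 *   period_size = 1024 frames, rate = 48000 Hz, HZ = 1000
 *   min_jiffies = (1024 * 1000) / (48000 * 2) ~= 10 jiffies (half a period)
 *
 * SNDRV_PCM_TRIGGER_START sets start_flag and start_jiffies = jiffies + 10.
 * A stream IRQ arriving before start_jiffies makes azx_position_ok()
 * return -1, so the handler neither reports a period elapsed nor queues
 * pending work - this replaces the old one-shot irq_ignore flag. */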
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 1f2ad76ca94b..56ce19e68cb5 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -350,12 +350,20 @@ static int conexant_mux_enum_put(struct snd_kcontrol *kcontrol,
350} 350}
351 351
352#ifdef CONFIG_SND_JACK 352#ifdef CONFIG_SND_JACK
353static void conexant_free_jack_priv(struct snd_jack *jack)
354{
355 struct conexant_jack *jacks = jack->private_data;
356 jacks->nid = 0;
357 jacks->jack = NULL;
358}
359
353static int conexant_add_jack(struct hda_codec *codec, 360static int conexant_add_jack(struct hda_codec *codec,
354 hda_nid_t nid, int type) 361 hda_nid_t nid, int type)
355{ 362{
356 struct conexant_spec *spec; 363 struct conexant_spec *spec;
357 struct conexant_jack *jack; 364 struct conexant_jack *jack;
358 const char *name; 365 const char *name;
366 int err;
359 367
360 spec = codec->spec; 368 spec = codec->spec;
361 snd_array_init(&spec->jacks, sizeof(*jack), 32); 369 snd_array_init(&spec->jacks, sizeof(*jack), 32);
@@ -368,7 +376,12 @@ static int conexant_add_jack(struct hda_codec *codec,
368 jack->nid = nid; 376 jack->nid = nid;
369 jack->type = type; 377 jack->type = type;
370 378
371 return snd_jack_new(codec->bus->card, name, type, &jack->jack); 379 err = snd_jack_new(codec->bus->card, name, type, &jack->jack);
380 if (err < 0)
381 return err;
382 jack->jack->private_data = jack;
383 jack->jack->private_free = conexant_free_jack_priv;
384 return 0;
372} 385}
373 386
374static void conexant_report_jack(struct hda_codec *codec, hda_nid_t nid) 387static void conexant_report_jack(struct hda_codec *codec, hda_nid_t nid)
@@ -455,8 +468,10 @@ static void conexant_free(struct hda_codec *codec)
455 if (spec->jacks.list) { 468 if (spec->jacks.list) {
456 struct conexant_jack *jacks = spec->jacks.list; 469 struct conexant_jack *jacks = spec->jacks.list;
457 int i; 470 int i;
458 for (i = 0; i < spec->jacks.used; i++) 471 for (i = 0; i < spec->jacks.used; i++, jacks++) {
459 snd_device_free(codec->bus->card, &jacks[i].jack); 472 if (jacks->jack)
473 snd_device_free(codec->bus->card, jacks->jack);
474 }
460 snd_array_free(&spec->jacks); 475 snd_array_free(&spec->jacks);
461 } 476 }
462#endif 477#endif
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f35e58a2d921..6ed787eedd06 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -8742,10 +8742,9 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
8742 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD), 8742 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD),
8743 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch), 8743 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch),
8744 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION), 8744 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION),
8745 SND_PCI_QUIRK(0x1734, 0x1107, "FSC AMILO Xi2550", 8745 SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1100, "FSC AMILO Xi/Pi25xx",
8746 ALC883_FUJITSU_PI2515), 8746 ALC883_FUJITSU_PI2515),
8747 SND_PCI_QUIRK(0x1734, 0x1108, "Fujitsu AMILO Pi2515", ALC883_FUJITSU_PI2515), 8747 SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1130, "Fujitsu AMILO Xa35xx",
8748 SND_PCI_QUIRK(0x1734, 0x113d, "Fujitsu AMILO Xa3530",
8749 ALC888_FUJITSU_XA3530), 8748 ALC888_FUJITSU_XA3530),
8750 SND_PCI_QUIRK(0x17aa, 0x101e, "Lenovo 101e", ALC883_LENOVO_101E_2ch), 8749 SND_PCI_QUIRK(0x17aa, 0x101e, "Lenovo 101e", ALC883_LENOVO_101E_2ch),
8751 SND_PCI_QUIRK(0x17aa, 0x2085, "Lenovo NB0763", ALC883_LENOVO_NB0763), 8750 SND_PCI_QUIRK(0x17aa, 0x2085, "Lenovo NB0763", ALC883_LENOVO_NB0763),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 61996a2f45df..ce30b459aee6 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -3851,6 +3851,15 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
3851 AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */ 3851 AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */
3852} 3852}
3853 3853
3854#ifdef CONFIG_SND_JACK
3855static void stac92xx_free_jack_priv(struct snd_jack *jack)
3856{
3857 struct sigmatel_jack *jacks = jack->private_data;
3858 jacks->nid = 0;
3859 jacks->jack = NULL;
3860}
3861#endif
3862
3854static int stac92xx_add_jack(struct hda_codec *codec, 3863static int stac92xx_add_jack(struct hda_codec *codec,
3855 hda_nid_t nid, int type) 3864 hda_nid_t nid, int type)
3856{ 3865{
@@ -3860,6 +3869,7 @@ static int stac92xx_add_jack(struct hda_codec *codec,
3860 int def_conf = snd_hda_codec_get_pincfg(codec, nid); 3869 int def_conf = snd_hda_codec_get_pincfg(codec, nid);
3861 int connectivity = get_defcfg_connect(def_conf); 3870 int connectivity = get_defcfg_connect(def_conf);
3862 char name[32]; 3871 char name[32];
3872 int err;
3863 3873
3864 if (connectivity && connectivity != AC_JACK_PORT_FIXED) 3874 if (connectivity && connectivity != AC_JACK_PORT_FIXED)
3865 return 0; 3875 return 0;
@@ -3876,10 +3886,15 @@ static int stac92xx_add_jack(struct hda_codec *codec,
3876 snd_hda_get_jack_connectivity(def_conf), 3886 snd_hda_get_jack_connectivity(def_conf),
3877 snd_hda_get_jack_location(def_conf)); 3887 snd_hda_get_jack_location(def_conf));
3878 3888
3879 return snd_jack_new(codec->bus->card, name, type, &jack->jack); 3889 err = snd_jack_new(codec->bus->card, name, type, &jack->jack);
3880#else 3890 if (err < 0) {
3881 return 0; 3891 jack->nid = 0;
3892 return err;
3893 }
3894 jack->jack->private_data = jack;
3895 jack->jack->private_free = stac92xx_free_jack_priv;
3882#endif 3896#endif
3897 return 0;
3883} 3898}
3884 3899
3885static int stac_add_event(struct sigmatel_spec *spec, hda_nid_t nid, 3900static int stac_add_event(struct sigmatel_spec *spec, hda_nid_t nid,
@@ -4138,8 +4153,10 @@ static void stac92xx_free_jacks(struct hda_codec *codec)
4138 if (!codec->bus->shutdown && spec->jacks.list) { 4153 if (!codec->bus->shutdown && spec->jacks.list) {
4139 struct sigmatel_jack *jacks = spec->jacks.list; 4154 struct sigmatel_jack *jacks = spec->jacks.list;
4140 int i; 4155 int i;
4141 for (i = 0; i < spec->jacks.used; i++) 4156 for (i = 0; i < spec->jacks.used; i++, jacks++) {
4142 snd_device_free(codec->bus->card, &jacks[i].jack); 4157 if (jacks->jack)
4158 snd_device_free(codec->bus->card, jacks->jack);
4159 }
4143 } 4160 }
4144 snd_array_free(&spec->jacks); 4161 snd_array_free(&spec->jacks);
4145#endif 4162#endif
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 57648810eaf1..5dced5b79387 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -355,6 +355,9 @@ struct ichdev {
355 unsigned int fragsize1; 355 unsigned int fragsize1;
356 unsigned int position; 356 unsigned int position;
357 unsigned int pos_shift; 357 unsigned int pos_shift;
358 unsigned int last_pos;
359 unsigned long last_pos_jiffies;
360 unsigned int jiffy_to_bytes;
358 int frags; 361 int frags;
359 int lvi; 362 int lvi;
360 int lvi_frag; 363 int lvi_frag;
@@ -838,7 +841,10 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
 		ichdev->suspended = 0;
 		/* fallthru */
 	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		val = ICH_IOCE | ICH_STARTBM;
+		ichdev->last_pos = ichdev->position;
+		ichdev->last_pos_jiffies = jiffies;
 		break;
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 		ichdev->suspended = 1;
@@ -849,9 +855,6 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		val = ICH_IOCE;
 		break;
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		val = ICH_IOCE | ICH_STARTBM;
-		break;
 	default:
 		return -EINVAL;
 	}
@@ -1045,6 +1048,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
 		ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
 	}
 	snd_intel8x0_setup_periods(chip, ichdev);
+	ichdev->jiffy_to_bytes = (runtime->rate * 4 * ichdev->pos_shift) / HZ;
 	return 0;
 }
 
@@ -1053,7 +1057,7 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
 	struct intel8x0 *chip = snd_pcm_substream_chip(substream);
 	struct ichdev *ichdev = get_ichdev(substream);
 	size_t ptr1, ptr;
-	int civ, timeout = 100;
+	int civ, timeout = 10;
 	unsigned int position;
 
 	spin_lock(&chip->reg_lock);
@@ -1069,9 +1073,19 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
 		    ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
 			break;
 	} while (timeout--);
-	ptr1 <<= ichdev->pos_shift;
-	ptr = ichdev->fragsize1 - ptr1;
-	ptr += position;
+	if (ptr1 != 0) {
+		ptr1 <<= ichdev->pos_shift;
+		ptr = ichdev->fragsize1 - ptr1;
+		ptr += position;
+		ichdev->last_pos = ptr;
+		ichdev->last_pos_jiffies = jiffies;
+	} else {
+		ptr1 = jiffies - ichdev->last_pos_jiffies;
+		if (ptr1)
+			ptr1 -= 1;
+		ptr = ichdev->last_pos + ptr1 * ichdev->jiffy_to_bytes;
+		ptr %= ichdev->size;
+	}
 	spin_unlock(&chip->reg_lock);
 	if (ptr >= ichdev->size)
 		return 0;
@@ -2661,12 +2675,14 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
 	struct snd_pcm_substream *subs;
 	struct ichdev *ichdev;
 	unsigned long port;
-	unsigned long pos, t;
-	struct timeval start_time, stop_time;
+	unsigned long pos, pos1, t;
+	int civ, timeout = 1000, attempt = 1;
+	struct timespec start_time, stop_time;
 
 	if (chip->ac97_bus->clock != 48000)
 		return; /* specified in module option */
 
+      __again:
 	subs = chip->pcm[0]->streams[0].substream;
 	if (! subs || subs->dma_buffer.bytes < INTEL8X0_TESTBUF_SIZE) {
 		snd_printk(KERN_WARNING "no playback buffer allocated - aborting measure ac97 clock\n");
@@ -2674,7 +2690,7 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
 	}
 	ichdev = &chip->ichd[ICHD_PCMOUT];
 	ichdev->physbuf = subs->dma_buffer.addr;
-	ichdev->size = chip->ichd[ICHD_PCMOUT].fragsize = INTEL8X0_TESTBUF_SIZE;
+	ichdev->size = ichdev->fragsize = INTEL8X0_TESTBUF_SIZE;
 	ichdev->substream = NULL; /* don't process interrupts */
 
 	/* set rate */
@@ -2693,16 +2709,31 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
 		iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE);
 		iputdword(chip, ICHREG(ALI_DMACR), 1 << ichdev->ali_slot);
 	}
-	do_gettimeofday(&start_time);
+	do_posix_clock_monotonic_gettime(&start_time);
 	spin_unlock_irq(&chip->reg_lock);
 	msleep(50);
 	spin_lock_irq(&chip->reg_lock);
 	/* check the position */
-	pos = ichdev->fragsize1;
-	pos -= igetword(chip, ichdev->reg_offset + ichdev->roff_picb) << ichdev->pos_shift;
-	pos += ichdev->position;
+	do {
+		civ = igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV);
+		pos1 = igetword(chip, ichdev->reg_offset + ichdev->roff_picb);
+		if (pos1 == 0) {
+			udelay(10);
+			continue;
+		}
+		if (civ == igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV) &&
+		    pos1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
+			break;
+	} while (timeout--);
+	if (pos1 == 0) {	/* oops, this value is not reliable */
+		pos = 0;
+	} else {
+		pos = ichdev->fragsize1;
+		pos -= pos1 << ichdev->pos_shift;
+		pos += ichdev->position;
+	}
 	chip->in_measurement = 0;
-	do_gettimeofday(&stop_time);
+	do_posix_clock_monotonic_gettime(&stop_time);
 	/* stop */
 	if (chip->device_type == DEVICE_ALI) {
 		iputdword(chip, ICHREG(ALI_DMACR), 1 << (ichdev->ali_slot + 16));
@@ -2717,19 +2748,37 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
 	iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS);
 	spin_unlock_irq(&chip->reg_lock);
 
+	if (pos == 0) {
+		snd_printk(KERN_ERR "intel8x0: measure - unreliable DMA position..\n");
+	      __retry:
+		if (attempt < 2) {
+			attempt++;
+			goto __again;
+		}
+		return;
+	}
+
+	pos /= 4;
 	t = stop_time.tv_sec - start_time.tv_sec;
 	t *= 1000000;
-	t += stop_time.tv_usec - start_time.tv_usec;
-	printk(KERN_INFO "%s: measured %lu usecs\n", __func__, t);
+	t += (stop_time.tv_nsec - start_time.tv_nsec) / 1000;
+	printk(KERN_INFO "%s: measured %lu usecs (%lu samples)\n", __func__, t, pos);
 	if (t == 0) {
-		snd_printk(KERN_ERR "?? calculation error..\n");
-		return;
+		snd_printk(KERN_ERR "intel8x0: ?? calculation error..\n");
+		goto __retry;
 	}
-	pos = (pos / 4) * 1000;
+	pos *= 1000;
 	pos = (pos / t) * 1000 + ((pos % t) * 1000) / t;
-	if (pos < 40000 || pos >= 60000)
+	if (pos < 40000 || pos >= 60000) {
 		/* abnormal value. hw problem? */
 		printk(KERN_INFO "intel8x0: measured clock %ld rejected\n", pos);
+		goto __retry;
+	} else if (pos > 40500 && pos < 41500)
+		/* first exception - 41000Hz reference clock */
+		chip->ac97_bus->clock = 41000;
+	else if (pos > 43600 && pos < 44600)
+		/* second exception - 44100HZ reference clock */
+		chip->ac97_bus->clock = 44100;
 	else if (pos < 47500 || pos > 48500)
 		/* not 48000Hz, tuning the clock.. */
 		chip->ac97_bus->clock = (chip->ac97_bus->clock * 48000) / pos;
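
Both the PCM pointer callback and the clock-measurement loop above now treat a zero PICB readback as unreliable and fall back to interpolating the position from wall time, using the last_pos/last_pos_jiffies/jiffy_to_bytes fields added to struct ichdev. A rough sketch of that interpolation as a standalone helper (the helper itself is illustrative, not part of the driver; jiffy_to_bytes is what the prepare hunk computes as (rate * 4 * pos_shift) / HZ):

#include <linux/jiffies.h>

/*
 * Estimate the DMA position when the hardware register cannot be
 * trusted: advance from the last good position by the elapsed jiffies
 * times a precomputed bytes-per-jiffy factor, wrapping at buffer size.
 */
static unsigned int estimate_dma_pos(unsigned int last_pos,
				     unsigned long last_pos_jiffies,
				     unsigned int jiffy_to_bytes,
				     unsigned int buffer_bytes)
{
	unsigned long elapsed = jiffies - last_pos_jiffies;

	if (elapsed)			/* stay one tick conservative */
		elapsed--;
	return (last_pos + elapsed * jiffy_to_bytes) % buffer_bytes;
}

For the measurement itself, with pos in samples and t in microseconds the estimated rate is roughly pos * 1000000 / t: about 2205 samples over the 50 ms sleep works out to ~44100 Hz, which the new branches accept as a 44.1 kHz (or 41 kHz) reference clock instead of rescaling everything toward 48 kHz.
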
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index f7c4544f7859..0625c342a1c9 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -27,8 +27,6 @@
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
 
-#include <mach/pxa-regs.h>
-#include <mach/hardware.h>
 #include <mach/magician.h>
 #include <asm/mach-types.h>
 #include "../codecs/uda1380.h"
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
index 2f3a21eee051..df494d1e346f 100644
--- a/sound/soc/s3c24xx/Kconfig
+++ b/sound/soc/s3c24xx/Kconfig
@@ -1,10 +1,10 @@
 config SND_S3C24XX_SOC
 	tristate "SoC Audio for the Samsung S3CXXXX chips"
-	depends on ARCH_S3C2410 || ARCH_S3C64XX
+	depends on ARCH_S3C2410
 	help
 	  Say Y or M if you want to add support for codecs attached to
-	  the S3C24XX and S3C64XX AC97, I2S or SSP interface. You will
-	  also need to select the audio interfaces to support below.
+	  the S3C24XX AC97 or I2S interfaces. You will also need to
+	  select the audio interfaces to support below.
 
 config SND_S3C24XX_SOC_I2S
 	tristate
diff --git a/sound/usb/caiaq/Makefile b/sound/usb/caiaq/Makefile
index 23dadd5a11cd..388999653aaa 100644
--- a/sound/usb/caiaq/Makefile
+++ b/sound/usb/caiaq/Makefile
@@ -1,4 +1,4 @@
-snd-usb-caiaq-y := caiaq-device.o caiaq-audio.o caiaq-midi.o caiaq-control.o
-snd-usb-caiaq-$(CONFIG_SND_USB_CAIAQ_INPUT) += caiaq-input.o
+snd-usb-caiaq-y := device.o audio.o midi.o control.o
+snd-usb-caiaq-$(CONFIG_SND_USB_CAIAQ_INPUT) += input.o
 
 obj-$(CONFIG_SND_USB_CAIAQ) += snd-usb-caiaq.o
diff --git a/sound/usb/caiaq/caiaq-audio.c b/sound/usb/caiaq/audio.c
index 08d51e0c9fea..3f45c0fe61ab 100644
--- a/sound/usb/caiaq/caiaq-audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -16,20 +16,14 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 
+#include <linux/spinlock.h>
 #include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/interrupt.h>
 #include <linux/usb.h>
-#include <linux/spinlock.h>
 #include <sound/core.h>
-#include <sound/initval.h>
 #include <sound/pcm.h>
-#include <sound/rawmidi.h>
-#include <linux/input.h>
 
-#include "caiaq-device.h"
-#include "caiaq-audio.h"
+#include "device.h"
+#include "audio.h"
 
 #define N_URBS			32
 #define CLOCK_DRIFT_TOLERANCE	5
diff --git a/sound/usb/caiaq/caiaq-audio.h b/sound/usb/caiaq/audio.h
index 8ab1f8d9529e..8ab1f8d9529e 100644
--- a/sound/usb/caiaq/caiaq-audio.h
+++ b/sound/usb/caiaq/audio.h
diff --git a/sound/usb/caiaq/caiaq-control.c b/sound/usb/caiaq/control.c
index e92c2bbf4fe9..537102ba6b9d 100644
--- a/sound/usb/caiaq/caiaq-control.c
+++ b/sound/usb/caiaq/control.c
@@ -18,17 +18,13 @@
  */
 
 #include <linux/init.h>
-#include <linux/interrupt.h>
 #include <linux/usb.h>
+#include <sound/control.h>
 #include <sound/core.h>
-#include <sound/initval.h>
 #include <sound/pcm.h>
-#include <sound/rawmidi.h>
-#include <sound/control.h>
-#include <linux/input.h>
 
-#include "caiaq-device.h"
-#include "caiaq-control.h"
+#include "device.h"
+#include "control.h"
 
 #define CNT_INTVAL	0x10000
 
diff --git a/sound/usb/caiaq/caiaq-control.h b/sound/usb/caiaq/control.h
index 2e7ab1aa4fb3..2e7ab1aa4fb3 100644
--- a/sound/usb/caiaq/caiaq-control.h
+++ b/sound/usb/caiaq/control.h
diff --git a/sound/usb/caiaq/caiaq-device.c b/sound/usb/caiaq/device.c
index cf573a982fdc..6d517705da0e 100644
--- a/sound/usb/caiaq/caiaq-device.c
+++ b/sound/usb/caiaq/device.c
@@ -19,27 +19,20 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 
-#include <linux/init.h>
-#include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/usb.h>
-#include <linux/input.h>
-#include <linux/spinlock.h>
-#include <sound/core.h>
 #include <sound/initval.h>
+#include <sound/core.h>
 #include <sound/pcm.h>
-#include <sound/rawmidi.h>
-#include <sound/control.h>
-
-#include "caiaq-device.h"
-#include "caiaq-audio.h"
-#include "caiaq-midi.h"
-#include "caiaq-control.h"
 
-#ifdef CONFIG_SND_USB_CAIAQ_INPUT
-#include "caiaq-input.h"
-#endif
+#include "device.h"
+#include "audio.h"
+#include "midi.h"
+#include "control.h"
+#include "input.h"
 
 MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
 MODULE_DESCRIPTION("caiaq USB audio, version 1.3.13");
diff --git a/sound/usb/caiaq/caiaq-device.h b/sound/usb/caiaq/device.h
index 4cce1ad7493d..4cce1ad7493d 100644
--- a/sound/usb/caiaq/caiaq-device.h
+++ b/sound/usb/caiaq/device.h
diff --git a/sound/usb/caiaq/caiaq-input.c b/sound/usb/caiaq/input.c
index f743847a5e5a..a48d309bd94c 100644
--- a/sound/usb/caiaq/caiaq-input.c
+++ b/sound/usb/caiaq/input.c
@@ -17,17 +17,12 @@
 */
 
 #include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/input.h>
 #include <linux/usb.h>
 #include <linux/usb/input.h>
-#include <linux/spinlock.h>
-#include <sound/core.h>
-#include <sound/rawmidi.h>
 #include <sound/pcm.h>
-#include "caiaq-device.h"
-#include "caiaq-input.h"
+
+#include "device.h"
+#include "input.h"
 
 static unsigned short keycode_ak1[] = { KEY_C, KEY_B, KEY_A };
 static unsigned short keycode_rk2[] = { KEY_1, KEY_2, KEY_3, KEY_4,
diff --git a/sound/usb/caiaq/caiaq-input.h b/sound/usb/caiaq/input.h
index ced535577864..ced535577864 100644
--- a/sound/usb/caiaq/caiaq-input.h
+++ b/sound/usb/caiaq/input.h
diff --git a/sound/usb/caiaq/caiaq-midi.c b/sound/usb/caiaq/midi.c
index f19fd360c936..8fa8cd88d763 100644
--- a/sound/usb/caiaq/caiaq-midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -16,20 +16,13 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/interrupt.h>
 #include <linux/usb.h>
-#include <linux/input.h>
-#include <linux/spinlock.h>
-#include <sound/core.h>
 #include <sound/rawmidi.h>
+#include <sound/core.h>
 #include <sound/pcm.h>
 
-#include "caiaq-device.h"
-#include "caiaq-midi.h"
-
+#include "device.h"
+#include "midi.h"
 
 static int snd_usb_caiaq_midi_input_open(struct snd_rawmidi_substream *substream)
 {
diff --git a/sound/usb/caiaq/caiaq-midi.h b/sound/usb/caiaq/midi.h
index 9d16db027fc3..9d16db027fc3 100644
--- a/sound/usb/caiaq/caiaq-midi.h
+++ b/sound/usb/caiaq/midi.h
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 98276aafefe6..012ff1f6f8af 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -349,14 +349,10 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
 	if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
 		return -ENOTTY;
 
-	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
-	if (!cfg)
-		return -ENOMEM;
+	cfg = memdup_user((void *)arg, sizeof(*cfg));
+	if (IS_ERR(cfg))
+		return PTR_ERR(cfg);
 
-	if (copy_from_user(cfg, (void *)arg, sizeof(*cfg))) {
-		err = -EFAULT;
-		goto free;
-	}
 	if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
 		err = -ENXIO;
 		goto free;
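
This hunk and the usX2Yhwdep.c one below are the same conversion: memdup_user() folds the kmalloc()/copy_from_user() pair into a single call and reports failure through an ERR_PTR-encoded pointer, so the caller propagates PTR_ERR() (-ENOMEM or -EFAULT) directly. A self-contained sketch of the pattern; struct my_params and the validation step are placeholders, not the driver's real ioctl structures:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct my_params {			/* stand-in for the real ioctl payload */
	unsigned int version;
};

static int my_set_params(void __user *arg)
{
	struct my_params *p;
	int err = 0;

	/* allocate with kmalloc() and copy from user space in one step */
	p = memdup_user(arg, sizeof(*p));
	if (IS_ERR(p))
		return PTR_ERR(p);

	if (p->version != 1)		/* placeholder validation */
		err = -ENXIO;

	kfree(p);
	return err;
}

Because the error travels inside the pointer, no separate error label is needed for the copy step; the remaining goto free paths in the hunk only handle failures that occur after the copy has succeeded.
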
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 4af8740db717..f3d8f71265dd 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -203,13 +203,12 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
 
 	if (access_ok(VERIFY_READ, dsp->image, dsp->length)) {
 		struct usb_device* dev = priv->chip.dev;
-		char *buf = kmalloc(dsp->length, GFP_KERNEL);
-		if (!buf)
-			return -ENOMEM;
-		if (copy_from_user(buf, dsp->image, dsp->length)) {
-			kfree(buf);
-			return -EFAULT;
-		}
+		char *buf;
+
+		buf = memdup_user(dsp->image, dsp->length);
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
+
 		err = usb_set_interface(dev, 0, 1);
 		if (err)
 			snd_printk(KERN_ERR "usb_set_interface error \n");