-rw-r--r--Documentation/block/biodoc.txt19
-rw-r--r--Documentation/networking/bonding.txt2
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/i2c.txt46
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt4
-rw-r--r--MAINTAINERS16
-rw-r--r--Makefile9
-rw-r--r--arch/microblaze/include/asm/auxvec.h1
-rw-r--r--arch/microblaze/include/asm/cputable.h1
-rw-r--r--arch/microblaze/include/asm/ftrace.h1
-rw-r--r--arch/microblaze/include/asm/hw_irq.h1
-rw-r--r--arch/microblaze/include/asm/io.h1
-rw-r--r--arch/microblaze/include/asm/socket.h3
-rw-r--r--arch/microblaze/include/asm/user.h1
-rw-r--r--arch/microblaze/include/asm/vga.h1
-rw-r--r--arch/microblaze/kernel/of_device.c10
-rw-r--r--arch/microblaze/kernel/process.c3
-rw-r--r--arch/microblaze/kernel/prom.c1
-rw-r--r--arch/microblaze/kernel/ptrace.c1
-rw-r--r--arch/microblaze/kernel/signal.c1
-rw-r--r--arch/microblaze/kernel/sys_microblaze.c2
-rw-r--r--arch/mn10300/include/asm/bug.h10
-rw-r--r--arch/mn10300/include/asm/unistd.h2
-rw-r--r--arch/mn10300/kernel/entry.S2
-rw-r--r--arch/mn10300/kernel/setup.c4
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/boot/dts/tqm8540.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8541.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8548-bigflash.dts8
-rw-r--r--arch/powerpc/boot/dts/tqm8548.dts8
-rw-r--r--arch/powerpc/boot/dts/tqm8555.dts4
-rw-r--r--arch/powerpc/boot/dts/tqm8560.dts4
-rw-r--r--arch/powerpc/configs/85xx/tqm8548_defconfig164
-rw-r--r--arch/powerpc/include/asm/futex.h12
-rw-r--r--arch/powerpc/include/asm/mmu.h6
-rw-r--r--arch/powerpc/include/asm/parport.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h13
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/mm/tlb_nohash.c1
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S14
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c2
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/include/asm/cpuid.h25
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/lowcore.h12
-rw-r--r--arch/s390/include/asm/processor.h17
-rw-r--r--arch/s390/include/asm/ptrace.h2
-rw-r--r--arch/s390/include/asm/setup.h24
-rw-r--r--arch/s390/include/asm/thread_info.h3
-rw-r--r--arch/s390/include/asm/timer.h1
-rw-r--r--arch/s390/include/asm/timex.h5
-rw-r--r--arch/s390/include/asm/unistd.h4
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/compat_wrapper.S18
-rw-r--r--arch/s390/kernel/early.c19
-rw-r--r--arch/s390/kernel/entry.S13
-rw-r--r--arch/s390/kernel/entry64.S13
-rw-r--r--arch/s390/kernel/head.S15
-rw-r--r--arch/s390/kernel/nmi.c5
-rw-r--r--arch/s390/kernel/setup.c12
-rw-r--r--arch/s390/kernel/smp.c4
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/time.c77
-rw-r--r--arch/s390/kernel/vtime.c67
-rw-r--r--arch/sh/Kconfig18
-rw-r--r--arch/sh/boards/board-ap325rxa.c1
-rw-r--r--arch/sh/boards/board-urquell.c30
-rw-r--r--arch/sh/drivers/pci/ops-sh7785lcr.c5
-rw-r--r--arch/sh/drivers/pci/pci-sh7780.h2
-rw-r--r--arch/sh/drivers/pci/pci.c3
-rw-r--r--arch/sh/include/asm/dma-mapping.h36
-rw-r--r--arch/sh/include/asm/scatterlist.h11
-rw-r--r--arch/sh/include/asm/topology.h7
-rw-r--r--arch/sh/include/asm/unistd_32.h4
-rw-r--r--arch/sh/include/asm/unistd_64.h4
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c14
-rw-r--r--arch/sh/kernel/syscalls_32.S2
-rw-r--r--arch/sh/kernel/syscalls_64.S2
-rw-r--r--arch/sh/mm/consistent.c31
-rw-r--r--arch/sparc/include/asm/atomic_32.h2
-rw-r--r--arch/sparc/include/asm/parport.h5
-rw-r--r--arch/sparc/kernel/ldc.c6
-rw-r--r--arch/sparc/kernel/smp_64.c4
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h5
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c16
-rw-r--r--arch/x86/kernel/bios_uv.c3
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c6
-rw-r--r--arch/x86/kernel/microcode_core.c33
-rw-r--r--arch/x86/kernel/tlb_uv.c189
-rw-r--r--arch/x86/kernel/uv_sysfs.c4
-rw-r--r--block/as-iosched.c116
-rw-r--r--block/blk-barrier.c3
-rw-r--r--block/blk-sysfs.c4
-rw-r--r--block/blk.h4
-rw-r--r--block/cfq-iosched.c270
-rw-r--r--block/elevator.c8
-rw-r--r--block/ioctl.c2
-rw-r--r--block/scsi_ioctl.c6
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/libata-scsi.c30
-rw-r--r--drivers/ata/libata-sff.c27
-rw-r--r--drivers/ata/pata_hpt37x.c22
-rw-r--r--drivers/ata/pata_legacy.c34
-rw-r--r--drivers/ata/pata_ninja32.c4
-rw-r--r--drivers/block/brd.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c187
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debugfs.c93
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c111
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c23
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c22
-rw-r--r--drivers/md/dm-bio-list.h117
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-mpath.c1
-rw-r--r--drivers/md/dm-raid1.c1
-rw-r--r--drivers/md/dm-region-hash.c1
-rw-r--r--drivers/md/dm-snap.c1
-rw-r--r--drivers/md/dm.c1
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/md/raid10.c1
-rw-r--r--drivers/net/a2065.c17
-rw-r--r--drivers/net/ariadne.c19
-rw-r--r--drivers/net/arm/am79c961a.c24
-rw-r--r--drivers/net/arm/at91_ether.c32
-rw-r--r--drivers/net/arm/ep93xx_eth.c19
-rw-r--r--drivers/net/arm/ether1.c19
-rw-r--r--drivers/net/arm/ether3.c19
-rw-r--r--drivers/net/atarilance.c18
-rw-r--r--drivers/net/au1000_eth.c23
-rw-r--r--drivers/net/benet/be_ethtool.c4
-rw-r--r--drivers/net/bfin_mac.c24
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c5
-rw-r--r--drivers/net/bonding/bond_sysfs.c14
-rw-r--r--drivers/net/cris/eth_v10.c30
-rw-r--r--drivers/net/declance.c17
-rw-r--r--drivers/net/e1000/e1000_main.c48
-rw-r--r--drivers/net/e1000e/netdev.c61
-rw-r--r--drivers/net/ehea/ehea_main.c4
-rw-r--r--drivers/net/forcedeth.c4
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c27
-rw-r--r--drivers/net/gianfar.c2
-rw-r--r--drivers/net/ibm_newemac/core.c41
-rw-r--r--drivers/net/igb/e1000_mac.c2
-rw-r--r--drivers/net/igb/e1000_mac.h1
-rw-r--r--drivers/net/igb/e1000_mbx.c17
-rw-r--r--drivers/net/igb/e1000_mbx.h2
-rw-r--r--drivers/net/igb/igb_main.c37
-rw-r--r--drivers/net/igbvf/igbvf.h3
-rw-r--r--drivers/net/igbvf/netdev.c9
-rw-r--r--drivers/net/igbvf/vf.c2
-rw-r--r--drivers/net/igbvf/vf.h1
-rw-r--r--drivers/net/ioc3-eth.c22
-rw-r--r--drivers/net/isa-skeleton.c20
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c59
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c40
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c55
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c24
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c57
-rw-r--r--drivers/net/mac89x0.c18
-rw-r--r--drivers/net/macb.c19
-rw-r--r--drivers/net/macsonic.c19
-rw-r--r--drivers/net/myri10ge/myri10ge.c1
-rw-r--r--drivers/net/phy/fixed.c5
-rw-r--r--drivers/net/phy/marvell.c54
-rw-r--r--drivers/net/phy/phy.c14
-rw-r--r--drivers/net/sfc/efx.c7
-rw-r--r--drivers/net/sfc/falcon.c4
-rw-r--r--drivers/net/sh_eth.c21
-rw-r--r--drivers/net/skge.c4
-rw-r--r--drivers/net/sun3_82586.c19
-rw-r--r--drivers/net/tc35815.c27
-rw-r--r--drivers/net/tg3.c9
-rw-r--r--drivers/net/tsi108_eth.c20
-rw-r--r--drivers/net/tun.c5
-rw-r--r--drivers/net/via-velocity.c4
-rw-r--r--drivers/net/xtsonic.c19
-rw-r--r--drivers/parisc/superio.c3
-rw-r--r--drivers/s390/block/dasd.c37
-rw-r--r--drivers/s390/block/dasd_eckd.c16
-rw-r--r--drivers/s390/cio/qdio_main.c43
-rw-r--r--drivers/sbus/char/jsflash.c15
-rw-r--r--drivers/sbus/char/uctrl.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c2
-rw-r--r--drivers/sh/intc.c35
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c26
-rw-r--r--fs/bio.c18
-rw-r--r--fs/buffer.c56
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/ext4/extents.c2
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/gfs2/glock.c10
-rw-r--r--fs/gfs2/inode.c8
-rw-r--r--fs/gfs2/inode.h14
-rw-r--r--fs/gfs2/ops_file.c8
-rw-r--r--fs/gfs2/ops_fstype.c5
-rw-r--r--fs/gfs2/ops_inode.c1
-rw-r--r--fs/gfs2/quota.c4
-rw-r--r--fs/inode.c36
-rw-r--r--fs/nilfs2/bmap.c5
-rw-r--r--fs/nilfs2/nilfs.h5
-rw-r--r--fs/nilfs2/recovery.c20
-rw-r--r--fs/nilfs2/sufile.c290
-rw-r--r--fs/nilfs2/sufile.h79
-rw-r--r--fs/nilfs2/super.c7
-rw-r--r--fs/nilfs2/the_nilfs.c4
-rw-r--r--fs/ocfs2/file.c94
-rw-r--r--fs/pipe.c42
-rw-r--r--fs/splice.c371
-rw-r--r--include/asm-generic/bug.h2
-rw-r--r--include/drm/i915_drm.h3
-rw-r--r--include/linux/bio.h109
-rw-r--r--include/linux/buffer_head.h3
-rw-r--r--include/linux/fs.h64
-rw-r--r--include/linux/fsl_devices.h4
-rw-r--r--include/linux/libata.h8
-rw-r--r--include/linux/phy.h6
-rw-r--r--include/linux/pipe_fs_i.h5
-rw-r--r--include/linux/splice.h12
-rw-r--r--include/linux/usb/serial.h7
-rw-r--r--include/net/udp.h2
-rw-r--r--include/sound/jack.h2
-rw-r--r--include/sound/pcm.h3
-rw-r--r--kernel/power/swap.c2
-rw-r--r--kernel/rcupdate.c18
-rw-r--r--lib/kobject_uevent.c2
-rw-r--r--mm/filemap.c1
-rw-r--r--mm/mmap.c2
-rw-r--r--net/802/tr.c3
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/core/dev.c9
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c4
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/packet/af_packet.c5
-rw-r--r--net/rose/af_rose.c10
-rw-r--r--net/sched/em_meta.c6
-rw-r--r--sound/core/control.c35
-rw-r--r--sound/core/jack.c3
-rw-r--r--sound/core/pcm_compat.c11
-rw-r--r--sound/core/pcm_lib.c47
-rw-r--r--sound/core/pcm_native.c93
-rw-r--r--sound/core/seq/seq_compat.c9
-rw-r--r--sound/core/timer.c11
-rw-r--r--sound/isa/sb/sb16_csp.c19
-rw-r--r--sound/isa/wavefront/wavefront_fx.c14
-rw-r--r--sound/isa/wavefront/wavefront_synth.c11
-rw-r--r--sound/pci/emu10k1/emufx.c41
-rw-r--r--sound/pci/hda/hda_codec.c6
-rw-r--r--sound/pci/hda/hda_intel.c39
-rw-r--r--sound/pci/hda/patch_conexant.c21
-rw-r--r--sound/pci/hda/patch_realtek.c5
-rw-r--r--sound/pci/hda/patch_sigmatel.c27
-rw-r--r--sound/pci/intel8x0.c91
-rw-r--r--sound/soc/pxa/magician.c2
-rw-r--r--sound/soc/s3c24xx/Kconfig6
-rw-r--r--sound/usb/caiaq/Makefile4
-rw-r--r--sound/usb/caiaq/audio.c (renamed from sound/usb/caiaq/caiaq-audio.c)12
-rw-r--r--sound/usb/caiaq/audio.h (renamed from sound/usb/caiaq/caiaq-audio.h)0
-rw-r--r--sound/usb/caiaq/control.c (renamed from sound/usb/caiaq/caiaq-control.c)10
-rw-r--r--sound/usb/caiaq/control.h (renamed from sound/usb/caiaq/caiaq-control.h)0
-rw-r--r--sound/usb/caiaq/device.c (renamed from sound/usb/caiaq/caiaq-device.c)23
-rw-r--r--sound/usb/caiaq/device.h (renamed from sound/usb/caiaq/caiaq-device.h)0
-rw-r--r--sound/usb/caiaq/input.c (renamed from sound/usb/caiaq/caiaq-input.c)11
-rw-r--r--sound/usb/caiaq/input.h (renamed from sound/usb/caiaq/caiaq-input.h)0
-rw-r--r--sound/usb/caiaq/midi.c (renamed from sound/usb/caiaq/caiaq-midi.c)13
-rw-r--r--sound/usb/caiaq/midi.h (renamed from sound/usb/caiaq/caiaq-midi.h)0
-rw-r--r--sound/usb/usx2y/us122l.c10
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c13
275 files changed, 3496 insertions, 2087 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index ecad6ee75705..6fab97ea7e6b 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -1040,23 +1040,21 @@ Front merges are handled by the binary trees in AS and deadline schedulers.
 iii. Plugging the queue to batch requests in anticipation of opportunities for
 	merge/sort optimizations
 
-This is just the same as in 2.4 so far, though per-device unplugging
-support is anticipated for 2.5. Also with a priority-based i/o scheduler,
-such decisions could be based on request priorities.
-
 Plugging is an approach that the current i/o scheduling algorithm resorts to so
 that it collects up enough requests in the queue to be able to take
 advantage of the sorting/merging logic in the elevator. If the
 queue is empty when a request comes in, then it plugs the request queue
-(sort of like plugging the bottom of a vessel to get fluid to build up)
+(sort of like plugging the bath tub of a vessel to get fluid to build up)
 till it fills up with a few more requests, before starting to service
 the requests. This provides an opportunity to merge/sort the requests before
 passing them down to the device. There are various conditions when the queue is
 unplugged (to open up the flow again), either through a scheduled task or
 could be on demand. For example wait_on_buffer sets the unplugging going
-(by running tq_disk) so the read gets satisfied soon. So in the read case,
-the queue gets explicitly unplugged as part of waiting for completion,
-in fact all queues get unplugged as a side-effect.
+through sync_buffer() running blk_run_address_space(mapping). Or the caller
+can do it explicity through blk_unplug(bdev). So in the read case,
+the queue gets explicitly unplugged as part of waiting for completion on that
+buffer. For page driven IO, the address space ->sync_page() takes care of
+doing the blk_run_address_space().
 
 Aside:
   This is kind of controversial territory, as it's not clear if plugging is
@@ -1067,11 +1065,6 @@ Aside:
   multi-page bios being queued in one shot, we may not need to wait to merge
   a big request from the broken up pieces coming by.
 
-  Per-queue granularity unplugging (still a Todo) may help reduce some of the
-  concerns with just a single tq_disk flush approach. Something like
-  blk_kick_queue() to unplug a specific queue (right away ?)
-  or optionally, all queues, is in the plan.
-
 4.4 I/O contexts
 I/O contexts provide a dynamically allocated per process data area. They may
 be used in I/O schedulers, and in the block layer (could be used for IO statis,
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 5ede7473b425..08762750f121 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1242,7 +1242,7 @@ monitoring is enabled, and vice-versa.
 To add ARP targets:
 # echo +192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
 # echo +192.168.0.101 > /sys/class/net/bond0/bonding/arp_ip_target
-	NOTE: up to 10 target addresses may be specified.
+	NOTE: up to 16 target addresses may be specified.
 
 To remove an ARP target:
 # echo -192.168.0.100 > /sys/class/net/bond0/bonding/arp_ip_target
diff --git a/Documentation/powerpc/dts-bindings/fsl/i2c.txt b/Documentation/powerpc/dts-bindings/fsl/i2c.txt
index d0ab33e21fe6..b6d2e21474f9 100644
--- a/Documentation/powerpc/dts-bindings/fsl/i2c.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/i2c.txt
@@ -7,8 +7,10 @@ Required properties :
 
 Recommended properties :
 
- - compatible : Should be "fsl-i2c" for parts compatible with
-   Freescale I2C specifications.
+ - compatible : compatibility list with 2 entries, the first should
+   be "fsl,CHIP-i2c" where CHIP is the name of a compatible processor,
+   e.g. mpc8313, mpc8543, mpc8544, mpc5200 or mpc5200b. The second one
+   should be "fsl-i2c".
  - interrupts : <a b> where a is the interrupt number and b is a
    field that represents an encoding of the sense and level
    information for the interrupt. This should be encoded based on
@@ -16,17 +18,31 @@ Recommended properties :
 	controller you have.
  - interrupt-parent : the phandle for the interrupt controller that
    services interrupts for this device.
- - dfsrr : boolean; if defined, indicates that this I2C device has
-   a digital filter sampling rate register
- - fsl5200-clocking : boolean; if defined, indicated that this device
-   uses the FSL 5200 clocking mechanism.
-
-Example :
-	i2c@3000 {
-		interrupt-parent = <40000>;
-		interrupts = <1b 3>;
-		reg = <3000 18>;
-		device_type = "i2c";
-		compatible = "fsl-i2c";
-		dfsrr;
+ - fsl,preserve-clocking : boolean; if defined, the clock settings
+   from the bootloader are preserved (not touched).
+ - clock-frequency : desired I2C bus clock frequency in Hz.
+
+Examples :
+
+	i2c@3d00 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+		cell-index = <0>;
+		reg = <0x3d00 0x40>;
+		interrupts = <2 15 0>;
+		interrupt-parent = <&mpc5200_pic>;
+		fsl,preserve-clocking;
 	};
+
+	i2c@3100 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cell-index = <1>;
+		compatible = "fsl,mpc8544-i2c", "fsl-i2c";
+		reg = <0x3100 0x100>;
+		interrupts = <43 2>;
+		interrupt-parent = <&mpic>;
+		clock-frequency = <400000>;
+	};
+
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index c5948f2f9a25..88b7433d2f11 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -169,7 +169,7 @@ PCI SSID look-up.
 What `model` option values are available depends on the codec chip.
 Check your codec chip from the codec proc file (see "Codec Proc-File"
 section below). It will show the vendor/product name of your codec
-chip. Then, see Documentation/sound/alsa/HD-Audio-Modelstxt file,
+chip. Then, see Documentation/sound/alsa/HD-Audio-Models.txt file,
 the section of HD-audio driver. You can find a list of codecs
 and `model` options belonging to each codec. For example, for Realtek
 ALC262 codec chip, pass `model=ultra` for devices that are compatible
@@ -177,7 +177,7 @@ with Samsung Q1 Ultra.
 
 Thus, the first thing you can do for any brand-new, unsupported and
 non-working HD-audio hardware is to check HD-audio codec and several
-different `model` option values. If you have a luck, some of them
+different `model` option values. If you have any luck, some of them
 might suit with your device well.
 
 Some codecs such as ALC880 have a special model option `model=test`.
diff --git a/MAINTAINERS b/MAINTAINERS
index 29d74f47ba86..abbedb658c29 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3680,6 +3680,7 @@ L: microblaze-uclinux@itee.uq.edu.au
 W:	http://www.monstr.eu/fdt/
 T:	git git://git.monstr.eu/linux-2.6-microblaze.git
 S:	Supported
+F:	arch/microblaze/
 
 MICROTEK X6 SCANNER
 P:	Oliver Neukum
@@ -3916,19 +3917,12 @@ F: Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/linux/nbd.h
 
-NETWORK DEVICE DRIVERS
-P:	Jeff Garzik
-M:	jgarzik@pobox.com
-L:	netdev@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git
-S:	Maintained
-F:	drivers/net/
-
 NETWORKING [GENERAL]
-P:	Networking Team
-M:	netdev@vger.kernel.org
+P:	David S. Miller
+M:	davem@davemloft.net
 L:	netdev@vger.kernel.org
 W:	http://linux-net.osdl.org/
+T:	git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git
 S:	Maintained
 F:	net/
 F:	include/net/
@@ -5313,7 +5307,9 @@ L: linux-sh@vger.kernel.org
 W:	http://www.linux-sh.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git
 S:	Supported
+F:	Documentation/sh/
 F:	arch/sh/
+F:	drivers/sh/
 
 SUSPEND TO RAM
 P:	Len Brown
diff --git a/Makefile b/Makefile
index ad830bd45a4b..bfdef56add34 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 30
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Temporary Tasmanian Devil
 
 # *DOCUMENTATION*
@@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 			  -e s/arm.*/arm/ -e s/sa110/arm/ \
 			  -e s/s390x/s390/ -e s/parisc64/parisc/ \
 			  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-			  -e s/sh.*/sh/ )
+			  -e s/sh[234].*/sh/ )
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
@@ -210,6 +210,11 @@ ifeq ($(ARCH),sparc64)
        SRCARCH := sparc
 endif
 
+# Additional ARCH settings for sh
+ifeq ($(ARCH),sh64)
+       SRCARCH := sh
+endif
+
 # Where to locate arch specific headers
 hdr-arch := $(SRCARCH)
 
diff --git a/arch/microblaze/include/asm/auxvec.h b/arch/microblaze/include/asm/auxvec.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/auxvec.h
+++ b/arch/microblaze/include/asm/auxvec.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/cputable.h b/arch/microblaze/include/asm/cputable.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/cputable.h
+++ b/arch/microblaze/include/asm/cputable.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/hw_irq.h b/arch/microblaze/include/asm/hw_irq.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/hw_irq.h
+++ b/arch/microblaze/include/asm/hw_irq.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index cfab0342588d..8b5853ee6b5c 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -12,7 +12,6 @@
12#include <asm/byteorder.h> 12#include <asm/byteorder.h>
13#include <asm/page.h> 13#include <asm/page.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <asm/page.h>
16 15
17#define IO_SPACE_LIMIT (0xFFFFFFFF) 16#define IO_SPACE_LIMIT (0xFFFFFFFF)
18 17
diff --git a/arch/microblaze/include/asm/socket.h b/arch/microblaze/include/asm/socket.h
index f919b6b540ac..825936860314 100644
--- a/arch/microblaze/include/asm/socket.h
+++ b/arch/microblaze/include/asm/socket.h
@@ -63,4 +63,7 @@
63 63
64#define SO_MARK 36 64#define SO_MARK 36
65 65
66#define SO_TIMESTAMPING 37
67#define SCM_TIMESTAMPING SO_TIMESTAMPING
68
66#endif /* _ASM_MICROBLAZE_SOCKET_H */ 69#endif /* _ASM_MICROBLAZE_SOCKET_H */
diff --git a/arch/microblaze/include/asm/user.h b/arch/microblaze/include/asm/user.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/user.h
+++ b/arch/microblaze/include/asm/user.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/include/asm/vga.h b/arch/microblaze/include/asm/vga.h
index e69de29bb2d1..8b137891791f 100644
--- a/arch/microblaze/include/asm/vga.h
+++ b/arch/microblaze/include/asm/vga.h
@@ -0,0 +1 @@
diff --git a/arch/microblaze/kernel/of_device.c b/arch/microblaze/kernel/of_device.c
index 717edf4ad0b4..9a0f7632c47c 100644
--- a/arch/microblaze/kernel/of_device.c
+++ b/arch/microblaze/kernel/of_device.c
@@ -13,7 +13,6 @@ void of_device_make_bus_id(struct of_device *dev)
13{ 13{
14 static atomic_t bus_no_reg_magic; 14 static atomic_t bus_no_reg_magic;
15 struct device_node *node = dev->node; 15 struct device_node *node = dev->node;
16 char *name = dev->dev.bus_id;
17 const u32 *reg; 16 const u32 *reg;
18 u64 addr; 17 u64 addr;
19 int magic; 18 int magic;
@@ -25,9 +24,8 @@ void of_device_make_bus_id(struct of_device *dev)
25 if (reg) { 24 if (reg) {
26 addr = of_translate_address(node, reg); 25 addr = of_translate_address(node, reg);
27 if (addr != OF_BAD_ADDR) { 26 if (addr != OF_BAD_ADDR) {
28 snprintf(name, BUS_ID_SIZE, 27 dev_set_name(&dev->dev, "%llx.%s",
29 "%llx.%s", (unsigned long long)addr, 28 (unsigned long long)addr, node->name);
30 node->name);
31 return; 29 return;
32 } 30 }
33 } 31 }
@@ -37,7 +35,7 @@ void of_device_make_bus_id(struct of_device *dev)
37 * counter (and pray...) 35 * counter (and pray...)
38 */ 36 */
39 magic = atomic_add_return(1, &bus_no_reg_magic); 37 magic = atomic_add_return(1, &bus_no_reg_magic);
40 snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1); 38 dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1);
41} 39}
42EXPORT_SYMBOL(of_device_make_bus_id); 40EXPORT_SYMBOL(of_device_make_bus_id);
43 41
@@ -58,7 +56,7 @@ struct of_device *of_device_alloc(struct device_node *np,
58 dev->dev.archdata.of_node = np; 56 dev->dev.archdata.of_node = np;
59 57
60 if (bus_id) 58 if (bus_id)
61 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); 59 dev_set_name(&dev->dev, bus_id);
62 else 60 else
63 of_device_make_bus_id(dev); 61 of_device_make_bus_id(dev);
64 62
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 60e9ed7d3132..436f26ccbfa9 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -115,8 +115,7 @@ void flush_thread(void)
115{ 115{
116} 116}
117 117
118/* FIXME - here will be a proposed change -> remove nr parameter */ 118int copy_thread(unsigned long clone_flags, unsigned long usp,
119int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
120 unsigned long unused, 119 unsigned long unused,
121 struct task_struct *p, struct pt_regs *regs) 120 struct task_struct *p, struct pt_regs *regs)
122{ 121{
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 475b1fac5cfd..34c48718061a 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -39,7 +39,6 @@
39#include <asm/system.h> 39#include <asm/system.h>
40#include <asm/mmu.h> 40#include <asm/mmu.h>
41#include <asm/pgtable.h> 41#include <asm/pgtable.h>
42#include <linux/pci.h>
43#include <asm/sections.h> 42#include <asm/sections.h>
44#include <asm/pci-bridge.h> 43#include <asm/pci-bridge.h>
45 44
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
index 3171e39e3220..b86aa623e36d 100644
--- a/arch/microblaze/kernel/ptrace.c
+++ b/arch/microblaze/kernel/ptrace.c
@@ -32,7 +32,6 @@
32#include <linux/signal.h> 32#include <linux/signal.h>
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/ptrace.h>
36#include <asm/processor.h> 35#include <asm/processor.h>
37#include <linux/uaccess.h> 36#include <linux/uaccess.h>
38#include <asm/asm-offsets.h> 37#include <asm/asm-offsets.h>
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index ff347b98863a..3889cf45fa71 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -37,7 +37,6 @@
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38#include <asm/pgtable.h> 38#include <asm/pgtable.h>
39#include <asm/pgalloc.h> 39#include <asm/pgalloc.h>
40#include <linux/signal.h>
41#include <linux/syscalls.h> 40#include <linux/syscalls.h>
42#include <asm/cacheflush.h> 41#include <asm/cacheflush.h>
43#include <asm/syscalls.h> 42#include <asm/syscalls.h>
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index d90b548fb1bb..ba0568c2cc1c 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -29,9 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/err.h> 30#include <linux/err.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/ipc.h>
33#include <linux/semaphore.h> 32#include <linux/semaphore.h>
34#include <linux/syscalls.h>
35#include <linux/uaccess.h> 33#include <linux/uaccess.h>
36#include <linux/unistd.h> 34#include <linux/unistd.h>
37 35
diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h
index 4fcf3384e259..aa6a38886391 100644
--- a/arch/mn10300/include/asm/bug.h
+++ b/arch/mn10300/include/asm/bug.h
@@ -11,10 +11,12 @@
11#ifndef _ASM_BUG_H 11#ifndef _ASM_BUG_H
12#define _ASM_BUG_H 12#define _ASM_BUG_H
13 13
14#ifdef CONFIG_BUG
15
14/* 16/*
15 * Tell the user there is some problem. 17 * Tell the user there is some problem.
16 */ 18 */
17#define _debug_bug_trap() \ 19#define BUG() \
18do { \ 20do { \
19 asm volatile( \ 21 asm volatile( \
20 " syscall 15 \n" \ 22 " syscall 15 \n" \
@@ -25,11 +27,11 @@ do { \
25 : \ 27 : \
26 : "i"(__FILE__), "i"(__LINE__) \ 28 : "i"(__FILE__), "i"(__LINE__) \
27 ); \ 29 ); \
28} while (0) 30} while (1)
29
30#define BUG() _debug_bug_trap()
31 31
32#define HAVE_ARCH_BUG 32#define HAVE_ARCH_BUG
33#endif /* CONFIG_BUG */
34
33#include <asm-generic/bug.h> 35#include <asm-generic/bug.h>
34 36
35#endif /* _ASM_BUG_H */ 37#endif /* _ASM_BUG_H */
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index 543a4f98695d..fef5b434dadc 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -344,6 +344,8 @@
344#define __NR_dup3 331 344#define __NR_dup3 331
345#define __NR_pipe2 332 345#define __NR_pipe2 332
346#define __NR_inotify_init1 333 346#define __NR_inotify_init1 333
347#define __NR_preadv 334
348#define __NR_pwritev 335
347 349
348#ifdef __KERNEL__ 350#ifdef __KERNEL__
349 351
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index 34ab5a293153..3dc3e462f92a 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -723,6 +723,8 @@ ENTRY(sys_call_table)
723 .long sys_dup3 723 .long sys_dup3
724 .long sys_pipe2 724 .long sys_pipe2
725 .long sys_inotify_init1 725 .long sys_inotify_init1
726 .long sys_preadv
727 .long sys_pwritev /* 335 */
726 728
727 729
728nr_syscalls=(.-sys_call_table)/4 730nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index 71414e19fd16..79890edfd67a 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -136,10 +136,6 @@ void __init setup_arch(char **cmdline_p)
136 data_resource.start = virt_to_bus(&_etext); 136 data_resource.start = virt_to_bus(&_etext);
137 data_resource.end = virt_to_bus(&_edata)-1; 137 data_resource.end = virt_to_bus(&_edata)-1;
138 138
139#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
140#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
141#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
142
143 start_pfn = (CONFIG_KERNEL_RAM_BASE_ADDRESS >> PAGE_SHIFT); 139 start_pfn = (CONFIG_KERNEL_RAM_BASE_ADDRESS >> PAGE_SHIFT);
144 kstart_pfn = PFN_UP(__pa(&_text)); 140 kstart_pfn = PFN_UP(__pa(&_text));
145 free_pfn = PFN_UP(__pa(&_end)); 141 free_pfn = PFN_UP(__pa(&_end));
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5b50e1ac6179..4c7804551362 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -462,7 +462,7 @@ config PPC_64K_PAGES
462 462
463config PPC_256K_PAGES 463config PPC_256K_PAGES
464 bool "256k page size" if 44x 464 bool "256k page size" if 44x
465 depends on !STDBINUTILS && (!SHMEM || BROKEN) 465 depends on !STDBINUTILS
466 help 466 help
467 Make the page size 256k. 467 Make the page size 256k.
468 468
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index 231bae756637..b6f1fc6eb960 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -84,9 +84,9 @@
84 interrupt-parent = <&mpic>; 84 interrupt-parent = <&mpic>;
85 dfsrr; 85 dfsrr;
86 86
87 dtt@50 { 87 dtt@48 {
88 compatible = "national,lm75"; 88 compatible = "national,lm75";
89 reg = <0x50>; 89 reg = <0x48>;
90 }; 90 };
91 91
92 rtc@68 { 92 rtc@68 {
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index 4356a1f08295..fa6a3d54a8a5 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -83,9 +83,9 @@
83 interrupt-parent = <&mpic>; 83 interrupt-parent = <&mpic>;
84 dfsrr; 84 dfsrr;
85 85
86 dtt@50 { 86 dtt@48 {
87 compatible = "national,lm75"; 87 compatible = "national,lm75";
88 reg = <0x50>; 88 reg = <0x48>;
89 }; 89 };
90 90
91 rtc@68 { 91 rtc@68 {
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 19aa72301c83..00f7ed7a2455 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -85,9 +85,9 @@
85 interrupt-parent = <&mpic>; 85 interrupt-parent = <&mpic>;
86 dfsrr; 86 dfsrr;
87 87
88 dtt@50 { 88 dtt@48 {
89 compatible = "national,lm75"; 89 compatible = "national,lm75";
90 reg = <0x50>; 90 reg = <0x48>;
91 }; 91 };
92 92
93 rtc@68 { 93 rtc@68 {
@@ -247,7 +247,7 @@
247 interrupts = <31 2 32 2 33 2>; 247 interrupts = <31 2 32 2 33 2>;
248 interrupt-parent = <&mpic>; 248 interrupt-parent = <&mpic>;
249 tbi-handle = <&tbi2>; 249 tbi-handle = <&tbi2>;
250 phy-handle = <&phy3>; 250 phy-handle = <&phy4>;
251 251
252 mdio@520 { 252 mdio@520 {
253 #address-cells = <1>; 253 #address-cells = <1>;
@@ -275,7 +275,7 @@
275 interrupts = <37 2 38 2 39 2>; 275 interrupts = <37 2 38 2 39 2>;
276 interrupt-parent = <&mpic>; 276 interrupt-parent = <&mpic>;
277 tbi-handle = <&tbi3>; 277 tbi-handle = <&tbi3>;
278 phy-handle = <&phy4>; 278 phy-handle = <&phy5>;
279 279
280 mdio@520 { 280 mdio@520 {
281 #address-cells = <1>; 281 #address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index 49145a04fc6c..673e4a778ac8 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -85,9 +85,9 @@
85 interrupt-parent = <&mpic>; 85 interrupt-parent = <&mpic>;
86 dfsrr; 86 dfsrr;
87 87
88 dtt@50 { 88 dtt@48 {
89 compatible = "national,lm75"; 89 compatible = "national,lm75";
90 reg = <0x50>; 90 reg = <0x48>;
91 }; 91 };
92 92
93 rtc@68 { 93 rtc@68 {
@@ -247,7 +247,7 @@
247 interrupts = <31 2 32 2 33 2>; 247 interrupts = <31 2 32 2 33 2>;
248 interrupt-parent = <&mpic>; 248 interrupt-parent = <&mpic>;
249 tbi-handle = <&tbi2>; 249 tbi-handle = <&tbi2>;
250 phy-handle = <&phy3>; 250 phy-handle = <&phy4>;
251 251
252 mdio@520 { 252 mdio@520 {
253 #address-cells = <1>; 253 #address-cells = <1>;
@@ -275,7 +275,7 @@
275 interrupts = <37 2 38 2 39 2>; 275 interrupts = <37 2 38 2 39 2>;
276 interrupt-parent = <&mpic>; 276 interrupt-parent = <&mpic>;
277 tbi-handle = <&tbi3>; 277 tbi-handle = <&tbi3>;
278 phy-handle = <&phy4>; 278 phy-handle = <&phy5>;
279 279
280 mdio@520 { 280 mdio@520 {
281 #address-cells = <1>; 281 #address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index 06d366ebbda3..6a99f1eef7ad 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -83,9 +83,9 @@
83 interrupt-parent = <&mpic>; 83 interrupt-parent = <&mpic>;
84 dfsrr; 84 dfsrr;
85 85
86 dtt@50 { 86 dtt@48 {
87 compatible = "national,lm75"; 87 compatible = "national,lm75";
88 reg = <0x50>; 88 reg = <0x48>;
89 }; 89 };
90 90
91 rtc@68 { 91 rtc@68 {
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index feff915e0492..b6c2d71defd3 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -85,9 +85,9 @@
85 interrupt-parent = <&mpic>; 85 interrupt-parent = <&mpic>;
86 dfsrr; 86 dfsrr;
87 87
88 dtt@50 { 88 dtt@48 {
89 compatible = "national,lm75"; 89 compatible = "national,lm75";
90 reg = <0x50>; 90 reg = <0x48>;
91 }; 91 };
92 92
93 rtc@68 { 93 rtc@68 {
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index 0bc45975911a..43030fea2eee 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc2 3# Linux kernel version: 2.6.29-rc7
4# Mon Jan 26 15:36:20 2009 4# Mon Mar 16 09:03:28 2009
5# 5#
6# CONFIG_PPC64 is not set 6# CONFIG_PPC64 is not set
7 7
@@ -22,6 +22,7 @@ CONFIG_FSL_EMB_PERFMON=y
22# CONFIG_PHYS_64BIT is not set 22# CONFIG_PHYS_64BIT is not set
23CONFIG_SPE=y 23CONFIG_SPE=y
24CONFIG_PPC_MMU_NOHASH=y 24CONFIG_PPC_MMU_NOHASH=y
25CONFIG_PPC_BOOK3E_MMU=y
25# CONFIG_PPC_MM_SLICES is not set 26# CONFIG_PPC_MM_SLICES is not set
26# CONFIG_SMP is not set 27# CONFIG_SMP is not set
27CONFIG_PPC32=y 28CONFIG_PPC32=y
@@ -75,6 +76,15 @@ CONFIG_SYSVIPC_SYSCTL=y
75# CONFIG_BSD_PROCESS_ACCT is not set 76# CONFIG_BSD_PROCESS_ACCT is not set
76# CONFIG_TASKSTATS is not set 77# CONFIG_TASKSTATS is not set
77# CONFIG_AUDIT is not set 78# CONFIG_AUDIT is not set
79
80#
81# RCU Subsystem
82#
83CONFIG_CLASSIC_RCU=y
84# CONFIG_TREE_RCU is not set
85# CONFIG_PREEMPT_RCU is not set
86# CONFIG_TREE_RCU_TRACE is not set
87# CONFIG_PREEMPT_RCU_TRACE is not set
78# CONFIG_IKCONFIG is not set 88# CONFIG_IKCONFIG is not set
79CONFIG_LOG_BUF_SHIFT=14 89CONFIG_LOG_BUF_SHIFT=14
80CONFIG_GROUP_SCHED=y 90CONFIG_GROUP_SCHED=y
@@ -152,11 +162,6 @@ CONFIG_DEFAULT_AS=y
152# CONFIG_DEFAULT_CFQ is not set 162# CONFIG_DEFAULT_CFQ is not set
153# CONFIG_DEFAULT_NOOP is not set 163# CONFIG_DEFAULT_NOOP is not set
154CONFIG_DEFAULT_IOSCHED="anticipatory" 164CONFIG_DEFAULT_IOSCHED="anticipatory"
155CONFIG_CLASSIC_RCU=y
156# CONFIG_TREE_RCU is not set
157# CONFIG_PREEMPT_RCU is not set
158# CONFIG_TREE_RCU_TRACE is not set
159# CONFIG_PREEMPT_RCU_TRACE is not set
160# CONFIG_FREEZER is not set 165# CONFIG_FREEZER is not set
161 166
162# 167#
@@ -202,7 +207,7 @@ CONFIG_MPIC=y
202# 207#
203# Kernel options 208# Kernel options
204# 209#
205# CONFIG_HIGHMEM is not set 210CONFIG_HIGHMEM=y
206CONFIG_TICK_ONESHOT=y 211CONFIG_TICK_ONESHOT=y
207CONFIG_NO_HZ=y 212CONFIG_NO_HZ=y
208CONFIG_HIGH_RES_TIMERS=y 213CONFIG_HIGH_RES_TIMERS=y
@@ -244,6 +249,7 @@ CONFIG_UNEVICTABLE_LRU=y
244CONFIG_PPC_4K_PAGES=y 249CONFIG_PPC_4K_PAGES=y
245# CONFIG_PPC_16K_PAGES is not set 250# CONFIG_PPC_16K_PAGES is not set
246# CONFIG_PPC_64K_PAGES is not set 251# CONFIG_PPC_64K_PAGES is not set
252# CONFIG_PPC_256K_PAGES is not set
247CONFIG_FORCE_MAX_ZONEORDER=11 253CONFIG_FORCE_MAX_ZONEORDER=11
248CONFIG_PROC_DEVICETREE=y 254CONFIG_PROC_DEVICETREE=y
249# CONFIG_CMDLINE_BOOL is not set 255# CONFIG_CMDLINE_BOOL is not set
@@ -259,6 +265,7 @@ CONFIG_ZONE_DMA=y
259CONFIG_PPC_INDIRECT_PCI=y 265CONFIG_PPC_INDIRECT_PCI=y
260CONFIG_FSL_SOC=y 266CONFIG_FSL_SOC=y
261CONFIG_FSL_PCI=y 267CONFIG_FSL_PCI=y
268CONFIG_FSL_LBC=y
262CONFIG_PPC_PCI_CHOICE=y 269CONFIG_PPC_PCI_CHOICE=y
263CONFIG_PCI=y 270CONFIG_PCI=y
264CONFIG_PCI_DOMAINS=y 271CONFIG_PCI_DOMAINS=y
@@ -284,10 +291,11 @@ CONFIG_ARCH_SUPPORTS_MSI=y
284# Default settings for advanced configuration options are used 291# Default settings for advanced configuration options are used
285# 292#
286CONFIG_LOWMEM_SIZE=0x30000000 293CONFIG_LOWMEM_SIZE=0x30000000
294CONFIG_LOWMEM_CAM_NUM=3
287CONFIG_PAGE_OFFSET=0xc0000000 295CONFIG_PAGE_OFFSET=0xc0000000
288CONFIG_KERNEL_START=0xc0000000 296CONFIG_KERNEL_START=0xc0000000
289CONFIG_PHYSICAL_START=0x00000000 297CONFIG_PHYSICAL_START=0x00000000
290CONFIG_PHYSICAL_ALIGN=0x10000000 298CONFIG_PHYSICAL_ALIGN=0x04000000
291CONFIG_TASK_SIZE=0xc0000000 299CONFIG_TASK_SIZE=0xc0000000
292CONFIG_NET=y 300CONFIG_NET=y
293 301
@@ -363,12 +371,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
363# CONFIG_BT is not set 371# CONFIG_BT is not set
364# CONFIG_AF_RXRPC is not set 372# CONFIG_AF_RXRPC is not set
365# CONFIG_PHONET is not set 373# CONFIG_PHONET is not set
366CONFIG_WIRELESS=y 374# CONFIG_WIRELESS is not set
367# CONFIG_CFG80211 is not set
368CONFIG_WIRELESS_OLD_REGULATORY=y
369# CONFIG_WIRELESS_EXT is not set
370# CONFIG_LIB80211 is not set
371# CONFIG_MAC80211 is not set
372# CONFIG_WIMAX is not set 375# CONFIG_WIMAX is not set
373# CONFIG_RFKILL is not set 376# CONFIG_RFKILL is not set
374# CONFIG_NET_9P is not set 377# CONFIG_NET_9P is not set
@@ -471,27 +474,18 @@ CONFIG_MTD_NAND_IDS=y
471# CONFIG_MTD_NAND_NANDSIM is not set 474# CONFIG_MTD_NAND_NANDSIM is not set
472# CONFIG_MTD_NAND_PLATFORM is not set 475# CONFIG_MTD_NAND_PLATFORM is not set
473# CONFIG_MTD_NAND_FSL_ELBC is not set 476# CONFIG_MTD_NAND_FSL_ELBC is not set
474# CONFIG_MTD_NAND_FSL_UPM is not set 477CONFIG_MTD_NAND_FSL_UPM=y
475# CONFIG_MTD_ONENAND is not set 478# CONFIG_MTD_ONENAND is not set
476 479
477# 480#
478# LPDDR flash memory drivers 481# LPDDR flash memory drivers
479# 482#
480# CONFIG_MTD_LPDDR is not set 483# CONFIG_MTD_LPDDR is not set
481# CONFIG_MTD_QINFO_PROBE is not set
482 484
483# 485#
484# UBI - Unsorted block images 486# UBI - Unsorted block images
485# 487#
486CONFIG_MTD_UBI=m 488# CONFIG_MTD_UBI is not set
487CONFIG_MTD_UBI_WL_THRESHOLD=4096
488CONFIG_MTD_UBI_BEB_RESERVE=1
489# CONFIG_MTD_UBI_GLUEBI is not set
490
491#
492# UBI debugging options
493#
494# CONFIG_MTD_UBI_DEBUG is not set
495CONFIG_OF_DEVICE=y 489CONFIG_OF_DEVICE=y
496CONFIG_OF_I2C=y 490CONFIG_OF_I2C=y
497# CONFIG_PARPORT is not set 491# CONFIG_PARPORT is not set
@@ -515,69 +509,21 @@ CONFIG_BLK_DEV_RAM_SIZE=32768
515# CONFIG_BLK_DEV_HD is not set 509# CONFIG_BLK_DEV_HD is not set
516CONFIG_MISC_DEVICES=y 510CONFIG_MISC_DEVICES=y
517# CONFIG_PHANTOM is not set 511# CONFIG_PHANTOM is not set
518# CONFIG_EEPROM_93CX6 is not set
519# CONFIG_SGI_IOC4 is not set 512# CONFIG_SGI_IOC4 is not set
520# CONFIG_TIFM_CORE is not set 513# CONFIG_TIFM_CORE is not set
521# CONFIG_ICS932S401 is not set 514# CONFIG_ICS932S401 is not set
522# CONFIG_ENCLOSURE_SERVICES is not set 515# CONFIG_ENCLOSURE_SERVICES is not set
523# CONFIG_HP_ILO is not set 516# CONFIG_HP_ILO is not set
524# CONFIG_C2PORT is not set 517# CONFIG_C2PORT is not set
518
519#
520# EEPROM support
521#
522# CONFIG_EEPROM_AT24 is not set
523# CONFIG_EEPROM_LEGACY is not set
524# CONFIG_EEPROM_93CX6 is not set
525CONFIG_HAVE_IDE=y 525CONFIG_HAVE_IDE=y
526CONFIG_IDE=y 526# CONFIG_IDE is not set
527
528#
529# Please see Documentation/ide/ide.txt for help/info on IDE drives
530#
531CONFIG_IDE_TIMINGS=y
532# CONFIG_BLK_DEV_IDE_SATA is not set
533CONFIG_IDE_GD=y
534CONFIG_IDE_GD_ATA=y
535# CONFIG_IDE_GD_ATAPI is not set
536# CONFIG_BLK_DEV_IDECD is not set
537# CONFIG_BLK_DEV_IDETAPE is not set
538# CONFIG_IDE_TASK_IOCTL is not set
539CONFIG_IDE_PROC_FS=y
540
541#
542# IDE chipset support/bugfixes
543#
544# CONFIG_BLK_DEV_PLATFORM is not set
545CONFIG_BLK_DEV_IDEDMA_SFF=y
546
547#
548# PCI IDE chipsets support
549#
550CONFIG_BLK_DEV_IDEPCI=y
551CONFIG_IDEPCI_PCIBUS_ORDER=y
552# CONFIG_BLK_DEV_OFFBOARD is not set
553CONFIG_BLK_DEV_GENERIC=y
554# CONFIG_BLK_DEV_OPTI621 is not set
555CONFIG_BLK_DEV_IDEDMA_PCI=y
556# CONFIG_BLK_DEV_AEC62XX is not set
557# CONFIG_BLK_DEV_ALI15X3 is not set
558# CONFIG_BLK_DEV_AMD74XX is not set
559# CONFIG_BLK_DEV_CMD64X is not set
560# CONFIG_BLK_DEV_TRIFLEX is not set
561# CONFIG_BLK_DEV_CS5520 is not set
562# CONFIG_BLK_DEV_CS5530 is not set
563# CONFIG_BLK_DEV_HPT366 is not set
564# CONFIG_BLK_DEV_JMICRON is not set
565# CONFIG_BLK_DEV_SC1200 is not set
566# CONFIG_BLK_DEV_PIIX is not set
567# CONFIG_BLK_DEV_IT8172 is not set
568# CONFIG_BLK_DEV_IT8213 is not set
569# CONFIG_BLK_DEV_IT821X is not set
570# CONFIG_BLK_DEV_NS87415 is not set
571# CONFIG_BLK_DEV_PDC202XX_OLD is not set
572# CONFIG_BLK_DEV_PDC202XX_NEW is not set
573# CONFIG_BLK_DEV_SVWKS is not set
574# CONFIG_BLK_DEV_SIIMAGE is not set
575# CONFIG_BLK_DEV_SL82C105 is not set
576# CONFIG_BLK_DEV_SLC90E66 is not set
577# CONFIG_BLK_DEV_TRM290 is not set
578CONFIG_BLK_DEV_VIA82CXXX=y
579# CONFIG_BLK_DEV_TC86C001 is not set
580CONFIG_BLK_DEV_IDEDMA=y
581 527
582# 528#
583# SCSI device support 529# SCSI device support
@@ -650,7 +596,7 @@ CONFIG_MII=y
650CONFIG_NETDEV_1000=y 596CONFIG_NETDEV_1000=y
651# CONFIG_ACENIC is not set 597# CONFIG_ACENIC is not set
652# CONFIG_DL2K is not set 598# CONFIG_DL2K is not set
653CONFIG_E1000=y 599# CONFIG_E1000 is not set
654# CONFIG_E1000E is not set 600# CONFIG_E1000E is not set
655# CONFIG_IP1000 is not set 601# CONFIG_IP1000 is not set
656# CONFIG_IGB is not set 602# CONFIG_IGB is not set
@@ -668,6 +614,7 @@ CONFIG_GIANFAR=y
668# CONFIG_QLA3XXX is not set 614# CONFIG_QLA3XXX is not set
669# CONFIG_ATL1 is not set 615# CONFIG_ATL1 is not set
670# CONFIG_ATL1E is not set 616# CONFIG_ATL1E is not set
617# CONFIG_ATL1C is not set
671# CONFIG_JME is not set 618# CONFIG_JME is not set
672CONFIG_NETDEV_10000=y 619CONFIG_NETDEV_10000=y
673# CONFIG_CHELSIO_T1 is not set 620# CONFIG_CHELSIO_T1 is not set
@@ -835,8 +782,6 @@ CONFIG_I2C_MPC=y
835# Miscellaneous I2C Chip support 782# Miscellaneous I2C Chip support
836# 783#
837# CONFIG_DS1682 is not set 784# CONFIG_DS1682 is not set
838# CONFIG_EEPROM_AT24 is not set
839# CONFIG_EEPROM_LEGACY is not set
840# CONFIG_SENSORS_PCF8574 is not set 785# CONFIG_SENSORS_PCF8574 is not set
841# CONFIG_PCF8575 is not set 786# CONFIG_PCF8575 is not set
842# CONFIG_SENSORS_PCA9539 is not set 787# CONFIG_SENSORS_PCA9539 is not set
@@ -975,26 +920,7 @@ CONFIG_HID=y
975# Special HID drivers 920# Special HID drivers
976# 921#
977CONFIG_HID_COMPAT=y 922CONFIG_HID_COMPAT=y
978CONFIG_USB_SUPPORT=y 923# CONFIG_USB_SUPPORT is not set
979CONFIG_USB_ARCH_HAS_HCD=y
980CONFIG_USB_ARCH_HAS_OHCI=y
981CONFIG_USB_ARCH_HAS_EHCI=y
982# CONFIG_USB is not set
983# CONFIG_USB_OTG_WHITELIST is not set
984# CONFIG_USB_OTG_BLACKLIST_HUB is not set
985
986#
987# Enable Host or Gadget support to see Inventra options
988#
989
990#
991# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
992#
993# CONFIG_USB_GADGET is not set
994
995#
996# OTG and related infrastructure
997#
998# CONFIG_UWB is not set 924# CONFIG_UWB is not set
999# CONFIG_MMC is not set 925# CONFIG_MMC is not set
1000# CONFIG_MEMSTICK is not set 926# CONFIG_MEMSTICK is not set
@@ -1064,16 +990,9 @@ CONFIG_RTC_DRV_DS1307=y
1064# 990#
1065# File systems 991# File systems
1066# 992#
1067CONFIG_EXT2_FS=y 993# CONFIG_EXT2_FS is not set
1068# CONFIG_EXT2_FS_XATTR is not set 994# CONFIG_EXT3_FS is not set
1069# CONFIG_EXT2_FS_XIP is not set
1070CONFIG_EXT3_FS=y
1071CONFIG_EXT3_FS_XATTR=y
1072# CONFIG_EXT3_FS_POSIX_ACL is not set
1073# CONFIG_EXT3_FS_SECURITY is not set
1074# CONFIG_EXT4_FS is not set 995# CONFIG_EXT4_FS is not set
1075CONFIG_JBD=y
1076CONFIG_FS_MBCACHE=y
1077# CONFIG_REISERFS_FS is not set 996# CONFIG_REISERFS_FS is not set
1078# CONFIG_JFS_FS is not set 997# CONFIG_JFS_FS is not set
1079# CONFIG_FS_POSIX_ACL is not set 998# CONFIG_FS_POSIX_ACL is not set
@@ -1122,8 +1041,17 @@ CONFIG_MISC_FILESYSTEMS=y
1122# CONFIG_BEFS_FS is not set 1041# CONFIG_BEFS_FS is not set
1123# CONFIG_BFS_FS is not set 1042# CONFIG_BFS_FS is not set
1124# CONFIG_EFS_FS is not set 1043# CONFIG_EFS_FS is not set
1125# CONFIG_JFFS2_FS is not set 1044CONFIG_JFFS2_FS=y
1126# CONFIG_UBIFS_FS is not set 1045CONFIG_JFFS2_FS_DEBUG=0
1046CONFIG_JFFS2_FS_WRITEBUFFER=y
1047# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1048# CONFIG_JFFS2_SUMMARY is not set
1049# CONFIG_JFFS2_FS_XATTR is not set
1050# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1051CONFIG_JFFS2_ZLIB=y
1052# CONFIG_JFFS2_LZO is not set
1053CONFIG_JFFS2_RTIME=y
1054# CONFIG_JFFS2_RUBIN is not set
1127# CONFIG_CRAMFS is not set 1055# CONFIG_CRAMFS is not set
1128# CONFIG_SQUASHFS is not set 1056# CONFIG_SQUASHFS is not set
1129# CONFIG_VXFS_FS is not set 1057# CONFIG_VXFS_FS is not set
@@ -1184,6 +1112,8 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
1184CONFIG_CRC32=y 1112CONFIG_CRC32=y
1185# CONFIG_CRC7 is not set 1113# CONFIG_CRC7 is not set
1186# CONFIG_LIBCRC32C is not set 1114# CONFIG_LIBCRC32C is not set
1115CONFIG_ZLIB_INFLATE=y
1116CONFIG_ZLIB_DEFLATE=y
1187CONFIG_PLIST=y 1117CONFIG_PLIST=y
1188CONFIG_HAS_IOMEM=y 1118CONFIG_HAS_IOMEM=y
1189CONFIG_HAS_IOPORT=y 1119CONFIG_HAS_IOPORT=y
@@ -1219,6 +1149,7 @@ CONFIG_DEBUG_MUTEXES=y
1219# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 1149# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
1220# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 1150# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
1221# CONFIG_DEBUG_KOBJECT is not set 1151# CONFIG_DEBUG_KOBJECT is not set
1152# CONFIG_DEBUG_HIGHMEM is not set
1222# CONFIG_DEBUG_BUGVERBOSE is not set 1153# CONFIG_DEBUG_BUGVERBOSE is not set
1223# CONFIG_DEBUG_INFO is not set 1154# CONFIG_DEBUG_INFO is not set
1224# CONFIG_DEBUG_VM is not set 1155# CONFIG_DEBUG_VM is not set
@@ -1236,6 +1167,7 @@ CONFIG_DEBUG_MUTEXES=y
1236# CONFIG_LATENCYTOP is not set 1167# CONFIG_LATENCYTOP is not set
1237CONFIG_SYSCTL_SYSCALL_CHECK=y 1168CONFIG_SYSCTL_SYSCALL_CHECK=y
1238CONFIG_HAVE_FUNCTION_TRACER=y 1169CONFIG_HAVE_FUNCTION_TRACER=y
1170CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1239CONFIG_HAVE_DYNAMIC_FTRACE=y 1171CONFIG_HAVE_DYNAMIC_FTRACE=y
1240CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 1172CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1241 1173
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 6d406c5c5de4..9696cc36d2dc 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -27,7 +27,7 @@
27 PPC_LONG "1b,4b,2b,4b\n" \ 27 PPC_LONG "1b,4b,2b,4b\n" \
28 ".previous" \ 28 ".previous" \
29 : "=&r" (oldval), "=&r" (ret) \ 29 : "=&r" (oldval), "=&r" (ret) \
30 : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \ 30 : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
31 : "cr0", "memory") 31 : "cr0", "memory")
32 32
33static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) 33static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
47 47
48 switch (op) { 48 switch (op) {
49 case FUTEX_OP_SET: 49 case FUTEX_OP_SET:
50 __futex_atomic_op("", ret, oldval, uaddr, oparg); 50 __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
51 break; 51 break;
52 case FUTEX_OP_ADD: 52 case FUTEX_OP_ADD:
53 __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg); 53 __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
54 break; 54 break;
55 case FUTEX_OP_OR: 55 case FUTEX_OP_OR:
56 __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg); 56 __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
57 break; 57 break;
58 case FUTEX_OP_ANDN: 58 case FUTEX_OP_ANDN:
59 __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg); 59 __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
60 break; 60 break;
61 case FUTEX_OP_XOR: 61 case FUTEX_OP_XOR:
62 __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg); 62 __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
63 break; 63 break;
64 default: 64 default:
65 ret = -ENOSYS; 65 ret = -ENOSYS;
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index cbf154387091..86d2366ab6a1 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -52,6 +52,12 @@
52 */ 52 */
53#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000) 53#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
54 54
55/* This indicates that the processor uses the wrong opcode for tlbilx
56 * instructions. During the ISA 2.06 development the opcode for tlbilx
57 * changed and some early implementations used to old opcode
58 */
59#define MMU_FTR_TLBILX_EARLY_OPCODE ASM_CONST(0x00400000)
60
55#ifndef __ASSEMBLY__ 61#ifndef __ASSEMBLY__
56#include <asm/cputable.h> 62#include <asm/cputable.h>
57 63
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 414c50e2e881..94942d60ddfd 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -29,7 +29,7 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
29 prop = of_get_property(np, "interrupts", NULL); 29 prop = of_get_property(np, "interrupts", NULL);
30 if (!prop) 30 if (!prop)
31 continue; 31 continue;
32 if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL) 32 if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
33 count++; 33 count++;
34 } 34 }
35 return count; 35 return count;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index f4a4db8d5555..ef4da37f3c10 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -43,7 +43,8 @@
43 43
44#define PPC_INST_STSWI 0x7c0005aa 44#define PPC_INST_STSWI 0x7c0005aa
45#define PPC_INST_STSWX 0x7c00052a 45#define PPC_INST_STSWX 0x7c00052a
46#define PPC_INST_TLBILX 0x7c000626 46#define PPC_INST_TLBILX 0x7c000024
47#define PPC_INST_TLBILX_EARLY 0x7c000626
47#define PPC_INST_WAIT 0x7c00007c 48#define PPC_INST_WAIT 0x7c00007c
48 49
49/* macros to insert fields into opcodes */ 50/* macros to insert fields into opcodes */
@@ -63,10 +64,18 @@
63#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI) 64#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
64#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI) 65#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
65#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \ 66#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \
66 __PPC_T_TLB(t) | __PPC_RA(a) | __PPC_RB(b)) 67 __PPC_T_TLB(t) | \
68 __PPC_RA(a) | __PPC_RB(b))
67#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b) 69#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
68#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b) 70#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
69#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) 71#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
72
73#define PPC_TLBILX_EARLY(t, a, b) stringify_in_c(.long PPC_INST_TLBILX_EARLY | \
74 __PPC_T_TLB(t) | \
75 __PPC_RA(a) | __PPC_RB(b))
76#define PPC_TLBILX_ALL_EARLY(a, b) PPC_TLBILX_EARLY(0, a, b)
77#define PPC_TLBILX_PID_EARLY(a, b) PPC_TLBILX_EARLY(1, a, b)
78#define PPC_TLBILX_VA_EARLY(a, b) PPC_TLBILX_EARLY(3, a, b)
70#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \ 79#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \
71 __PPC_WC(w)) 80 __PPC_WC(w))
72 81
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index cd1b687544f3..57db50f40289 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1766,7 +1766,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1766 .cpu_features = CPU_FTRS_E500MC, 1766 .cpu_features = CPU_FTRS_E500MC,
1767 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, 1767 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
1768 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | 1768 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
1769 MMU_FTR_USE_TLBILX, 1769 MMU_FTR_USE_TLBILX | MMU_FTR_TLBILX_EARLY_OPCODE,
1770 .icache_bsize = 64, 1770 .icache_bsize = 64,
1771 .dcache_bsize = 64, 1771 .dcache_bsize = 64,
1772 .num_pmcs = 4, 1772 .num_pmcs = 4,
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 7af72970faed..ad2eb4d34dd4 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -125,7 +125,6 @@ static void do_flush_tlb_page_ipi(void *param)
125 125
126void flush_tlb_mm(struct mm_struct *mm) 126void flush_tlb_mm(struct mm_struct *mm)
127{ 127{
128 cpumask_t cpu_mask;
129 unsigned int pid; 128 unsigned int pid;
130 129
131 preempt_disable(); 130 preempt_disable();
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 788b87c36f77..45fed3698349 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -138,7 +138,11 @@ BEGIN_MMU_FTR_SECTION
138 andi. r3,r3,MMUCSR0_TLBFI@l 138 andi. r3,r3,MMUCSR0_TLBFI@l
139 bne 1b 139 bne 1b
140MMU_FTR_SECTION_ELSE 140MMU_FTR_SECTION_ELSE
141 PPC_TLBILX_ALL(0,0) 141 BEGIN_MMU_FTR_SECTION_NESTED(96)
142 PPC_TLBILX_ALL(0,r3)
143 MMU_FTR_SECTION_ELSE_NESTED(96)
144 PPC_TLBILX_ALL_EARLY(0,r3)
145 ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_TLBILX_EARLY_OPCODE, 96)
142ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) 146ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
143 msync 147 msync
144 isync 148 isync
@@ -151,7 +155,11 @@ BEGIN_MMU_FTR_SECTION
151 wrteei 0 155 wrteei 0
152 mfspr r4,SPRN_MAS6 /* save MAS6 */ 156 mfspr r4,SPRN_MAS6 /* save MAS6 */
153 mtspr SPRN_MAS6,r3 157 mtspr SPRN_MAS6,r3
158 BEGIN_MMU_FTR_SECTION_NESTED(96)
154 PPC_TLBILX_PID(0,0) 159 PPC_TLBILX_PID(0,0)
160 MMU_FTR_SECTION_ELSE_NESTED(96)
161 PPC_TLBILX_PID_EARLY(0,0)
162 ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_TLBILX_EARLY_OPCODE, 96)
155 mtspr SPRN_MAS6,r4 /* restore MAS6 */ 163 mtspr SPRN_MAS6,r4 /* restore MAS6 */
156 wrtee r10 164 wrtee r10
157MMU_FTR_SECTION_ELSE 165MMU_FTR_SECTION_ELSE
@@ -185,7 +193,11 @@ BEGIN_MMU_FTR_SECTION
185 mtspr SPRN_MAS1,r4 193 mtspr SPRN_MAS1,r4
186 tlbwe 194 tlbwe
187MMU_FTR_SECTION_ELSE 195MMU_FTR_SECTION_ELSE
196 BEGIN_MMU_FTR_SECTION_NESTED(96)
188 PPC_TLBILX_VA(0,r3) 197 PPC_TLBILX_VA(0,r3)
198 MMU_FTR_SECTION_ELSE_NESTED(96)
199 PPC_TLBILX_VA_EARLY(0,r3)
200 ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_TLBILX_EARLY_OPCODE, 96)
189ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) 201ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
190 msync 202 msync
191 isync 203 isync
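
The nested MMU feature sections above are resolved by boot-time code patching, so only one of the three candidate sequences remains in the running kernel. A rough C rendering of the selection (sketch only; it assumes mmu_has_feature() from asm/mmu.h and merely mirrors which alternative gets patched in, it is not how the assembly branches):

    #include <asm/mmu.h>

    /* Sketch: which TLB-invalidate variant ends up in the instruction stream. */
    static const char *tlbilx_variant(void)
    {
            if (!mmu_has_feature(MMU_FTR_USE_TLBILX))
                    return "MMUCSR0/tlbwe fallback";           /* outer IFCLR section */
            if (mmu_has_feature(MMU_FTR_TLBILX_EARLY_OPCODE))
                    return "tlbilx, early opcode 0x7c000626";  /* nested ELSE section */
            return "tlbilx, final opcode 0x7c000024";          /* nested IFCLR section */
    }
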
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index fafcaa0e81ef..ab69925d579b 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -25,6 +25,7 @@
25#include <asm/smp.h> 25#include <asm/smp.h>
26#include <asm/system.h> 26#include <asm/system.h>
27#include <asm/uaccess.h> 27#include <asm/uaccess.h>
28#include <asm/firmware.h>
28 29
29#include "plpar_wrappers.h" 30#include "plpar_wrappers.h"
30 31
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 380420f8c400..9a2a6e32f00f 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -182,6 +182,8 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
182 if (!driver) 182 if (!driver)
183 return; 183 return;
184 184
185 dev->error_state = pci_channel_io_normal;
186
185 eeh_enable_irq(dev); 187 eeh_enable_irq(dev);
186 188
187 if (!driver->err_handler || 189 if (!driver->err_handler ||
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 27b70d8a359c..aeb3cff95f63 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -176,7 +176,7 @@ static void __appldata_mod_vtimer_wrap(void *p) {
176 struct vtimer_list *timer; 176 struct vtimer_list *timer;
177 u64 expires; 177 u64 expires;
178 } *args = p; 178 } *args = p;
179 mod_virt_timer(args->timer, args->expires); 179 mod_virt_timer_periodic(args->timer, args->expires);
180} 180}
181 181
182#define APPLDATA_ADD_TIMER 0 182#define APPLDATA_ADD_TIMER 0
diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h
new file mode 100644
index 000000000000..07836a2e5222
--- /dev/null
+++ b/arch/s390/include/asm/cpuid.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright IBM Corp. 2000,2009
3 * Author(s): Hartmut Penner <hp@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>
5 * Christian Ehrhardt <ehrhardt@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_CPUID_H_
9#define _ASM_S390_CPUID_H_
10
11/*
12 * CPU type and hardware bug flags. Kept separately for each CPU.
13 * Members of this structure are referenced in head.S, so think twice
14 * before touching them. [mj]
15 */
16
17typedef struct
18{
19 unsigned int version : 8;
20 unsigned int ident : 24;
21 unsigned int machine : 16;
22 unsigned int unused : 16;
23} __attribute__ ((packed)) cpuid_t;
24
25#endif /* _ASM_S390_CPUID_H_ */
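
cpuid_t simply names the 8 bytes that the STIDP instruction stores: an 8-bit version code, a 24-bit CPU identification number and a 16-bit machine-type number, with 16 bits unused. A hypothetical caller, using the get_cpu_id() helper that remains in asm/processor.h further down in this patch:

    #include <linux/kernel.h>
    #include <asm/cpuid.h>
    #include <asm/processor.h>

    /* Hypothetical helper: dump the identification stored by STIDP. */
    static void report_cpu_id(void)
    {
            cpuid_t id;

            get_cpu_id(&id);
            pr_info("cpu: version=%02x ident=%06x machine=%04x\n",
                    id.version, id.ident, id.machine);
    }
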
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index c6e674f5fca9..54ea39f96ecd 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -15,6 +15,7 @@
15#define ASM_KVM_HOST_H 15#define ASM_KVM_HOST_H
16#include <linux/kvm_host.h> 16#include <linux/kvm_host.h>
17#include <asm/debug.h> 17#include <asm/debug.h>
18#include <asm/cpuid.h>
18 19
19#define KVM_MAX_VCPUS 64 20#define KVM_MAX_VCPUS 64
20#define KVM_MEMORY_SLOTS 32 21#define KVM_MEMORY_SLOTS 32
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index b349f1c7fdfa..3aeca492b147 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -66,6 +66,7 @@
66#define __LC_USER_EXEC_ASCE 0x02ac 66#define __LC_USER_EXEC_ASCE 0x02ac
67#define __LC_CPUID 0x02b0 67#define __LC_CPUID 0x02b0
68#define __LC_INT_CLOCK 0x02c8 68#define __LC_INT_CLOCK 0x02c8
69#define __LC_MACHINE_FLAGS 0x02d8
69#define __LC_IRB 0x0300 70#define __LC_IRB 0x0300
70#define __LC_PFAULT_INTPARM 0x0080 71#define __LC_PFAULT_INTPARM 0x0080
71#define __LC_CPU_TIMER_SAVE_AREA 0x00d8 72#define __LC_CPU_TIMER_SAVE_AREA 0x00d8
@@ -110,6 +111,7 @@
110#define __LC_CPUID 0x0320 111#define __LC_CPUID 0x0320
111#define __LC_INT_CLOCK 0x0340 112#define __LC_INT_CLOCK 0x0340
112#define __LC_VDSO_PER_CPU 0x0350 113#define __LC_VDSO_PER_CPU 0x0350
114#define __LC_MACHINE_FLAGS 0x0358
113#define __LC_IRB 0x0380 115#define __LC_IRB 0x0380
114#define __LC_PASTE 0x03c0 116#define __LC_PASTE 0x03c0
115#define __LC_PFAULT_INTPARM 0x11b8 117#define __LC_PFAULT_INTPARM 0x11b8
@@ -127,9 +129,9 @@
127 129
128#ifndef __ASSEMBLY__ 130#ifndef __ASSEMBLY__
129 131
130#include <asm/processor.h> 132#include <asm/cpuid.h>
133#include <asm/ptrace.h>
131#include <linux/types.h> 134#include <linux/types.h>
132#include <asm/sigp.h>
133 135
134void restart_int_handler(void); 136void restart_int_handler(void);
135void ext_int_handler(void); 137void ext_int_handler(void);
@@ -277,7 +279,8 @@ struct _lowcore
277 __u32 ext_call_fast; /* 0x02c4 */ 279 __u32 ext_call_fast; /* 0x02c4 */
278 __u64 int_clock; /* 0x02c8 */ 280 __u64 int_clock; /* 0x02c8 */
279 __u64 clock_comparator; /* 0x02d0 */ 281 __u64 clock_comparator; /* 0x02d0 */
280 __u8 pad_0x02d8[0x0300-0x02d8]; /* 0x02d8 */ 282 __u32 machine_flags; /* 0x02d8 */
283 __u8 pad_0x02dc[0x0300-0x02dc]; /* 0x02dc */
281 284
282 /* Interrupt response block */ 285 /* Interrupt response block */
283 __u8 irb[64]; /* 0x0300 */ 286 __u8 irb[64]; /* 0x0300 */
@@ -381,7 +384,8 @@ struct _lowcore
381 __u64 int_clock; /* 0x0340 */ 384 __u64 int_clock; /* 0x0340 */
382 __u64 clock_comparator; /* 0x0348 */ 385 __u64 clock_comparator; /* 0x0348 */
383 __u64 vdso_per_cpu_data; /* 0x0350 */ 386 __u64 vdso_per_cpu_data; /* 0x0350 */
384 __u8 pad_0x0358[0x0380-0x0358]; /* 0x0358 */ 387 __u64 machine_flags; /* 0x0358 */
388 __u8 pad_0x0360[0x0380-0x0360]; /* 0x0360 */
385 389
386 /* Interrupt response block. */ 390 /* Interrupt response block. */
387 __u8 irb[64]; /* 0x0380 */ 391 __u8 irb[64]; /* 0x0380 */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 61862b3ac794..c139fa7b8e89 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,7 +14,10 @@
14#define __ASM_S390_PROCESSOR_H 14#define __ASM_S390_PROCESSOR_H
15 15
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <asm/cpuid.h>
18#include <asm/page.h>
17#include <asm/ptrace.h> 19#include <asm/ptrace.h>
20#include <asm/setup.h>
18 21
19#ifdef __KERNEL__ 22#ifdef __KERNEL__
20/* 23/*
@@ -23,20 +26,6 @@
23 */ 26 */
24#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) 27#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
25 28
26/*
27 * CPU type and hardware bug flags. Kept separately for each CPU.
28 * Members of this structure are referenced in head.S, so think twice
29 * before touching them. [mj]
30 */
31
32typedef struct
33{
34 unsigned int version : 8;
35 unsigned int ident : 24;
36 unsigned int machine : 16;
37 unsigned int unused : 16;
38} __attribute__ ((packed)) cpuid_t;
39
40static inline void get_cpu_id(cpuid_t *ptr) 29static inline void get_cpu_id(cpuid_t *ptr)
41{ 30{
42 asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); 31 asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr));
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index f1b051630c50..539263fc9ab9 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -313,8 +313,6 @@ typedef struct
313 313
314 314
315#ifdef __KERNEL__ 315#ifdef __KERNEL__
316#include <asm/setup.h>
317#include <asm/page.h>
318 316
319/* 317/*
320 * The pt_regs struct defines the way the registers are stored on 318 * The pt_regs struct defines the way the registers are stored on
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index e8bd6ac22c99..38b0fc221ed7 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -14,6 +14,7 @@
14 14
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16 16
17#include <asm/lowcore.h>
17#include <asm/types.h> 18#include <asm/types.h>
18 19
19#define PARMAREA 0x10400 20#define PARMAREA 0x10400
@@ -63,7 +64,6 @@ extern unsigned int s390_noexec;
63/* 64/*
64 * Machine features detected in head.S 65 * Machine features detected in head.S
65 */ 66 */
66extern unsigned long machine_flags;
67 67
68#define MACHINE_FLAG_VM (1UL << 0) 68#define MACHINE_FLAG_VM (1UL << 0)
69#define MACHINE_FLAG_IEEE (1UL << 1) 69#define MACHINE_FLAG_IEEE (1UL << 1)
@@ -77,28 +77,28 @@ extern unsigned long machine_flags;
77#define MACHINE_FLAG_HPAGE (1UL << 10) 77#define MACHINE_FLAG_HPAGE (1UL << 10)
78#define MACHINE_FLAG_PFMF (1UL << 11) 78#define MACHINE_FLAG_PFMF (1UL << 11)
79 79
80#define MACHINE_IS_VM (machine_flags & MACHINE_FLAG_VM) 80#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
81#define MACHINE_IS_KVM (machine_flags & MACHINE_FLAG_KVM) 81#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
82#define MACHINE_HAS_DIAG9C (machine_flags & MACHINE_FLAG_DIAG9C) 82#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
83 83
84#ifndef __s390x__ 84#ifndef __s390x__
85#define MACHINE_HAS_IEEE (machine_flags & MACHINE_FLAG_IEEE) 85#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
86#define MACHINE_HAS_CSP (machine_flags & MACHINE_FLAG_CSP) 86#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
87#define MACHINE_HAS_IDTE (0) 87#define MACHINE_HAS_IDTE (0)
88#define MACHINE_HAS_DIAG44 (1) 88#define MACHINE_HAS_DIAG44 (1)
89#define MACHINE_HAS_MVPG (machine_flags & MACHINE_FLAG_MVPG) 89#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
90#define MACHINE_HAS_MVCOS (0) 90#define MACHINE_HAS_MVCOS (0)
91#define MACHINE_HAS_HPAGE (0) 91#define MACHINE_HAS_HPAGE (0)
92#define MACHINE_HAS_PFMF (0) 92#define MACHINE_HAS_PFMF (0)
93#else /* __s390x__ */ 93#else /* __s390x__ */
94#define MACHINE_HAS_IEEE (1) 94#define MACHINE_HAS_IEEE (1)
95#define MACHINE_HAS_CSP (1) 95#define MACHINE_HAS_CSP (1)
96#define MACHINE_HAS_IDTE (machine_flags & MACHINE_FLAG_IDTE) 96#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
97#define MACHINE_HAS_DIAG44 (machine_flags & MACHINE_FLAG_DIAG44) 97#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
98#define MACHINE_HAS_MVPG (1) 98#define MACHINE_HAS_MVPG (1)
99#define MACHINE_HAS_MVCOS (machine_flags & MACHINE_FLAG_MVCOS) 99#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
100#define MACHINE_HAS_HPAGE (machine_flags & MACHINE_FLAG_HPAGE) 100#define MACHINE_HAS_HPAGE (S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE)
101#define MACHINE_HAS_PFMF (machine_flags & MACHINE_FLAG_PFMF) 101#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
102#endif /* __s390x__ */ 102#endif /* __s390x__ */
103 103
104#define ZFCPDUMP_HSA_SIZE (32UL<<20) 104#define ZFCPDUMP_HSA_SIZE (32UL<<20)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index c544aa524535..461f2abd2e6f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -31,8 +31,9 @@
31#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) 31#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
32 32
33#ifndef __ASSEMBLY__ 33#ifndef __ASSEMBLY__
34#include <asm/processor.h>
35#include <asm/lowcore.h> 34#include <asm/lowcore.h>
35#include <asm/page.h>
36#include <asm/processor.h>
36 37
37/* 38/*
38 * low level task data that entry.S needs immediate access to 39 * low level task data that entry.S needs immediate access to
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index e4bcab739c19..814243cafdfe 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -41,6 +41,7 @@ extern void init_virt_timer(struct vtimer_list *timer);
41extern void add_virt_timer(void *new); 41extern void add_virt_timer(void *new);
42extern void add_virt_timer_periodic(void *new); 42extern void add_virt_timer_periodic(void *new);
43extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); 43extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
44extern int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires);
44extern int del_virt_timer(struct vtimer_list *timer); 45extern int del_virt_timer(struct vtimer_list *timer);
45 46
46extern void init_cpu_vtimer(void); 47extern void init_cpu_vtimer(void);
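
mod_virt_timer_periodic() is the interval-preserving counterpart of mod_virt_timer(); the appldata hunk earlier in this patch switches to it so that re-arming a periodic timer no longer degrades it to a one-shot. A hedged usage sketch — the struct fields follow the vtimer code in this patch, but the callback and surrounding helpers are placeholders:

    #include <linux/types.h>
    #include <asm/timer.h>

    static void sample_tick(unsigned long data)
    {
            /* runs once the task has consumed 'interval' of CPU time */
    }

    static struct vtimer_list sample_timer;

    static void sample_start(u64 slice)
    {
            init_virt_timer(&sample_timer);
            sample_timer.function = sample_tick;
            sample_timer.expires = slice;
            add_virt_timer_periodic(&sample_timer);
    }

    /* Later, change the period; must run on the CPU that owns the timer,
     * e.g. via smp_call_function_single(). */
    static void sample_retune(u64 slice)
    {
            mod_virt_timer_periodic(&sample_timer, slice);
    }
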
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index d744c3d62de5..cc21e3e20fd7 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -11,6 +11,9 @@
11#ifndef _ASM_S390_TIMEX_H 11#ifndef _ASM_S390_TIMEX_H
12#define _ASM_S390_TIMEX_H 12#define _ASM_S390_TIMEX_H
13 13
14/* The value of the TOD clock for 1.1.1970. */
15#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
16
14/* Inline functions for clock register access. */ 17/* Inline functions for clock register access. */
15static inline int set_clock(__u64 time) 18static inline int set_clock(__u64 time)
16{ 19{
@@ -85,4 +88,6 @@ int get_sync_clock(unsigned long long *clock);
85void init_cpu_timer(void); 88void init_cpu_timer(void);
86unsigned long long monotonic_clock(void); 89unsigned long long monotonic_clock(void);
87 90
91extern u64 sched_clock_base_cc;
92
88#endif 93#endif
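
TOD_UNIX_EPOCH is the TOD clock value at the Unix epoch: the TOD clock starts at 1900-01-01 and bit 51 equals one microsecond, so the constant is the 2,208,988,800 seconds between 1900 and 1970 expressed in microseconds and shifted left by 12 bits. A stand-alone sanity check (plain userspace C, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t days = 70ULL * 365 + 17;            /* 1900-01-01 .. 1970-01-01 */
            uint64_t secs = days * 86400;                /* 2208988800 */
            uint64_t tod  = (secs * 1000000ULL) << 12;   /* microseconds -> TOD units */

            assert(tod == 0x7d91048bca000000ULL);
            return 0;
    }
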
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index c8ad350d1444..f0f19e6ace6c 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -265,7 +265,9 @@
265#define __NR_pipe2 325 265#define __NR_pipe2 325
266#define __NR_dup3 326 266#define __NR_dup3 326
267#define __NR_epoll_create1 327 267#define __NR_epoll_create1 327
268#define NR_syscalls 328 268#define __NR_preadv 328
269#define __NR_pwritev 329
270#define NR_syscalls 330
269 271
270/* 272/*
271 * There are some system calls that are not present on 64 bit, some 273 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 67a60016babb..fa9905ce7d0b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -27,6 +27,8 @@ int main(void)
27 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 27 DEFINE(__TI_flags, offsetof(struct thread_info, flags));
28 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); 28 DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
29 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); 29 DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
30 DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
31 DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
30 BLANK(); 32 BLANK();
31 DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); 33 DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
32 DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); 34 DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 87cf5a79a351..fb38af6316bb 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1805,3 +1805,21 @@ compat_sys_keyctl_wrapper:
1805 llgfr %r5,%r5 # u32 1805 llgfr %r5,%r5 # u32
1806 llgfr %r6,%r6 # u32 1806 llgfr %r6,%r6 # u32
1807 jg compat_sys_keyctl # branch to system call 1807 jg compat_sys_keyctl # branch to system call
1808
1809 .globl compat_sys_preadv_wrapper
1810compat_sys_preadv_wrapper:
1811 llgfr %r2,%r2 # unsigned long
1812 llgtr %r3,%r3 # compat_iovec *
1813 llgfr %r4,%r4 # unsigned long
1814 llgfr %r5,%r5 # u32
1815 llgfr %r6,%r6 # u32
1816 jg compat_sys_preadv # branch to system call
1817
1818 .globl compat_sys_pwritev_wrapper
1819compat_sys_pwritev_wrapper:
1820 llgfr %r2,%r2 # unsigned long
1821 llgtr %r3,%r3 # compat_iovec *
1822 llgfr %r4,%r4 # unsigned long
1823 llgfr %r5,%r5 # u32
1824 llgfr %r6,%r6 # u32
1825 jg compat_sys_pwritev # branch to system call
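
The new wrappers only widen the registers a 31-bit caller passes before branching to the common compat handler: llgfr zero-extends a 32-bit value to 64 bits, and llgtr additionally clears bit 32 so a user pointer is reduced to a clean 31-bit address. Expressed in C for illustration (a sketch; the real work is the two instructions above):

    #include <stdint.h>

    /* llgfr: keep the low 32 bits, clear the rest. */
    static inline uint64_t widen_u32(uint64_t reg)
    {
            return reg & 0xffffffffULL;
    }

    /* llgtr: keep the low 31 bits, clear the rest (31-bit user pointer). */
    static inline uint64_t widen_ptr31(uint64_t reg)
    {
            return reg & 0x7fffffffULL;
    }
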
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4d221c81c849..cf09948faad6 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -34,8 +34,25 @@
34 34
35char kernel_nss_name[NSS_NAME_SIZE + 1]; 35char kernel_nss_name[NSS_NAME_SIZE + 1];
36 36
37static unsigned long machine_flags;
38
37static void __init setup_boot_command_line(void); 39static void __init setup_boot_command_line(void);
38 40
41/*
42 * Get the TOD clock running.
43 */
44static void __init reset_tod_clock(void)
45{
46 u64 time;
47
48 if (store_clock(&time) == 0)
49 return;
50 /* TOD clock not running. Set the clock to Unix Epoch. */
51 if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
52 disabled_wait(0);
53
54 sched_clock_base_cc = TOD_UNIX_EPOCH;
55}
39 56
40#ifdef CONFIG_SHARED_KERNEL 57#ifdef CONFIG_SHARED_KERNEL
41int __init savesys_ipl_nss(char *cmd, const int cmdlen); 58int __init savesys_ipl_nss(char *cmd, const int cmdlen);
@@ -370,6 +387,7 @@ static void __init setup_boot_command_line(void)
370 */ 387 */
371void __init startup_init(void) 388void __init startup_init(void)
372{ 389{
390 reset_tod_clock();
373 ipl_save_parameters(); 391 ipl_save_parameters();
374 rescue_initrd(); 392 rescue_initrd();
375 clear_bss_section(); 393 clear_bss_section();
@@ -391,5 +409,6 @@ void __init startup_init(void)
391 setup_hpage(); 409 setup_hpage();
392 sclp_facilities_detect(); 410 sclp_facilities_detect();
393 detect_memory_layout(memory_chunk); 411 detect_memory_layout(memory_chunk);
412 S390_lowcore.machine_flags = machine_flags;
394 lockdep_on(); 413 lockdep_on();
395} 414}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1268aa2991bf..f3e275934213 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -837,16 +837,29 @@ mcck_return:
837 __CPUINIT 837 __CPUINIT
838 .globl restart_int_handler 838 .globl restart_int_handler
839restart_int_handler: 839restart_int_handler:
840 basr %r1,0
841restart_base:
842 spt restart_vtime-restart_base(%r1)
843 stck __LC_LAST_UPDATE_CLOCK
844 mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
845 mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
840 l %r15,__LC_SAVE_AREA+60 # load ksp 846 l %r15,__LC_SAVE_AREA+60 # load ksp
841 lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs 847 lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
842 lam %a0,%a15,__LC_AREGS_SAVE_AREA 848 lam %a0,%a15,__LC_AREGS_SAVE_AREA
843 lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone 849 lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
850 l %r1,__LC_THREAD_INFO
851 mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
852 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
853 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
844 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 854 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
845 basr %r14,0 855 basr %r14,0
846 l %r14,restart_addr-.(%r14) 856 l %r14,restart_addr-.(%r14)
847 br %r14 # branch to start_secondary 857 br %r14 # branch to start_secondary
848restart_addr: 858restart_addr:
849 .long start_secondary 859 .long start_secondary
860 .align 8
861restart_vtime:
862 .long 0x7fffffff,0xffffffff
850 .previous 863 .previous
851#else 864#else
852/* 865/*
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index c6fbde13971a..84a105838e03 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -831,14 +831,27 @@ mcck_return:
831 __CPUINIT 831 __CPUINIT
832 .globl restart_int_handler 832 .globl restart_int_handler
833restart_int_handler: 833restart_int_handler:
834 basr %r1,0
835restart_base:
836 spt restart_vtime-restart_base(%r1)
837 stck __LC_LAST_UPDATE_CLOCK
838 mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
839 mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
834 lg %r15,__LC_SAVE_AREA+120 # load ksp 840 lg %r15,__LC_SAVE_AREA+120 # load ksp
835 lghi %r10,__LC_CREGS_SAVE_AREA 841 lghi %r10,__LC_CREGS_SAVE_AREA
836 lctlg %c0,%c15,0(%r10) # get new ctl regs 842 lctlg %c0,%c15,0(%r10) # get new ctl regs
837 lghi %r10,__LC_AREGS_SAVE_AREA 843 lghi %r10,__LC_AREGS_SAVE_AREA
838 lam %a0,%a15,0(%r10) 844 lam %a0,%a15,0(%r10)
839 lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone 845 lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
846 lg %r1,__LC_THREAD_INFO
847 mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
848 mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
849 xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
840 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on 850 stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
841 jg start_secondary 851 jg start_secondary
852 .align 8
853restart_vtime:
854 .long 0x7fffffff,0xffffffff
842 .previous 855 .previous
843#else 856#else
844/* 857/*
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1046c2c9f8d1..bba14494ee00 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -471,7 +471,12 @@ startup:basr %r13,0 # get base
471.LPG0: 471.LPG0:
472 xc 0x200(256),0x200 # partially clear lowcore 472 xc 0x200(256),0x200 # partially clear lowcore
473 xc 0x300(256),0x300 473 xc 0x300(256),0x300
474 474 l %r1,5f-.LPG0(%r13)
475 stck 0(%r1)
476 spt 6f-.LPG0(%r13)
477 mvc __LC_LAST_UPDATE_CLOCK(8),0(%r1)
478 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
479 mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13)
475#ifndef CONFIG_MARCH_G5 480#ifndef CONFIG_MARCH_G5
476 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} 481 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
477 stidp __LC_CPUID # store cpuid 482 stidp __LC_CPUID # store cpuid
@@ -496,9 +501,13 @@ startup:basr %r13,0 # get base
496 brct %r0,0b 501 brct %r0,0b
497#endif 502#endif
498 503
499 l %r13,0f-.LPG0(%r13) 504 l %r13,4f-.LPG0(%r13)
500 b 0(%r13) 505 b 0(%r13)
5010: .long startup_continue 506 .align 4
5074: .long startup_continue
5085: .long sched_clock_base_cc
509 .align 8
5106: .long 0x7fffffff,0xffffffff
502 511
503# 512#
504# params at 10400 (setup.h) 513# params at 10400 (setup.h)
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 4bfdc421d7e9..28cf196ba775 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/hardirq.h>
13#include <linux/time.h> 14#include <linux/time.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <asm/lowcore.h> 16#include <asm/lowcore.h>
@@ -253,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
253 struct mci *mci; 254 struct mci *mci;
254 int umode; 255 int umode;
255 256
256 lockdep_off(); 257 nmi_enter();
257 s390_idle_check(); 258 s390_idle_check();
258 259
259 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
@@ -363,7 +364,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
363 mcck->warning = 1; 364 mcck->warning = 1;
364 set_thread_flag(TIF_MCCK_PENDING); 365 set_thread_flag(TIF_MCCK_PENDING);
365 } 366 }
366 lockdep_on(); 367 nmi_exit();
367} 368}
368 369
369static int __init machine_check_init(void) 370static int __init machine_check_init(void)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 06201b93cbbf..7402b6a39ead 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -82,9 +82,6 @@ EXPORT_SYMBOL(console_devno);
82unsigned int console_irq = -1; 82unsigned int console_irq = -1;
83EXPORT_SYMBOL(console_irq); 83EXPORT_SYMBOL(console_irq);
84 84
85unsigned long machine_flags;
86EXPORT_SYMBOL(machine_flags);
87
88unsigned long elf_hwcap = 0; 85unsigned long elf_hwcap = 0;
89char elf_platform[ELF_PLATFORM_SIZE]; 86char elf_platform[ELF_PLATFORM_SIZE];
90 87
@@ -426,6 +423,7 @@ setup_lowcore(void)
426 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; 423 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
427 lc->current_task = (unsigned long) init_thread_union.thread_info.task; 424 lc->current_task = (unsigned long) init_thread_union.thread_info.task;
428 lc->thread_info = (unsigned long) &init_thread_union; 425 lc->thread_info = (unsigned long) &init_thread_union;
426 lc->machine_flags = S390_lowcore.machine_flags;
429#ifndef CONFIG_64BIT 427#ifndef CONFIG_64BIT
430 if (MACHINE_HAS_IEEE) { 428 if (MACHINE_HAS_IEEE) {
431 lc->extended_save_area_addr = (__u32) 429 lc->extended_save_area_addr = (__u32)
@@ -436,6 +434,14 @@ setup_lowcore(void)
436#else 434#else
437 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 435 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
438#endif 436#endif
437 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
438 lc->async_enter_timer = S390_lowcore.async_enter_timer;
439 lc->exit_timer = S390_lowcore.exit_timer;
440 lc->user_timer = S390_lowcore.user_timer;
441 lc->system_timer = S390_lowcore.system_timer;
442 lc->steal_timer = S390_lowcore.steal_timer;
443 lc->last_update_timer = S390_lowcore.last_update_timer;
444 lc->last_update_clock = S390_lowcore.last_update_clock;
439 set_prefix((u32)(unsigned long) lc); 445 set_prefix((u32)(unsigned long) lc);
440 lowcore_ptr[0] = lc; 446 lowcore_ptr[0] = lc;
441} 447}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 006ed5016eb4..a985a3ba4401 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -571,6 +571,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
571 cpu_lowcore->current_task = (unsigned long) idle; 571 cpu_lowcore->current_task = (unsigned long) idle;
572 cpu_lowcore->cpu_nr = cpu; 572 cpu_lowcore->cpu_nr = cpu;
573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
574 eieio(); 575 eieio();
575 576
576 while (signal_processor(cpu, sigp_restart) == sigp_busy) 577 while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -590,7 +591,8 @@ static int __init setup_possible_cpus(char *s)
590 int pcpus, cpu; 591 int pcpus, cpu;
591 592
592 pcpus = simple_strtoul(s, NULL, 0); 593 pcpus = simple_strtoul(s, NULL, 0);
593 for (cpu = 0; cpu < pcpus && cpu < nr_cpu_ids; cpu++) 594 init_cpu_possible(cpumask_of(0));
595 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
594 set_cpu_possible(cpu, true); 596 set_cpu_possible(cpu, true);
595 return 0; 597 return 0;
596} 598}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index fe5b25a988ab..2c7739fe70b1 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -336,3 +336,5 @@ SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper)
336SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */ 336SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */
337SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper) 337SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
338SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) 338SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
339SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
340SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index f72d41068dc2..6ded50dfa75a 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -52,9 +52,6 @@
52#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) 52#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
53#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) 53#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
54 54
55/* The value of the TOD clock for 1.1.1970. */
56#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
57
58/* 55/*
59 * Create a small time difference between the timer interrupts 56 * Create a small time difference between the timer interrupts
60 * on the different cpus to avoid lock contention. 57 * on the different cpus to avoid lock contention.
@@ -63,9 +60,10 @@
63 60
64#define TICK_SIZE tick 61#define TICK_SIZE tick
65 62
63u64 sched_clock_base_cc = -1; /* Force to data section. */
64
66static ext_int_info_t ext_int_info_cc; 65static ext_int_info_t ext_int_info_cc;
67static ext_int_info_t ext_int_etr_cc; 66static ext_int_info_t ext_int_etr_cc;
68static u64 sched_clock_base_cc;
69 67
70static DEFINE_PER_CPU(struct clock_event_device, comparators); 68static DEFINE_PER_CPU(struct clock_event_device, comparators);
71 69
@@ -195,22 +193,12 @@ static void timing_alert_interrupt(__u16 code)
195static void etr_reset(void); 193static void etr_reset(void);
196static void stp_reset(void); 194static void stp_reset(void);
197 195
198/* 196unsigned long read_persistent_clock(void)
199 * Get the TOD clock running.
200 */
201static u64 __init reset_tod_clock(void)
202{ 197{
203 u64 time; 198 struct timespec ts;
204
205 etr_reset();
206 stp_reset();
207 if (store_clock(&time) == 0)
208 return time;
209 /* TOD clock not running. Set the clock to Unix Epoch. */
210 if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
211 panic("TOD clock not operational.");
212 199
213 return TOD_UNIX_EPOCH; 200 tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts);
201 return ts.tv_sec;
214} 202}
215 203
216static cycle_t read_tod_clock(void) 204static cycle_t read_tod_clock(void)
@@ -265,12 +253,13 @@ void update_vsyscall_tz(void)
265 */ 253 */
266void __init time_init(void) 254void __init time_init(void)
267{ 255{
268 sched_clock_base_cc = reset_tod_clock(); 256 struct timespec ts;
257 unsigned long flags;
258 cycle_t now;
269 259
270 /* set xtime */ 260 /* Reset time synchronization interfaces. */
271 tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &xtime); 261 etr_reset();
272 set_normalized_timespec(&wall_to_monotonic, 262 stp_reset();
273 -xtime.tv_sec, -xtime.tv_nsec);
274 263
275 /* request the clock comparator external interrupt */ 264 /* request the clock comparator external interrupt */
276 if (register_early_external_interrupt(0x1004, 265 if (register_early_external_interrupt(0x1004,
@@ -278,17 +267,38 @@ void __init time_init(void)
278 &ext_int_info_cc) != 0) 267 &ext_int_info_cc) != 0)
279 panic("Couldn't request external interrupt 0x1004"); 268 panic("Couldn't request external interrupt 0x1004");
280 269
281 if (clocksource_register(&clocksource_tod) != 0)
282 panic("Could not register TOD clock source");
283
284 /* request the timing alert external interrupt */ 270 /* request the timing alert external interrupt */
285 if (register_early_external_interrupt(0x1406, 271 if (register_early_external_interrupt(0x1406,
286 timing_alert_interrupt, 272 timing_alert_interrupt,
287 &ext_int_etr_cc) != 0) 273 &ext_int_etr_cc) != 0)
288 panic("Couldn't request external interrupt 0x1406"); 274 panic("Couldn't request external interrupt 0x1406");
289 275
276 if (clocksource_register(&clocksource_tod) != 0)
277 panic("Could not register TOD clock source");
278
279 /*
280 * The TOD clock is an accurate clock. The xtime should be
281 * initialized in a way that the difference between TOD and
282 * xtime is reasonably small. Too bad that timekeeping_init
283 * sets xtime.tv_nsec to zero. In addition the clock source
284 * change from the jiffies clock source to the TOD clock
285 * source adds another error of up to 1/HZ second. The same
286 * function sets wall_to_monotonic to a value that is too
287 * small for /proc/uptime to be accurate.
288 * Reset xtime and wall_to_monotonic to sane values.
289 */
290 write_seqlock_irqsave(&xtime_lock, flags);
291 now = get_clock();
292 tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime);
293 clocksource_tod.cycle_last = now;
294 clocksource_tod.raw_time = xtime;
295 tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts);
296 set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec);
297 write_sequnlock_irqrestore(&xtime_lock, flags);
298
290 /* Enable TOD clock interrupts on the boot cpu. */ 299 /* Enable TOD clock interrupts on the boot cpu. */
291 init_cpu_timer(); 300 init_cpu_timer();
301
292 /* Enable cpu timer interrupts on the boot cpu. */ 302 /* Enable cpu timer interrupts on the boot cpu. */
293 vtime_init(); 303 vtime_init();
294} 304}
@@ -1423,6 +1433,7 @@ static void *stp_page;
1423static void stp_work_fn(struct work_struct *work); 1433static void stp_work_fn(struct work_struct *work);
1424static DEFINE_MUTEX(stp_work_mutex); 1434static DEFINE_MUTEX(stp_work_mutex);
1425static DECLARE_WORK(stp_work, stp_work_fn); 1435static DECLARE_WORK(stp_work, stp_work_fn);
1436static struct timer_list stp_timer;
1426 1437
1427static int __init early_parse_stp(char *p) 1438static int __init early_parse_stp(char *p)
1428{ 1439{
@@ -1454,10 +1465,16 @@ static void __init stp_reset(void)
1454 } 1465 }
1455} 1466}
1456 1467
1468static void stp_timeout(unsigned long dummy)
1469{
1470 queue_work(time_sync_wq, &stp_work);
1471}
1472
1457static int __init stp_init(void) 1473static int __init stp_init(void)
1458{ 1474{
1459 if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) 1475 if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1460 return 0; 1476 return 0;
1477 setup_timer(&stp_timer, stp_timeout, 0UL);
1461 time_init_wq(); 1478 time_init_wq();
1462 if (!stp_online) 1479 if (!stp_online)
1463 return 0; 1480 return 0;
@@ -1565,6 +1582,7 @@ static void stp_work_fn(struct work_struct *work)
1565 1582
1566 if (!stp_online) { 1583 if (!stp_online) {
1567 chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); 1584 chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1585 del_timer_sync(&stp_timer);
1568 goto out_unlock; 1586 goto out_unlock;
1569 } 1587 }
1570 1588
@@ -1586,6 +1604,13 @@ static void stp_work_fn(struct work_struct *work)
1586 stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map); 1604 stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
1587 put_online_cpus(); 1605 put_online_cpus();
1588 1606
1607 if (!check_sync_clock())
1608 /*
1609 * There is a usable clock but the synchronization failed.
1610 * Retry after a second.
1611 */
1612 mod_timer(&stp_timer, jiffies + HZ);
1613
1589out_unlock: 1614out_unlock:
1590 mutex_unlock(&stp_work_mutex); 1615 mutex_unlock(&stp_work_mutex);
1591} 1616}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ecf0304e61c1..38ea92ff04f9 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -134,6 +134,8 @@ void vtime_start_cpu(void)
134 /* Account time spent with enabled wait psw loaded as idle time. */ 134 /* Account time spent with enabled wait psw loaded as idle time. */
135 idle_time = S390_lowcore.int_clock - idle->idle_enter; 135 idle_time = S390_lowcore.int_clock - idle->idle_enter;
136 account_idle_time(idle_time); 136 account_idle_time(idle_time);
137 S390_lowcore.steal_timer +=
138 idle->idle_enter - S390_lowcore.last_update_clock;
137 S390_lowcore.last_update_clock = S390_lowcore.int_clock; 139 S390_lowcore.last_update_clock = S390_lowcore.int_clock;
138 140
139 /* Account system time spent going idle. */ 141 /* Account system time spent going idle. */
@@ -425,17 +427,7 @@ void add_virt_timer_periodic(void *new)
425} 427}
426EXPORT_SYMBOL(add_virt_timer_periodic); 428EXPORT_SYMBOL(add_virt_timer_periodic);
427 429
428/* 430int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
429 * If we change a pending timer the function must be called on the CPU
430 * where the timer is running on, e.g. by smp_call_function_single()
431 *
432 * The original mod_timer adds the timer if it is not pending. For
433 * compatibility we do the same. The timer will be added on the current
434 * CPU as a oneshot timer.
435 *
436 * returns whether it has modified a pending timer (1) or not (0)
437 */
438int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
439{ 431{
440 struct vtimer_queue *vq; 432 struct vtimer_queue *vq;
441 unsigned long flags; 433 unsigned long flags;
@@ -444,39 +436,35 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
444 BUG_ON(!timer->function); 436 BUG_ON(!timer->function);
445 BUG_ON(!expires || expires > VTIMER_MAX_SLICE); 437 BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
446 438
447 /*
448 * This is a common optimization triggered by the
449 * networking code - if the timer is re-modified
450 * to be the same thing then just return:
451 */
452 if (timer->expires == expires && vtimer_pending(timer)) 439 if (timer->expires == expires && vtimer_pending(timer))
453 return 1; 440 return 1;
454 441
455 cpu = get_cpu(); 442 cpu = get_cpu();
456 vq = &per_cpu(virt_cpu_timer, cpu); 443 vq = &per_cpu(virt_cpu_timer, cpu);
457 444
458 /* check if we run on the right CPU */
459 BUG_ON(timer->cpu != cpu);
460
461 /* disable interrupts before test if timer is pending */ 445 /* disable interrupts before test if timer is pending */
462 spin_lock_irqsave(&vq->lock, flags); 446 spin_lock_irqsave(&vq->lock, flags);
463 447
464 /* if timer isn't pending add it on the current CPU */ 448 /* if timer isn't pending add it on the current CPU */
465 if (!vtimer_pending(timer)) { 449 if (!vtimer_pending(timer)) {
466 spin_unlock_irqrestore(&vq->lock, flags); 450 spin_unlock_irqrestore(&vq->lock, flags);
467 /* we do not activate an interval timer with mod_virt_timer */ 451
468 timer->interval = 0; 452 if (periodic)
453 timer->interval = expires;
454 else
455 timer->interval = 0;
469 timer->expires = expires; 456 timer->expires = expires;
470 timer->cpu = cpu; 457 timer->cpu = cpu;
471 internal_add_vtimer(timer); 458 internal_add_vtimer(timer);
472 return 0; 459 return 0;
473 } 460 }
474 461
462 /* check if we run on the right CPU */
463 BUG_ON(timer->cpu != cpu);
464
475 list_del_init(&timer->entry); 465 list_del_init(&timer->entry);
476 timer->expires = expires; 466 timer->expires = expires;
477 467 if (periodic)
478 /* also change the interval if we have an interval timer */
479 if (timer->interval)
480 timer->interval = expires; 468 timer->interval = expires;
481 469
482 /* the timer can't expire anymore so we can release the lock */ 470 /* the timer can't expire anymore so we can release the lock */
@@ -484,9 +472,32 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
484 internal_add_vtimer(timer); 472 internal_add_vtimer(timer);
485 return 1; 473 return 1;
486} 474}
475
476/*
477 * If we change a pending timer the function must be called on the CPU
478 * where the timer is running.
479 *
480 * returns whether it has modified a pending timer (1) or not (0)
481 */
482int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
483{
484 return __mod_vtimer(timer, expires, 0);
485}
487EXPORT_SYMBOL(mod_virt_timer); 486EXPORT_SYMBOL(mod_virt_timer);
488 487
489/* 488/*
489 * If we change a pending timer the function must be called on the CPU
490 * where the timer is running.
491 *
492 * returns whether it has modified a pending timer (1) or not (0)
493 */
494int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
495{
496 return __mod_vtimer(timer, expires, 1);
497}
498EXPORT_SYMBOL(mod_virt_timer_periodic);
499
500/*
490 * delete a virtual timer 501 * delete a virtual timer
491 * 502 *
492 * returns whether the deleted timer was pending (1) or not (0) 503 * returns whether the deleted timer was pending (1) or not (0)
@@ -516,16 +527,8 @@ EXPORT_SYMBOL(del_virt_timer);
516 */ 527 */
517void init_cpu_vtimer(void) 528void init_cpu_vtimer(void)
518{ 529{
519 struct thread_info *ti = current_thread_info();
520 struct vtimer_queue *vq; 530 struct vtimer_queue *vq;
521 531
522 S390_lowcore.user_timer = ti->user_timer;
523 S390_lowcore.system_timer = ti->system_timer;
524
525 /* kick the virtual timer */
526 asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
527 asm volatile ("STPT %0" : "=m" (S390_lowcore.last_update_timer));
528
529 /* initialize per cpu vtimer structure */ 532 /* initialize per cpu vtimer structure */
530 vq = &__get_cpu_var(virt_cpu_timer); 533 vq = &__get_cpu_var(virt_cpu_timer);
531 INIT_LIST_HEAD(&vq->list); 534 INIT_LIST_HEAD(&vq->list);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e4babecf934..e7390dd0283d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,6 +14,7 @@ config SUPERH
14 select HAVE_GENERIC_DMA_COHERENT 14 select HAVE_GENERIC_DMA_COHERENT
15 select HAVE_IOREMAP_PROT if MMU 15 select HAVE_IOREMAP_PROT if MMU
16 select HAVE_ARCH_TRACEHOOK 16 select HAVE_ARCH_TRACEHOOK
17 select HAVE_DMA_API_DEBUG
17 help 18 help
18 The SuperH is a RISC processor targeted for use in embedded systems 19 The SuperH is a RISC processor targeted for use in embedded systems
19 and consumer electronics; it was also used in the Sega Dreamcast 20 and consumer electronics; it was also used in the Sega Dreamcast
@@ -21,7 +22,7 @@ config SUPERH
21 <http://www.linux-sh.org/>. 22 <http://www.linux-sh.org/>.
22 23
23config SUPERH32 24config SUPERH32
24 def_bool !SUPERH64 25 def_bool ARCH = "sh"
25 select HAVE_KPROBES 26 select HAVE_KPROBES
26 select HAVE_KRETPROBES 27 select HAVE_KRETPROBES
27 select HAVE_FUNCTION_TRACER 28 select HAVE_FUNCTION_TRACER
@@ -31,7 +32,7 @@ config SUPERH32
31 select ARCH_HIBERNATION_POSSIBLE if MMU 32 select ARCH_HIBERNATION_POSSIBLE if MMU
32 33
33config SUPERH64 34config SUPERH64
34 def_bool y if CPU_SH5 35 def_bool ARCH = "sh64"
35 36
36config ARCH_DEFCONFIG 37config ARCH_DEFCONFIG
37 string 38 string
@@ -187,6 +188,8 @@ config ARCH_SHMOBILE
187 bool 188 bool
188 select ARCH_SUSPEND_POSSIBLE 189 select ARCH_SUSPEND_POSSIBLE
189 190
191if SUPERH32
192
190choice 193choice
191 prompt "Processor sub-type selection" 194 prompt "Processor sub-type selection"
192 195
@@ -408,6 +411,15 @@ config CPU_SUBTYPE_SH7366
408 select SYS_SUPPORTS_NUMA 411 select SYS_SUPPORTS_NUMA
409 select SYS_SUPPORTS_CMT 412 select SYS_SUPPORTS_CMT
410 413
414endchoice
415
416endif
417
418if SUPERH64
419
420choice
421 prompt "Processor sub-type selection"
422
411# SH-5 Processor Support 423# SH-5 Processor Support
412 424
413config CPU_SUBTYPE_SH5_101 425config CPU_SUBTYPE_SH5_101
@@ -420,6 +432,8 @@ config CPU_SUBTYPE_SH5_103
420 432
421endchoice 433endchoice
422 434
435endif
436
423source "arch/sh/mm/Kconfig" 437source "arch/sh/mm/Kconfig"
424 438
425source "arch/sh/Kconfig.cpu" 439source "arch/sh/Kconfig.cpu"
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 912458f666eb..39e46919df14 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -349,6 +349,7 @@ static int ov7725_power(struct device *dev, int mode)
349static struct ov772x_camera_info ov7725_info = { 349static struct ov772x_camera_info ov7725_info = {
350 .buswidth = SOCAM_DATAWIDTH_8, 350 .buswidth = SOCAM_DATAWIDTH_8,
351 .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP, 351 .flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
352 .edgectrl = OV772X_AUTO_EDGECTRL(0xf, 0),
352 .link = { 353 .link = {
353 .power = ov7725_power, 354 .power = ov7725_power,
354 }, 355 },
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index 8367d1d789c3..beb88c4da2c1 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -2,6 +2,8 @@
2 * Renesas Technology Corp. SH7786 Urquell Support. 2 * Renesas Technology Corp. SH7786 Urquell Support.
3 * 3 *
4 * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com> 4 * Copyright (C) 2008 Kuninori Morimoto <morimoto.kuninori@renesas.com>
5 *
6 * Based on board-sh7785lcr.c
5 * Copyright (C) 2008 Yoshihiro Shimoda 7 * Copyright (C) 2008 Yoshihiro Shimoda
6 * 8 *
7 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
@@ -21,6 +23,32 @@
21#include <asm/heartbeat.h> 23#include <asm/heartbeat.h>
22#include <asm/sizes.h> 24#include <asm/sizes.h>
23 25
26/*
27 * bit 1234 5678
28 *----------------------------
29 * SW1 0101 0010 -> Pck 33MHz version
30 * (1101 0010) Pck 66MHz version
31 * SW2 0x1x xxxx -> little endian
32 * 29bit mode
33 * SW47 0001 1000 -> CS0 : on-board flash
34 * CS1 : SRAM, registers, LAN, PCMCIA
35 * 38400 bps for SCIF1
36 *
37 * Address
38 * 0x00000000 - 0x04000000 (CS0) Nor Flash
39 * 0x04000000 - 0x04200000 (CS1) SRAM
40 * 0x05000000 - 0x05800000 (CS1) on board register
41 * 0x05800000 - 0x06000000 (CS1) LAN91C111
42 * 0x06000000 - 0x06400000 (CS1) PCMCIA
43 * 0x08000000 - 0x10000000 (CS2-CS3) DDR3
44 * 0x10000000 - 0x14000000 (CS4) PCIe
45 * 0x14000000 - 0x14800000 (CS5) Core0 LRAM/URAM
46 * 0x14800000 - 0x15000000 (CS5) Core1 LRAM/URAM
47 * 0x18000000 - 0x1C000000 (CS6) ATA/NAND-Flash
48 * 0x1C000000 - (CS7) SH7786 Control register
49 */
50
51/* HeartBeat */
24static struct resource heartbeat_resources[] = { 52static struct resource heartbeat_resources[] = {
25 [0] = { 53 [0] = {
26 .start = BOARDREG(SLEDR), 54 .start = BOARDREG(SLEDR),
@@ -43,6 +71,7 @@ static struct platform_device heartbeat_device = {
43 .resource = heartbeat_resources, 71 .resource = heartbeat_resources,
44}; 72};
45 73
74/* LAN91C111 */
46static struct smc91x_platdata smc91x_info = { 75static struct smc91x_platdata smc91x_info = {
47 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, 76 .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
48}; 77};
@@ -69,6 +98,7 @@ static struct platform_device smc91x_eth_device = {
69 }, 98 },
70}; 99};
71 100
101/* Nor Flash */
72static struct mtd_partition nor_flash_partitions[] = { 102static struct mtd_partition nor_flash_partitions[] = {
73 { 103 {
74 .name = "loader", 104 .name = "loader",
diff --git a/arch/sh/drivers/pci/ops-sh7785lcr.c b/arch/sh/drivers/pci/ops-sh7785lcr.c
index e8b7446a7c2b..fb0869f0bef8 100644
--- a/arch/sh/drivers/pci/ops-sh7785lcr.c
+++ b/arch/sh/drivers/pci/ops-sh7785lcr.c
@@ -48,8 +48,13 @@ EXPORT_SYMBOL(board_pci_channels);
48 48
49static struct sh4_pci_address_map sh7785_pci_map = { 49static struct sh4_pci_address_map sh7785_pci_map = {
50 .window0 = { 50 .window0 = {
51#if defined(CONFIG_32BIT)
52 .base = SH7780_32BIT_DDR_BASE_ADDR,
53 .size = 0x40000000,
54#else
51 .base = SH7780_CS0_BASE_ADDR, 55 .base = SH7780_CS0_BASE_ADDR,
52 .size = 0x20000000, 56 .size = 0x20000000,
57#endif
53 }, 58 },
54 59
55 .flags = SH4_PCIC_NO_RESET, 60 .flags = SH4_PCIC_NO_RESET,
diff --git a/arch/sh/drivers/pci/pci-sh7780.h b/arch/sh/drivers/pci/pci-sh7780.h
index 97b2c98f05c4..93adc7119b79 100644
--- a/arch/sh/drivers/pci/pci-sh7780.h
+++ b/arch/sh/drivers/pci/pci-sh7780.h
@@ -104,6 +104,8 @@
104#define SH7780_CS5_BASE_ADDR (SH7780_CS4_BASE_ADDR + SH7780_MEM_REGION_SIZE) 104#define SH7780_CS5_BASE_ADDR (SH7780_CS4_BASE_ADDR + SH7780_MEM_REGION_SIZE)
105#define SH7780_CS6_BASE_ADDR (SH7780_CS5_BASE_ADDR + SH7780_MEM_REGION_SIZE) 105#define SH7780_CS6_BASE_ADDR (SH7780_CS5_BASE_ADDR + SH7780_MEM_REGION_SIZE)
106 106
107#define SH7780_32BIT_DDR_BASE_ADDR 0x40000000
108
107struct sh4_pci_address_map; 109struct sh4_pci_address_map;
108 110
109/* arch/sh/drivers/pci/pci-sh7780.c */ 111/* arch/sh/drivers/pci/pci-sh7780.c */
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index e36c7b870861..0d6ac7a1db49 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/dma-debug.h>
22#include <asm/io.h> 23#include <asm/io.h>
23 24
24static int __init pcibios_init(void) 25static int __init pcibios_init(void)
@@ -43,6 +44,8 @@ static int __init pcibios_init(void)
43 44
44 pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq); 45 pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);
45 46
47 dma_debug_add_bus(&pci_bus_type);
48
46 return 0; 49 return 0;
47} 50}
48subsys_initcall(pcibios_init); 51subsys_initcall(pcibios_init);
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index 627315ecdb52..ea9d4f41c9d2 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <linux/dma-debug.h>
6#include <asm/cacheflush.h> 7#include <asm/cacheflush.h>
7#include <asm/io.h> 8#include <asm/io.h>
8#include <asm-generic/dma-coherent.h> 9#include <asm-generic/dma-coherent.h>
@@ -38,16 +39,26 @@ static inline dma_addr_t dma_map_single(struct device *dev,
38 void *ptr, size_t size, 39 void *ptr, size_t size,
39 enum dma_data_direction dir) 40 enum dma_data_direction dir)
40{ 41{
42 dma_addr_t addr = virt_to_phys(ptr);
43
41#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT) 44#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
42 if (dev->bus == &pci_bus_type) 45 if (dev->bus == &pci_bus_type)
43 return virt_to_phys(ptr); 46 return addr;
44#endif 47#endif
45 dma_cache_sync(dev, ptr, size, dir); 48 dma_cache_sync(dev, ptr, size, dir);
46 49
47 return virt_to_phys(ptr); 50 debug_dma_map_page(dev, virt_to_page(ptr),
51 (unsigned long)ptr & ~PAGE_MASK, size,
52 dir, addr, true);
53
54 return addr;
48} 55}
49 56
50#define dma_unmap_single(dev, addr, size, dir) do { } while (0) 57static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
58 size_t size, enum dma_data_direction dir)
59{
60 debug_dma_unmap_page(dev, addr, size, dir, true);
61}
51 62
52static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, 63static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
53 int nents, enum dma_data_direction dir) 64 int nents, enum dma_data_direction dir)
@@ -59,12 +70,19 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
59 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); 70 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
60#endif 71#endif
61 sg[i].dma_address = sg_phys(&sg[i]); 72 sg[i].dma_address = sg_phys(&sg[i]);
73 sg[i].dma_length = sg[i].length;
62 } 74 }
63 75
76 debug_dma_map_sg(dev, sg, nents, i, dir);
77
64 return nents; 78 return nents;
65} 79}
66 80
67#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0) 81static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
82 int nents, enum dma_data_direction dir)
83{
84 debug_dma_unmap_sg(dev, sg, nents, dir);
85}
68 86
69static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, 87static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
70 unsigned long offset, size_t size, 88 unsigned long offset, size_t size,
@@ -111,6 +129,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
111 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir); 129 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
112#endif 130#endif
113 sg[i].dma_address = sg_phys(&sg[i]); 131 sg[i].dma_address = sg_phys(&sg[i]);
132 sg[i].dma_length = sg[i].length;
114 } 133 }
115} 134}
116 135
@@ -119,6 +138,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
119 enum dma_data_direction dir) 138 enum dma_data_direction dir)
120{ 139{
121 dma_sync_single(dev, dma_handle, size, dir); 140 dma_sync_single(dev, dma_handle, size, dir);
141 debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
122} 142}
123 143
124static inline void dma_sync_single_for_device(struct device *dev, 144static inline void dma_sync_single_for_device(struct device *dev,
@@ -127,6 +147,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
127 enum dma_data_direction dir) 147 enum dma_data_direction dir)
128{ 148{
129 dma_sync_single(dev, dma_handle, size, dir); 149 dma_sync_single(dev, dma_handle, size, dir);
150 debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
130} 151}
131 152
132static inline void dma_sync_single_range_for_cpu(struct device *dev, 153static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -136,6 +157,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
136 enum dma_data_direction direction) 157 enum dma_data_direction direction)
137{ 158{
138 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction); 159 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
160 debug_dma_sync_single_range_for_cpu(dev, dma_handle,
161 offset, size, direction);
139} 162}
140 163
141static inline void dma_sync_single_range_for_device(struct device *dev, 164static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -145,6 +168,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
145 enum dma_data_direction direction) 168 enum dma_data_direction direction)
146{ 169{
147 dma_sync_single_for_device(dev, dma_handle+offset, size, direction); 170 dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
171 debug_dma_sync_single_range_for_device(dev, dma_handle,
172 offset, size, direction);
148} 173}
149 174
150 175
@@ -153,6 +178,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
153 enum dma_data_direction dir) 178 enum dma_data_direction dir)
154{ 179{
155 dma_sync_sg(dev, sg, nelems, dir); 180 dma_sync_sg(dev, sg, nelems, dir);
181 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
156} 182}
157 183
158static inline void dma_sync_sg_for_device(struct device *dev, 184static inline void dma_sync_sg_for_device(struct device *dev,
@@ -160,9 +186,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
160 enum dma_data_direction dir) 186 enum dma_data_direction dir)
161{ 187{
162 dma_sync_sg(dev, sg, nelems, dir); 188 dma_sync_sg(dev, sg, nelems, dir);
189 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
163} 190}
164 191
165
166static inline int dma_get_cache_alignment(void) 192static inline int dma_get_cache_alignment(void)
167{ 193{
168 /* 194 /*
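
Together with HAVE_DMA_API_DEBUG in arch/sh/Kconfig, these debug_dma_* hooks let CONFIG_DMA_API_DEBUG verify that every streaming mapping is unmapped with a matching handle, size and direction. A hypothetical driver-side pairing the checker would validate (device, buffer and length are placeholders):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical helper: map a buffer for a device read, then unmap it.
     * dma-debug warns if the unmap does not match the map. */
    static void send_buffer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            /* ... program the hardware and wait for completion ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
    }
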
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
index 2084d0373693..c693d268a413 100644
--- a/arch/sh/include/asm/scatterlist.h
+++ b/arch/sh/include/asm/scatterlist.h
@@ -5,12 +5,13 @@
5 5
6struct scatterlist { 6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG 7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic; 8 unsigned long sg_magic;
9#endif 9#endif
10 unsigned long page_link; 10 unsigned long page_link;
11 unsigned int offset;/* for highmem, page offset */ 11 unsigned int offset; /* for highmem, page offset */
12 dma_addr_t dma_address; 12 unsigned int length;
13 unsigned int length; 13 dma_addr_t dma_address;
14 unsigned int dma_length;
14}; 15};
15 16
16#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK 17#define ISA_DMA_THRESHOLD PHYS_ADDR_MASK
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index a3f239545897..8489a0905a87 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -37,8 +37,11 @@
37#define pcibus_to_node(bus) ((void)(bus), -1) 37#define pcibus_to_node(bus) ((void)(bus), -1)
38#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ 38#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
39 CPU_MASK_ALL : \ 39 CPU_MASK_ALL : \
40 node_to_cpumask(pcibus_to_node(bus)) \ 40 node_to_cpumask(pcibus_to_node(bus)))
41 ) 41#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
42 CPU_MASK_ALL_PTR : \
43 cpumask_of_node(pcibus_to_node(bus)))
44
42#endif 45#endif
43 46
44#include <asm-generic/topology.h> 47#include <asm-generic/topology.h>
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index d52c000cf924..2efb819e2db3 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -341,8 +341,10 @@
341#define __NR_dup3 330 341#define __NR_dup3 330
342#define __NR_pipe2 331 342#define __NR_pipe2 331
343#define __NR_inotify_init1 332 343#define __NR_inotify_init1 332
344#define __NR_preadv 333
345#define __NR_pwritev 334
344 346
345#define NR_syscalls 333 347#define NR_syscalls 335
346 348
347#ifdef __KERNEL__ 349#ifdef __KERNEL__
348 350
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 7c54e91753c1..6eb9d2934c0f 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -381,10 +381,12 @@
381#define __NR_dup3 358 381#define __NR_dup3 358
382#define __NR_pipe2 359 382#define __NR_pipe2 359
383#define __NR_inotify_init1 360 383#define __NR_inotify_init1 360
384#define __NR_preadv 361
385#define __NR_pwritev 362
384 386
385#ifdef __KERNEL__ 387#ifdef __KERNEL__
386 388
387#define NR_syscalls 361 389#define NR_syscalls 363
388 390
389#define __ARCH_WANT_IPC_PARSE_VERSION 391#define __ARCH_WANT_IPC_PARSE_VERSION
390#define __ARCH_WANT_OLD_READDIR 392#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 5a47e1cf442e..90e8cfff55fd 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -143,14 +143,14 @@ static void __init sh7786_usb_setup(void)
143 * Set the PHY and PLL enable bit 143 * Set the PHY and PLL enable bit
144 */ 144 */
145 __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1); 145 __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
146 while (i-- && 146 while (i--) {
147 ((__raw_readl(USBST) & ACT_PLL_STATUS) != ACT_PLL_STATUS)) 147 if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
148 /* Set the PHY RST bit */
149 __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
150 printk(KERN_INFO "sh7786 usb setup done\n");
151 break;
152 }
148 cpu_relax(); 153 cpu_relax();
149
150 if (i) {
151 /* Set the PHY RST bit */
152 __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
153 printk(KERN_INFO "sh7786 usb setup done\n");
154 } 154 }
155} 155}
156 156
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index e67c1733e1b9..05202edd8e21 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -349,3 +349,5 @@ ENTRY(sys_call_table)
349 .long sys_dup3 /* 330 */ 349 .long sys_dup3 /* 330 */
350 .long sys_pipe2 350 .long sys_pipe2
351 .long sys_inotify_init1 351 .long sys_inotify_init1
352 .long sys_preadv
353 	.long sys_pwritev
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 557cb91f5caf..a083609f9284 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -387,3 +387,5 @@ sys_call_table:
387 .long sys_dup3 387 .long sys_dup3
388 .long sys_pipe2 388 .long sys_pipe2
389 .long sys_inotify_init1 /* 360 */ 389 .long sys_inotify_init1 /* 360 */
390 .long sys_preadv
391 .long sys_pwritev
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index edcd5fbf9651..e098ec158ddb 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -10,11 +10,22 @@
10 * for more details. 10 * for more details.
11 */ 11 */
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/init.h>
13#include <linux/platform_device.h> 14#include <linux/platform_device.h>
14#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16#include <linux/dma-debug.h>
17#include <linux/io.h>
15#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
16#include <asm/addrspace.h> 19#include <asm/addrspace.h>
17#include <asm/io.h> 20
21#define PREALLOC_DMA_DEBUG_ENTRIES 4096
22
23static int __init dma_init(void)
24{
25 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
26 return 0;
27}
28fs_initcall(dma_init);
18 29
19void *dma_alloc_coherent(struct device *dev, size_t size, 30void *dma_alloc_coherent(struct device *dev, size_t size,
20 dma_addr_t *dma_handle, gfp_t gfp) 31 dma_addr_t *dma_handle, gfp_t gfp)
@@ -45,6 +56,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
45 split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); 56 split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
46 57
47 *dma_handle = virt_to_phys(ret); 58 *dma_handle = virt_to_phys(ret);
59
60 debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
61
48 return ret_nocache; 62 return ret_nocache;
49} 63}
50EXPORT_SYMBOL(dma_alloc_coherent); 64EXPORT_SYMBOL(dma_alloc_coherent);
@@ -56,12 +70,15 @@ void dma_free_coherent(struct device *dev, size_t size,
56 unsigned long pfn = dma_handle >> PAGE_SHIFT; 70 unsigned long pfn = dma_handle >> PAGE_SHIFT;
57 int k; 71 int k;
58 72
59 if (!dma_release_from_coherent(dev, order, vaddr)) { 73 WARN_ON(irqs_disabled()); /* for portability */
60 WARN_ON(irqs_disabled()); /* for portability */ 74
61 for (k = 0; k < (1 << order); k++) 75 if (dma_release_from_coherent(dev, order, vaddr))
62 __free_pages(pfn_to_page(pfn + k), 0); 76 return;
63 iounmap(vaddr); 77
64 } 78 debug_dma_free_coherent(dev, size, vaddr, dma_handle);
79 for (k = 0; k < (1 << order); k++)
80 __free_pages(pfn_to_page(pfn + k), 0);
81 iounmap(vaddr);
65} 82}
66EXPORT_SYMBOL(dma_free_coherent); 83EXPORT_SYMBOL(dma_free_coherent);
67 84
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index ce465975a6a5..bb91b1248cd1 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -15,6 +15,8 @@
15 15
16#ifdef __KERNEL__ 16#ifdef __KERNEL__
17 17
18#include <asm/system.h>
19
18#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
19 21
20extern int __atomic_add_return(int, atomic_t *); 22extern int __atomic_add_return(int, atomic_t *);
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index dff3f0253aa8..ff9ead640c4a 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -117,7 +117,7 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
117 if (!strcmp(parent->name, "dma")) { 117 if (!strcmp(parent->name, "dma")) {
118 p = parport_pc_probe_port(base, base + 0x400, 118 p = parport_pc_probe_port(base, base + 0x400,
119 op->irqs[0], PARPORT_DMA_NOFIFO, 119 op->irqs[0], PARPORT_DMA_NOFIFO,
120 op->dev.parent->parent); 120 op->dev.parent->parent, 0);
121 if (!p) 121 if (!p)
122 return -ENOMEM; 122 return -ENOMEM;
123 dev_set_drvdata(&op->dev, p); 123 dev_set_drvdata(&op->dev, p);
@@ -168,7 +168,8 @@ static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id
168 p = parport_pc_probe_port(base, base + 0x400, 168 p = parport_pc_probe_port(base, base + 0x400,
169 op->irqs[0], 169 op->irqs[0],
170 slot, 170 slot,
171 op->dev.parent); 171 op->dev.parent,
172 0);
172 err = -ENOMEM; 173 err = -ENOMEM;
173 if (!p) 174 if (!p)
174 goto out_disable_irq; 175 goto out_disable_irq;
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 6ce5d2598a09..adf5f273868a 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1183,8 +1183,7 @@ out_free_txq:
1183 free_queue(lp->tx_num_entries, lp->tx_base); 1183 free_queue(lp->tx_num_entries, lp->tx_base);
1184 1184
1185out_free_mssbuf: 1185out_free_mssbuf:
1186 if (mssbuf) 1186 kfree(mssbuf);
1187 kfree(mssbuf);
1188 1187
1189out_free_iommu: 1188out_free_iommu:
1190 ldc_iommu_release(lp); 1189 ldc_iommu_release(lp);
@@ -1217,8 +1216,7 @@ void ldc_free(struct ldc_channel *lp)
1217 1216
1218 hlist_del(&lp->list); 1217 hlist_del(&lp->list);
1219 1218
1220 if (lp->mssbuf) 1219 kfree(lp->mssbuf);
1221 kfree(lp->mssbuf);
1222 1220
1223 ldc_iommu_release(lp); 1221 ldc_iommu_release(lp);
1224 1222
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 708e12a26b05..f7642e5a94db 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -118,9 +118,9 @@ void __cpuinit smp_callin(void)
118 while (!cpu_isset(cpuid, smp_commenced_mask)) 118 while (!cpu_isset(cpuid, smp_commenced_mask))
119 rmb(); 119 rmb();
120 120
121 ipi_call_lock(); 121 ipi_call_lock_irq();
122 cpu_set(cpuid, cpu_online_map); 122 cpu_set(cpuid, cpu_online_map);
123 ipi_call_unlock(); 123 ipi_call_unlock_irq();
124 124
125 /* idle thread is expected to have preempt disabled */ 125 /* idle thread is expected to have preempt disabled */
126 preempt_disable(); 126 preempt_disable();
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bc25b9f5e4cd..c9086e6307a5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -353,6 +353,7 @@ config X86_UV
353 bool "SGI Ultraviolet" 353 bool "SGI Ultraviolet"
354 depends on X86_64 354 depends on X86_64
355 depends on X86_EXTENDED_PLATFORM 355 depends on X86_EXTENDED_PLATFORM
356 depends on NUMA
356 select X86_X2APIC 357 select X86_X2APIC
357 ---help--- 358 ---help---
358 This option is needed in order to support SGI Ultraviolet systems. 359 This option is needed in order to support SGI Ultraviolet systems.
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index db68ac8a5ac2..2cae46c7c8a2 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -17,6 +17,11 @@
17/* ========================================================================= */ 17/* ========================================================================= */
18/* UVH_BAU_DATA_CONFIG */ 18/* UVH_BAU_DATA_CONFIG */
19/* ========================================================================= */ 19/* ========================================================================= */
20#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
21#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
22#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
23#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
24/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
20#define UVH_BAU_DATA_CONFIG 0x61680UL 25#define UVH_BAU_DATA_CONFIG 0x61680UL
21#define UVH_BAU_DATA_CONFIG_32 0x0438 26#define UVH_BAU_DATA_CONFIG_32 0x0438
22 27
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1248318436e8..de1a50af807b 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -549,7 +549,8 @@ void __init uv_system_init(void)
549 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; 549 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
550 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; 550 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
551 int max_pnode = 0; 551 int max_pnode = 0;
552 unsigned long mmr_base, present; 552 unsigned long mmr_base, present, paddr;
553 unsigned short pnode_mask;
553 554
554 map_low_mmrs(); 555 map_low_mmrs();
555 556
@@ -592,6 +593,7 @@ void __init uv_system_init(void)
592 } 593 }
593 } 594 }
594 595
596 pnode_mask = (1 << n_val) - 1;
595 node_id.v = uv_read_local_mmr(UVH_NODE_ID); 597 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
596 gnode_upper = (((unsigned long)node_id.s.node_id) & 598 gnode_upper = (((unsigned long)node_id.s.node_id) &
597 ~((1 << n_val) - 1)) << m_val; 599 ~((1 << n_val) - 1)) << m_val;
@@ -615,7 +617,7 @@ void __init uv_system_init(void)
615 uv_cpu_hub_info(cpu)->numa_blade_id = blade; 617 uv_cpu_hub_info(cpu)->numa_blade_id = blade;
616 uv_cpu_hub_info(cpu)->blade_processor_id = lcpu; 618 uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
617 uv_cpu_hub_info(cpu)->pnode = pnode; 619 uv_cpu_hub_info(cpu)->pnode = pnode;
618 uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1; 620 uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
619 uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; 621 uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
620 uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; 622 uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
621 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; 623 uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
@@ -631,6 +633,16 @@ void __init uv_system_init(void)
631 lcpu, blade); 633 lcpu, blade);
632 } 634 }
633 635
636 /* Add blade/pnode info for nodes without cpus */
637 for_each_online_node(nid) {
638 if (uv_node_to_blade[nid] >= 0)
639 continue;
640 paddr = node_start_pfn(nid) << PAGE_SHIFT;
641 pnode = (paddr >> m_val) & pnode_mask;
642 blade = boot_pnode_to_blade(pnode);
643 uv_node_to_blade[nid] = blade;
644 }
645
634 map_gru_high(max_pnode); 646 map_gru_high(max_pnode);
635 map_mmr_high(max_pnode); 647 map_mmr_high(max_pnode);
636 map_config_high(max_pnode); 648 map_config_high(max_pnode);
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index f63882728d91..63a88e1f987d 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -182,7 +182,8 @@ void uv_bios_init(void)
182 memcpy(&uv_systab, tab, sizeof(struct uv_systab)); 182 memcpy(&uv_systab, tab, sizeof(struct uv_systab));
183 iounmap(tab); 183 iounmap(tab);
184 184
185 printk(KERN_INFO "EFI UV System Table Revision %d\n", tab->revision); 185 printk(KERN_INFO "EFI UV System Table Revision %d\n",
186 uv_systab.revision);
186} 187}
187#else /* !CONFIG_EFI */ 188#else /* !CONFIG_EFI */
188 189
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 837c2c4cc203..ecdb682ab516 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -204,7 +204,13 @@ static void drv_read(struct drv_cmd *cmd)
204 204
205static void drv_write(struct drv_cmd *cmd) 205static void drv_write(struct drv_cmd *cmd)
206{ 206{
207 int this_cpu;
208
209 this_cpu = get_cpu();
210 if (cpumask_test_cpu(this_cpu, cmd->mask))
211 do_drv_write(cmd);
207 smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); 212 smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
213 put_cpu();
208} 214}
209 215
210static u32 get_cur_val(const struct cpumask *mask) 216static u32 get_cur_val(const struct cpumask *mask)
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef310..2e0eb4140951 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -108,40 +108,29 @@ struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
108EXPORT_SYMBOL_GPL(ucode_cpu_info); 108EXPORT_SYMBOL_GPL(ucode_cpu_info);
109 109
110#ifdef CONFIG_MICROCODE_OLD_INTERFACE 110#ifdef CONFIG_MICROCODE_OLD_INTERFACE
111struct update_for_cpu {
112 const void __user *buf;
113 size_t size;
114};
115
116static long update_for_cpu(void *_ufc)
117{
118 struct update_for_cpu *ufc = _ufc;
119 int error;
120
121 error = microcode_ops->request_microcode_user(smp_processor_id(),
122 ufc->buf, ufc->size);
123 if (error < 0)
124 return error;
125 if (!error)
126 microcode_ops->apply_microcode(smp_processor_id());
127 return error;
128}
129
130static int do_microcode_update(const void __user *buf, size_t size) 111static int do_microcode_update(const void __user *buf, size_t size)
131{ 112{
113 cpumask_t old;
132 int error = 0; 114 int error = 0;
133 int cpu; 115 int cpu;
134 struct update_for_cpu ufc = { .buf = buf, .size = size }; 116
117 old = current->cpus_allowed;
135 118
136 for_each_online_cpu(cpu) { 119 for_each_online_cpu(cpu) {
137 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 120 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
138 121
139 if (!uci->valid) 122 if (!uci->valid)
140 continue; 123 continue;
141 error = work_on_cpu(cpu, update_for_cpu, &ufc); 124
125 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
126 error = microcode_ops->request_microcode_user(cpu, buf, size);
142 if (error < 0) 127 if (error < 0)
143 break; 128 goto out;
129 if (!error)
130 microcode_ops->apply_microcode(cpu);
144 } 131 }
132out:
133 set_cpus_allowed_ptr(current, &old);
145 return error; 134 return error;
146} 135}
147 136
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index deb5ebb32c3b..ed0c33761e6d 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -25,6 +25,8 @@ static int uv_bau_retry_limit __read_mostly;
25 25
26/* position of pnode (which is nasid>>1): */ 26/* position of pnode (which is nasid>>1): */
27static int uv_nshift __read_mostly; 27static int uv_nshift __read_mostly;
28/* base pnode in this partition */
29static int uv_partition_base_pnode __read_mostly;
28 30
29static unsigned long uv_mmask __read_mostly; 31static unsigned long uv_mmask __read_mostly;
30 32
@@ -32,6 +34,34 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
32static DEFINE_PER_CPU(struct bau_control, bau_control); 34static DEFINE_PER_CPU(struct bau_control, bau_control);
33 35
34/* 36/*
37 * Determine the first node on a blade.
38 */
39static int __init blade_to_first_node(int blade)
40{
41 int node, b;
42
43 for_each_online_node(node) {
44 b = uv_node_to_blade_id(node);
45 if (blade == b)
46 return node;
47 }
48 return -1; /* shouldn't happen */
49}
50
51/*
52 * Determine the apicid of the first cpu on a blade.
53 */
54static int __init blade_to_first_apicid(int blade)
55{
56 int cpu;
57
58 for_each_present_cpu(cpu)
59 if (blade == uv_cpu_to_blade_id(cpu))
60 return per_cpu(x86_cpu_to_apicid, cpu);
61 return -1;
62}
63
64/*
35 * Free a software acknowledge hardware resource by clearing its Pending 65 * Free a software acknowledge hardware resource by clearing its Pending
36 * bit. This will return a reply to the sender. 66 * bit. This will return a reply to the sender.
37 * If the message has timed out, a reply has already been sent by the 67 * If the message has timed out, a reply has already been sent by the
@@ -67,7 +97,7 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
67 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot; 97 msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
68 cpu = uv_blade_processor_id(); 98 cpu = uv_blade_processor_id();
69 msg->number_of_cpus = 99 msg->number_of_cpus =
70 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id())); 100 uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
71 this_cpu_mask = 1UL << cpu; 101 this_cpu_mask = 1UL << cpu;
72 if (msp->seen_by.bits & this_cpu_mask) 102 if (msp->seen_by.bits & this_cpu_mask)
73 return; 103 return;
@@ -215,14 +245,14 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
215 * Returns @flush_mask if some remote flushing remains to be done. The 245 * Returns @flush_mask if some remote flushing remains to be done. The
216 * mask will have some bits still set. 246 * mask will have some bits still set.
217 */ 247 */
218const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade, 248const struct cpumask *uv_flush_send_and_wait(int cpu, int this_pnode,
219 struct bau_desc *bau_desc, 249 struct bau_desc *bau_desc,
220 struct cpumask *flush_mask) 250 struct cpumask *flush_mask)
221{ 251{
222 int completion_status = 0; 252 int completion_status = 0;
223 int right_shift; 253 int right_shift;
224 int tries = 0; 254 int tries = 0;
225 int blade; 255 int pnode;
226 int bit; 256 int bit;
227 unsigned long mmr_offset; 257 unsigned long mmr_offset;
228 unsigned long index; 258 unsigned long index;
@@ -265,8 +295,8 @@ const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
265 * use the IPI method of shootdown on them. 295 * use the IPI method of shootdown on them.
266 */ 296 */
267 for_each_cpu(bit, flush_mask) { 297 for_each_cpu(bit, flush_mask) {
268 blade = uv_cpu_to_blade_id(bit); 298 pnode = uv_cpu_to_pnode(bit);
269 if (blade == this_blade) 299 if (pnode == this_pnode)
270 continue; 300 continue;
271 cpumask_clear_cpu(bit, flush_mask); 301 cpumask_clear_cpu(bit, flush_mask);
272 } 302 }
@@ -309,16 +339,16 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
309 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask); 339 struct cpumask *flush_mask = __get_cpu_var(uv_flush_tlb_mask);
310 int i; 340 int i;
311 int bit; 341 int bit;
312 int blade; 342 int pnode;
313 int uv_cpu; 343 int uv_cpu;
314 int this_blade; 344 int this_pnode;
315 int locals = 0; 345 int locals = 0;
316 struct bau_desc *bau_desc; 346 struct bau_desc *bau_desc;
317 347
318 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu)); 348 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
319 349
320 uv_cpu = uv_blade_processor_id(); 350 uv_cpu = uv_blade_processor_id();
321 this_blade = uv_numa_blade_id(); 351 this_pnode = uv_hub_info->pnode;
322 bau_desc = __get_cpu_var(bau_control).descriptor_base; 352 bau_desc = __get_cpu_var(bau_control).descriptor_base;
323 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu; 353 bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
324 354
@@ -326,13 +356,14 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
326 356
327 i = 0; 357 i = 0;
328 for_each_cpu(bit, flush_mask) { 358 for_each_cpu(bit, flush_mask) {
329 blade = uv_cpu_to_blade_id(bit); 359 pnode = uv_cpu_to_pnode(bit);
330 BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); 360 BUG_ON(pnode > (UV_DISTRIBUTION_SIZE - 1));
331 if (blade == this_blade) { 361 if (pnode == this_pnode) {
332 locals++; 362 locals++;
333 continue; 363 continue;
334 } 364 }
335 bau_node_set(blade, &bau_desc->distribution); 365 bau_node_set(pnode - uv_partition_base_pnode,
366 &bau_desc->distribution);
336 i++; 367 i++;
337 } 368 }
338 if (i == 0) { 369 if (i == 0) {
@@ -350,7 +381,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
350 bau_desc->payload.address = va; 381 bau_desc->payload.address = va;
351 bau_desc->payload.sending_cpu = cpu; 382 bau_desc->payload.sending_cpu = cpu;
352 383
353 return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask); 384 return uv_flush_send_and_wait(uv_cpu, this_pnode, bau_desc, flush_mask);
354} 385}
355 386
356/* 387/*
@@ -418,24 +449,58 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
418 set_irq_regs(old_regs); 449 set_irq_regs(old_regs);
419} 450}
420 451
452/*
453 * uv_enable_timeouts
454 *
455 * Each target blade (i.e. blades that have cpus) needs to have
456 * shootdown message timeouts enabled. The timeout does not cause
457 * an interrupt, but causes an error message to be returned to
458 * the sender.
459 */
421static void uv_enable_timeouts(void) 460static void uv_enable_timeouts(void)
422{ 461{
423 int i;
424 int blade; 462 int blade;
425 int last_blade; 463 int nblades;
426 int pnode; 464 int pnode;
427 int cur_cpu = 0; 465 unsigned long mmr_image;
428 unsigned long apicid;
429 466
430 last_blade = -1; 467 nblades = uv_num_possible_blades();
431 for_each_online_node(i) { 468
432 blade = uv_node_to_blade_id(i); 469 for (blade = 0; blade < nblades; blade++) {
433 if (blade == last_blade) 470 if (!uv_blade_nr_possible_cpus(blade))
434 continue; 471 continue;
435 last_blade = blade; 472
436 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
437 pnode = uv_blade_to_pnode(blade); 473 pnode = uv_blade_to_pnode(blade);
438 cur_cpu += uv_blade_nr_possible_cpus(i); 474 mmr_image =
475 uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
476 /*
477 * Set the timeout period and then lock it in, in three
478 * steps; captures and locks in the period.
479 *
480 * To program the period, the SOFT_ACK_MODE must be off.
481 */
482 mmr_image &= ~((unsigned long)1 <<
483 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
484 uv_write_global_mmr64
485 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
486 /*
487 * Set the 4-bit period.
488 */
489 mmr_image &= ~((unsigned long)0xf <<
490 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
491 mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
492 UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT);
493 uv_write_global_mmr64
494 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
495 /*
496 * Subsequent reversals of the timebase bit (3) cause an
497 * immediate timeout of one or all INTD resources as
498 * indicated in bits 2:0 (7 causes all of them to timeout).
499 */
500 mmr_image |= ((unsigned long)1 <<
501 UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT);
502 uv_write_global_mmr64
503 (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
439 } 504 }
440} 505}
441 506
@@ -482,8 +547,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
482 stat->requestee, stat->onetlb, stat->alltlb, 547 stat->requestee, stat->onetlb, stat->alltlb,
483 stat->s_retry, stat->d_retry, stat->ptc_i); 548 stat->s_retry, stat->d_retry, stat->ptc_i);
484 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n", 549 seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
485 uv_read_global_mmr64(uv_blade_to_pnode 550 uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
486 (uv_cpu_to_blade_id(cpu)),
487 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE), 551 UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
488 stat->sflush, stat->dflush, 552 stat->sflush, stat->dflush,
489 stat->retriesok, stat->nomsg, 553 stat->retriesok, stat->nomsg,
@@ -617,16 +681,18 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
617 * finish the initialization of the per-blade control structures 681 * finish the initialization of the per-blade control structures
618 */ 682 */
619static void __init 683static void __init
620uv_table_bases_finish(int blade, int node, int cur_cpu, 684uv_table_bases_finish(int blade,
621 struct bau_control *bau_tablesp, 685 struct bau_control *bau_tablesp,
622 struct bau_desc *adp) 686 struct bau_desc *adp)
623{ 687{
624 struct bau_control *bcp; 688 struct bau_control *bcp;
625 int i; 689 int cpu;
626 690
627 for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) { 691 for_each_present_cpu(cpu) {
628 bcp = (struct bau_control *)&per_cpu(bau_control, i); 692 if (blade != uv_cpu_to_blade_id(cpu))
693 continue;
629 694
695 bcp = (struct bau_control *)&per_cpu(bau_control, cpu);
630 bcp->bau_msg_head = bau_tablesp->va_queue_first; 696 bcp->bau_msg_head = bau_tablesp->va_queue_first;
631 bcp->va_queue_first = bau_tablesp->va_queue_first; 697 bcp->va_queue_first = bau_tablesp->va_queue_first;
632 bcp->va_queue_last = bau_tablesp->va_queue_last; 698 bcp->va_queue_last = bau_tablesp->va_queue_last;
@@ -649,11 +715,10 @@ uv_activation_descriptor_init(int node, int pnode)
649 struct bau_desc *adp; 715 struct bau_desc *adp;
650 struct bau_desc *ad2; 716 struct bau_desc *ad2;
651 717
652 adp = (struct bau_desc *) 718 adp = (struct bau_desc *)kmalloc_node(16384, GFP_KERNEL, node);
653 kmalloc_node(16384, GFP_KERNEL, node);
654 BUG_ON(!adp); 719 BUG_ON(!adp);
655 720
656 	pa = __pa((unsigned long)adp); 721 	pa = uv_gpa(adp); /* need the real nasid */
657 n = pa >> uv_nshift; 722 n = pa >> uv_nshift;
658 m = pa & uv_mmask; 723 m = pa & uv_mmask;
659 724
@@ -667,8 +732,12 @@ uv_activation_descriptor_init(int node, int pnode)
667 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) { 732 for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
668 memset(ad2, 0, sizeof(struct bau_desc)); 733 memset(ad2, 0, sizeof(struct bau_desc));
669 ad2->header.sw_ack_flag = 1; 734 ad2->header.sw_ack_flag = 1;
670 ad2->header.base_dest_nodeid = 735 /*
671 uv_blade_to_pnode(uv_cpu_to_blade_id(0)); 736 * base_dest_nodeid is the first node in the partition, so
737 * the bit map will indicate partition-relative node numbers.
738 * note that base_dest_nodeid is actually a nasid.
739 */
740 ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
672 ad2->header.command = UV_NET_ENDPOINT_INTD; 741 ad2->header.command = UV_NET_ENDPOINT_INTD;
673 ad2->header.int_both = 1; 742 ad2->header.int_both = 1;
674 /* 743 /*
@@ -686,6 +755,8 @@ static struct bau_payload_queue_entry * __init
686uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp) 755uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
687{ 756{
688 struct bau_payload_queue_entry *pqp; 757 struct bau_payload_queue_entry *pqp;
758 unsigned long pa;
759 int pn;
689 char *cp; 760 char *cp;
690 761
691 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 762 pqp = (struct bau_payload_queue_entry *) kmalloc_node(
@@ -696,10 +767,14 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
696 cp = (char *)pqp + 31; 767 cp = (char *)pqp + 31;
697 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5); 768 pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
698 bau_tablesp->va_queue_first = pqp; 769 bau_tablesp->va_queue_first = pqp;
770 /*
771 * need the pnode of where the memory was really allocated
772 */
773 pa = uv_gpa(pqp);
774 pn = pa >> uv_nshift;
699 uv_write_global_mmr64(pnode, 775 uv_write_global_mmr64(pnode,
700 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, 776 UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
701 ((unsigned long)pnode << 777 ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
702 UV_PAYLOADQ_PNODE_SHIFT) |
703 uv_physnodeaddr(pqp)); 778 uv_physnodeaddr(pqp));
704 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, 779 uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
705 uv_physnodeaddr(pqp)); 780 uv_physnodeaddr(pqp));
@@ -715,8 +790,9 @@ uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
715/* 790/*
716 * Initialization of each UV blade's structures 791 * Initialization of each UV blade's structures
717 */ 792 */
718static int __init uv_init_blade(int blade, int node, int cur_cpu) 793static int __init uv_init_blade(int blade)
719{ 794{
795 int node;
720 int pnode; 796 int pnode;
721 unsigned long pa; 797 unsigned long pa;
722 unsigned long apicid; 798 unsigned long apicid;
@@ -724,16 +800,17 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
724 struct bau_payload_queue_entry *pqp; 800 struct bau_payload_queue_entry *pqp;
725 struct bau_control *bau_tablesp; 801 struct bau_control *bau_tablesp;
726 802
803 node = blade_to_first_node(blade);
727 bau_tablesp = uv_table_bases_init(blade, node); 804 bau_tablesp = uv_table_bases_init(blade, node);
728 pnode = uv_blade_to_pnode(blade); 805 pnode = uv_blade_to_pnode(blade);
729 adp = uv_activation_descriptor_init(node, pnode); 806 adp = uv_activation_descriptor_init(node, pnode);
730 pqp = uv_payload_queue_init(node, pnode, bau_tablesp); 807 pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
731 uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp); 808 uv_table_bases_finish(blade, bau_tablesp, adp);
732 /* 809 /*
733 * the below initialization can't be in firmware because the 810 * the below initialization can't be in firmware because the
734 * messaging IRQ will be determined by the OS 811 * messaging IRQ will be determined by the OS
735 */ 812 */
736 apicid = per_cpu(x86_cpu_to_apicid, cur_cpu); 813 apicid = blade_to_first_apicid(blade);
737 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); 814 pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
738 if ((pa & 0xff) != UV_BAU_MESSAGE) { 815 if ((pa & 0xff) != UV_BAU_MESSAGE) {
739 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 816 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -748,9 +825,7 @@ static int __init uv_init_blade(int blade, int node, int cur_cpu)
748static int __init uv_bau_init(void) 825static int __init uv_bau_init(void)
749{ 826{
750 int blade; 827 int blade;
751 int node;
752 int nblades; 828 int nblades;
753 int last_blade;
754 int cur_cpu; 829 int cur_cpu;
755 830
756 if (!is_uv_system()) 831 if (!is_uv_system())
@@ -763,29 +838,21 @@ static int __init uv_bau_init(void)
763 uv_bau_retry_limit = 1; 838 uv_bau_retry_limit = 1;
764 uv_nshift = uv_hub_info->n_val; 839 uv_nshift = uv_hub_info->n_val;
765 uv_mmask = (1UL << uv_hub_info->n_val) - 1; 840 uv_mmask = (1UL << uv_hub_info->n_val) - 1;
766 nblades = 0; 841 nblades = uv_num_possible_blades();
767 last_blade = -1; 842
768 cur_cpu = 0;
769 for_each_online_node(node) {
770 blade = uv_node_to_blade_id(node);
771 if (blade == last_blade)
772 continue;
773 last_blade = blade;
774 nblades++;
775 }
776 uv_bau_table_bases = (struct bau_control **) 843 uv_bau_table_bases = (struct bau_control **)
777 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL); 844 kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
778 BUG_ON(!uv_bau_table_bases); 845 BUG_ON(!uv_bau_table_bases);
779 846
780 last_blade = -1; 847 uv_partition_base_pnode = 0x7fffffff;
781 for_each_online_node(node) { 848 for (blade = 0; blade < nblades; blade++)
782 blade = uv_node_to_blade_id(node); 849 if (uv_blade_nr_possible_cpus(blade) &&
783 if (blade == last_blade) 850 (uv_blade_to_pnode(blade) < uv_partition_base_pnode))
784 continue; 851 uv_partition_base_pnode = uv_blade_to_pnode(blade);
785 last_blade = blade; 852 for (blade = 0; blade < nblades; blade++)
786 uv_init_blade(blade, node, cur_cpu); 853 if (uv_blade_nr_possible_cpus(blade))
787 cur_cpu += uv_blade_nr_possible_cpus(blade); 854 uv_init_blade(blade);
788 } 855
789 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1); 856 alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
790 uv_enable_timeouts(); 857 uv_enable_timeouts();
791 858
diff --git a/arch/x86/kernel/uv_sysfs.c b/arch/x86/kernel/uv_sysfs.c
index 67f9b9dbf800..36afb98675a4 100644
--- a/arch/x86/kernel/uv_sysfs.c
+++ b/arch/x86/kernel/uv_sysfs.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/sysdev.h> 22#include <linux/sysdev.h>
23#include <asm/uv/bios.h> 23#include <asm/uv/bios.h>
24#include <asm/uv/uv.h>
24 25
25struct kobject *sgi_uv_kobj; 26struct kobject *sgi_uv_kobj;
26 27
@@ -47,6 +48,9 @@ static int __init sgi_uv_sysfs_init(void)
47{ 48{
48 unsigned long ret; 49 unsigned long ret;
49 50
51 if (!is_uv_system())
52 return -ENODEV;
53
50 if (!sgi_uv_kobj) 54 if (!sgi_uv_kobj)
51 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj); 55 sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
52 if (!sgi_uv_kobj) { 56 if (!sgi_uv_kobj) {
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 631f6f44460a..c48fa670d221 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -17,9 +17,6 @@
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19 19
20#define REQ_SYNC 1
21#define REQ_ASYNC 0
22
23/* 20/*
24 * See Documentation/block/as-iosched.txt 21 * See Documentation/block/as-iosched.txt
25 */ 22 */
@@ -93,7 +90,7 @@ struct as_data {
93 struct list_head fifo_list[2]; 90 struct list_head fifo_list[2];
94 91
95 struct request *next_rq[2]; /* next in sort order */ 92 struct request *next_rq[2]; /* next in sort order */
96 sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ 93 sector_t last_sector[2]; /* last SYNC & ASYNC sectors */
97 94
98 unsigned long exit_prob; /* probability a task will exit while 95 unsigned long exit_prob; /* probability a task will exit while
99 being waited on */ 96 being waited on */
@@ -109,7 +106,7 @@ struct as_data {
109 unsigned long last_check_fifo[2]; 106 unsigned long last_check_fifo[2];
110 int changed_batch; /* 1: waiting for old batch to end */ 107 int changed_batch; /* 1: waiting for old batch to end */
111 int new_batch; /* 1: waiting on first read complete */ 108 int new_batch; /* 1: waiting on first read complete */
112 int batch_data_dir; /* current batch REQ_SYNC / REQ_ASYNC */ 109 int batch_data_dir; /* current batch SYNC / ASYNC */
113 int write_batch_count; /* max # of reqs in a write batch */ 110 int write_batch_count; /* max # of reqs in a write batch */
114 int current_write_count; /* how many requests left this batch */ 111 int current_write_count; /* how many requests left this batch */
115 int write_batch_idled; /* has the write batch gone idle? */ 112 int write_batch_idled; /* has the write batch gone idle? */
@@ -554,7 +551,7 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
554 if (aic == NULL) 551 if (aic == NULL)
555 return; 552 return;
556 553
557 if (data_dir == REQ_SYNC) { 554 if (data_dir == BLK_RW_SYNC) {
558 unsigned long in_flight = atomic_read(&aic->nr_queued) 555 unsigned long in_flight = atomic_read(&aic->nr_queued)
559 + atomic_read(&aic->nr_dispatched); 556 + atomic_read(&aic->nr_dispatched);
560 spin_lock(&aic->lock); 557 spin_lock(&aic->lock);
@@ -811,7 +808,7 @@ static void as_update_rq(struct as_data *ad, struct request *rq)
811 */ 808 */
812static void update_write_batch(struct as_data *ad) 809static void update_write_batch(struct as_data *ad)
813{ 810{
814 unsigned long batch = ad->batch_expire[REQ_ASYNC]; 811 unsigned long batch = ad->batch_expire[BLK_RW_ASYNC];
815 long write_time; 812 long write_time;
816 813
817 write_time = (jiffies - ad->current_batch_expires) + batch; 814 write_time = (jiffies - ad->current_batch_expires) + batch;
@@ -855,7 +852,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
855 kblockd_schedule_work(q, &ad->antic_work); 852 kblockd_schedule_work(q, &ad->antic_work);
856 ad->changed_batch = 0; 853 ad->changed_batch = 0;
857 854
858 if (ad->batch_data_dir == REQ_SYNC) 855 if (ad->batch_data_dir == BLK_RW_SYNC)
859 ad->new_batch = 1; 856 ad->new_batch = 1;
860 } 857 }
861 WARN_ON(ad->nr_dispatched == 0); 858 WARN_ON(ad->nr_dispatched == 0);
@@ -869,7 +866,7 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
869 if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) { 866 if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
870 update_write_batch(ad); 867 update_write_batch(ad);
871 ad->current_batch_expires = jiffies + 868 ad->current_batch_expires = jiffies +
872 ad->batch_expire[REQ_SYNC]; 869 ad->batch_expire[BLK_RW_SYNC];
873 ad->new_batch = 0; 870 ad->new_batch = 0;
874 } 871 }
875 872
@@ -960,7 +957,7 @@ static inline int as_batch_expired(struct as_data *ad)
960 if (ad->changed_batch || ad->new_batch) 957 if (ad->changed_batch || ad->new_batch)
961 return 0; 958 return 0;
962 959
963 if (ad->batch_data_dir == REQ_SYNC) 960 if (ad->batch_data_dir == BLK_RW_SYNC)
964 /* TODO! add a check so a complete fifo gets written? */ 961 /* TODO! add a check so a complete fifo gets written? */
965 return time_after(jiffies, ad->current_batch_expires); 962 return time_after(jiffies, ad->current_batch_expires);
966 963
@@ -986,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
986 */ 983 */
987 ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; 984 ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
988 985
989 if (data_dir == REQ_SYNC) { 986 if (data_dir == BLK_RW_SYNC) {
990 struct io_context *ioc = RQ_IOC(rq); 987 struct io_context *ioc = RQ_IOC(rq);
991 /* In case we have to anticipate after this */ 988 /* In case we have to anticipate after this */
992 copy_io_context(&ad->io_context, &ioc); 989 copy_io_context(&ad->io_context, &ioc);
@@ -1025,41 +1022,41 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
1025static int as_dispatch_request(struct request_queue *q, int force) 1022static int as_dispatch_request(struct request_queue *q, int force)
1026{ 1023{
1027 struct as_data *ad = q->elevator->elevator_data; 1024 struct as_data *ad = q->elevator->elevator_data;
1028 const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); 1025 const int reads = !list_empty(&ad->fifo_list[BLK_RW_SYNC]);
1029 const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); 1026 const int writes = !list_empty(&ad->fifo_list[BLK_RW_ASYNC]);
1030 struct request *rq; 1027 struct request *rq;
1031 1028
1032 if (unlikely(force)) { 1029 if (unlikely(force)) {
1033 /* 1030 /*
1034 * Forced dispatch, accounting is useless. Reset 1031 * Forced dispatch, accounting is useless. Reset
1035 * accounting states and dump fifo_lists. Note that 1032 * accounting states and dump fifo_lists. Note that
1036 * batch_data_dir is reset to REQ_SYNC to avoid 1033 * batch_data_dir is reset to BLK_RW_SYNC to avoid
1037 * screwing write batch accounting as write batch 1034 * screwing write batch accounting as write batch
1038 * accounting occurs on W->R transition. 1035 * accounting occurs on W->R transition.
1039 */ 1036 */
1040 int dispatched = 0; 1037 int dispatched = 0;
1041 1038
1042 ad->batch_data_dir = REQ_SYNC; 1039 ad->batch_data_dir = BLK_RW_SYNC;
1043 ad->changed_batch = 0; 1040 ad->changed_batch = 0;
1044 ad->new_batch = 0; 1041 ad->new_batch = 0;
1045 1042
1046 while (ad->next_rq[REQ_SYNC]) { 1043 while (ad->next_rq[BLK_RW_SYNC]) {
1047 as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]); 1044 as_move_to_dispatch(ad, ad->next_rq[BLK_RW_SYNC]);
1048 dispatched++; 1045 dispatched++;
1049 } 1046 }
1050 ad->last_check_fifo[REQ_SYNC] = jiffies; 1047 ad->last_check_fifo[BLK_RW_SYNC] = jiffies;
1051 1048
1052 while (ad->next_rq[REQ_ASYNC]) { 1049 while (ad->next_rq[BLK_RW_ASYNC]) {
1053 as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]); 1050 as_move_to_dispatch(ad, ad->next_rq[BLK_RW_ASYNC]);
1054 dispatched++; 1051 dispatched++;
1055 } 1052 }
1056 ad->last_check_fifo[REQ_ASYNC] = jiffies; 1053 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
1057 1054
1058 return dispatched; 1055 return dispatched;
1059 } 1056 }
1060 1057
1061 /* Signal that the write batch was uncontended, so we can't time it */ 1058 /* Signal that the write batch was uncontended, so we can't time it */
1062 if (ad->batch_data_dir == REQ_ASYNC && !reads) { 1059 if (ad->batch_data_dir == BLK_RW_ASYNC && !reads) {
1063 if (ad->current_write_count == 0 || !writes) 1060 if (ad->current_write_count == 0 || !writes)
1064 ad->write_batch_idled = 1; 1061 ad->write_batch_idled = 1;
1065 } 1062 }
@@ -1076,8 +1073,8 @@ static int as_dispatch_request(struct request_queue *q, int force)
1076 */ 1073 */
1077 rq = ad->next_rq[ad->batch_data_dir]; 1074 rq = ad->next_rq[ad->batch_data_dir];
1078 1075
1079 if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) { 1076 if (ad->batch_data_dir == BLK_RW_SYNC && ad->antic_expire) {
1080 if (as_fifo_expired(ad, REQ_SYNC)) 1077 if (as_fifo_expired(ad, BLK_RW_SYNC))
1081 goto fifo_expired; 1078 goto fifo_expired;
1082 1079
1083 if (as_can_anticipate(ad, rq)) { 1080 if (as_can_anticipate(ad, rq)) {
@@ -1090,7 +1087,7 @@ static int as_dispatch_request(struct request_queue *q, int force)
1090 /* we have a "next request" */ 1087 /* we have a "next request" */
1091 if (reads && !writes) 1088 if (reads && !writes)
1092 ad->current_batch_expires = 1089 ad->current_batch_expires =
1093 jiffies + ad->batch_expire[REQ_SYNC]; 1090 jiffies + ad->batch_expire[BLK_RW_SYNC];
1094 goto dispatch_request; 1091 goto dispatch_request;
1095 } 1092 }
1096 } 1093 }
@@ -1101,20 +1098,20 @@ static int as_dispatch_request(struct request_queue *q, int force)
1101 */ 1098 */
1102 1099
1103 if (reads) { 1100 if (reads) {
1104 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC])); 1101 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_SYNC]));
1105 1102
1106 if (writes && ad->batch_data_dir == REQ_SYNC) 1103 if (writes && ad->batch_data_dir == BLK_RW_SYNC)
1107 /* 1104 /*
1108 * Last batch was a read, switch to writes 1105 * Last batch was a read, switch to writes
1109 */ 1106 */
1110 goto dispatch_writes; 1107 goto dispatch_writes;
1111 1108
1112 if (ad->batch_data_dir == REQ_ASYNC) { 1109 if (ad->batch_data_dir == BLK_RW_ASYNC) {
1113 WARN_ON(ad->new_batch); 1110 WARN_ON(ad->new_batch);
1114 ad->changed_batch = 1; 1111 ad->changed_batch = 1;
1115 } 1112 }
1116 ad->batch_data_dir = REQ_SYNC; 1113 ad->batch_data_dir = BLK_RW_SYNC;
1117 rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next); 1114 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_SYNC].next);
1118 ad->last_check_fifo[ad->batch_data_dir] = jiffies; 1115 ad->last_check_fifo[ad->batch_data_dir] = jiffies;
1119 goto dispatch_request; 1116 goto dispatch_request;
1120 } 1117 }
@@ -1125,9 +1122,9 @@ static int as_dispatch_request(struct request_queue *q, int force)
1125 1122
1126 if (writes) { 1123 if (writes) {
1127dispatch_writes: 1124dispatch_writes:
1128 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC])); 1125 BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[BLK_RW_ASYNC]));
1129 1126
1130 if (ad->batch_data_dir == REQ_SYNC) { 1127 if (ad->batch_data_dir == BLK_RW_SYNC) {
1131 ad->changed_batch = 1; 1128 ad->changed_batch = 1;
1132 1129
1133 /* 1130 /*
@@ -1137,11 +1134,11 @@ dispatch_writes:
1137 */ 1134 */
1138 ad->new_batch = 0; 1135 ad->new_batch = 0;
1139 } 1136 }
1140 ad->batch_data_dir = REQ_ASYNC; 1137 ad->batch_data_dir = BLK_RW_ASYNC;
1141 ad->current_write_count = ad->write_batch_count; 1138 ad->current_write_count = ad->write_batch_count;
1142 ad->write_batch_idled = 0; 1139 ad->write_batch_idled = 0;
1143 rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next); 1140 rq = rq_entry_fifo(ad->fifo_list[BLK_RW_ASYNC].next);
1144 ad->last_check_fifo[REQ_ASYNC] = jiffies; 1141 ad->last_check_fifo[BLK_RW_ASYNC] = jiffies;
1145 goto dispatch_request; 1142 goto dispatch_request;
1146 } 1143 }
1147 1144
@@ -1164,9 +1161,9 @@ fifo_expired:
1164 if (ad->nr_dispatched) 1161 if (ad->nr_dispatched)
1165 return 0; 1162 return 0;
1166 1163
1167 if (ad->batch_data_dir == REQ_ASYNC) 1164 if (ad->batch_data_dir == BLK_RW_ASYNC)
1168 ad->current_batch_expires = jiffies + 1165 ad->current_batch_expires = jiffies +
1169 ad->batch_expire[REQ_ASYNC]; 1166 ad->batch_expire[BLK_RW_ASYNC];
1170 else 1167 else
1171 ad->new_batch = 1; 1168 ad->new_batch = 1;
1172 1169
@@ -1238,8 +1235,8 @@ static int as_queue_empty(struct request_queue *q)
1238{ 1235{
1239 struct as_data *ad = q->elevator->elevator_data; 1236 struct as_data *ad = q->elevator->elevator_data;
1240 1237
1241 return list_empty(&ad->fifo_list[REQ_ASYNC]) 1238 return list_empty(&ad->fifo_list[BLK_RW_ASYNC])
1242 && list_empty(&ad->fifo_list[REQ_SYNC]); 1239 && list_empty(&ad->fifo_list[BLK_RW_SYNC]);
1243} 1240}
1244 1241
1245static int 1242static int
@@ -1346,8 +1343,8 @@ static void as_exit_queue(struct elevator_queue *e)
1346 del_timer_sync(&ad->antic_timer); 1343 del_timer_sync(&ad->antic_timer);
1347 cancel_work_sync(&ad->antic_work); 1344 cancel_work_sync(&ad->antic_work);
1348 1345
1349 BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC])); 1346 BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_SYNC]));
1350 BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC])); 1347 BUG_ON(!list_empty(&ad->fifo_list[BLK_RW_ASYNC]));
1351 1348
1352 put_io_context(ad->io_context); 1349 put_io_context(ad->io_context);
1353 kfree(ad); 1350 kfree(ad);
@@ -1372,18 +1369,18 @@ static void *as_init_queue(struct request_queue *q)
1372 init_timer(&ad->antic_timer); 1369 init_timer(&ad->antic_timer);
1373 INIT_WORK(&ad->antic_work, as_work_handler); 1370 INIT_WORK(&ad->antic_work, as_work_handler);
1374 1371
1375 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); 1372 INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_SYNC]);
1376 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); 1373 INIT_LIST_HEAD(&ad->fifo_list[BLK_RW_ASYNC]);
1377 ad->sort_list[REQ_SYNC] = RB_ROOT; 1374 ad->sort_list[BLK_RW_SYNC] = RB_ROOT;
1378 ad->sort_list[REQ_ASYNC] = RB_ROOT; 1375 ad->sort_list[BLK_RW_ASYNC] = RB_ROOT;
1379 ad->fifo_expire[REQ_SYNC] = default_read_expire; 1376 ad->fifo_expire[BLK_RW_SYNC] = default_read_expire;
1380 ad->fifo_expire[REQ_ASYNC] = default_write_expire; 1377 ad->fifo_expire[BLK_RW_ASYNC] = default_write_expire;
1381 ad->antic_expire = default_antic_expire; 1378 ad->antic_expire = default_antic_expire;
1382 ad->batch_expire[REQ_SYNC] = default_read_batch_expire; 1379 ad->batch_expire[BLK_RW_SYNC] = default_read_batch_expire;
1383 ad->batch_expire[REQ_ASYNC] = default_write_batch_expire; 1380 ad->batch_expire[BLK_RW_ASYNC] = default_write_batch_expire;
1384 1381
1385 ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC]; 1382 ad->current_batch_expires = jiffies + ad->batch_expire[BLK_RW_SYNC];
1386 ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10; 1383 ad->write_batch_count = ad->batch_expire[BLK_RW_ASYNC] / 10;
1387 if (ad->write_batch_count < 2) 1384 if (ad->write_batch_count < 2)
1388 ad->write_batch_count = 2; 1385 ad->write_batch_count = 2;
1389 1386
@@ -1432,11 +1429,11 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1432 struct as_data *ad = e->elevator_data; \ 1429 struct as_data *ad = e->elevator_data; \
1433 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \ 1430 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
1434} 1431}
1435SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]); 1432SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[BLK_RW_SYNC]);
1436SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]); 1433SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[BLK_RW_ASYNC]);
1437SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire); 1434SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
1438SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]); 1435SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[BLK_RW_SYNC]);
1439SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]); 1436SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[BLK_RW_ASYNC]);
1440#undef SHOW_FUNCTION 1437#undef SHOW_FUNCTION
1441 1438
1442#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ 1439#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
@@ -1451,13 +1448,14 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
1451 *(__PTR) = msecs_to_jiffies(*(__PTR)); \ 1448 *(__PTR) = msecs_to_jiffies(*(__PTR)); \
1452 return ret; \ 1449 return ret; \
1453} 1450}
1454STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX); 1451STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[BLK_RW_SYNC], 0, INT_MAX);
1455STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX); 1452STORE_FUNCTION(as_write_expire_store,
1453 &ad->fifo_expire[BLK_RW_ASYNC], 0, INT_MAX);
1456STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX); 1454STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
1457STORE_FUNCTION(as_read_batch_expire_store, 1455STORE_FUNCTION(as_read_batch_expire_store,
1458 &ad->batch_expire[REQ_SYNC], 0, INT_MAX); 1456 &ad->batch_expire[BLK_RW_SYNC], 0, INT_MAX);
1459STORE_FUNCTION(as_write_batch_expire_store, 1457STORE_FUNCTION(as_write_batch_expire_store,
1460 &ad->batch_expire[REQ_ASYNC], 0, INT_MAX); 1458 &ad->batch_expire[BLK_RW_ASYNC], 0, INT_MAX);
1461#undef STORE_FUNCTION 1459#undef STORE_FUNCTION
1462 1460
1463#define AS_ATTR(name) \ 1461#define AS_ATTR(name) \
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index f7dae57e6cab..20b4111fa050 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -319,9 +319,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
319 return -ENXIO; 319 return -ENXIO;
320 320
321 bio = bio_alloc(GFP_KERNEL, 0); 321 bio = bio_alloc(GFP_KERNEL, 0);
322 if (!bio)
323 return -ENOMEM;
324
325 bio->bi_end_io = bio_end_empty_barrier; 322 bio->bi_end_io = bio_end_empty_barrier;
326 bio->bi_private = &wait; 323 bio->bi_private = &wait;
327 bio->bi_bdev = bdev; 324 bio->bi_bdev = bdev;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 73f36beff5cd..cac4e9febe6a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -209,14 +209,14 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
209 ssize_t ret = queue_var_store(&stats, page, count); 209 ssize_t ret = queue_var_store(&stats, page, count);
210 210
211 spin_lock_irq(q->queue_lock); 211 spin_lock_irq(q->queue_lock);
212 elv_quisce_start(q); 212 elv_quiesce_start(q);
213 213
214 if (stats) 214 if (stats)
215 queue_flag_set(QUEUE_FLAG_IO_STAT, q); 215 queue_flag_set(QUEUE_FLAG_IO_STAT, q);
216 else 216 else
217 queue_flag_clear(QUEUE_FLAG_IO_STAT, q); 217 queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
218 218
219 elv_quisce_end(q); 219 elv_quiesce_end(q);
220 spin_unlock_irq(q->queue_lock); 220 spin_unlock_irq(q->queue_lock);
221 221
222 return ret; 222 return ret;
diff --git a/block/blk.h b/block/blk.h
index 24fcaeeaf620..5dfc41267a08 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -70,8 +70,8 @@ void blk_queue_congestion_threshold(struct request_queue *q);
70 70
71int blk_dev_init(void); 71int blk_dev_init(void);
72 72
73void elv_quisce_start(struct request_queue *q); 73void elv_quiesce_start(struct request_queue *q);
74void elv_quisce_end(struct request_queue *q); 74void elv_quiesce_end(struct request_queue *q);
75 75
76 76
77/* 77/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a4809de6fea6..0d3b70de3d80 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -56,9 +56,6 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
56#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) 56#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
57#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) 57#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
58 58
59#define ASYNC (0)
60#define SYNC (1)
61
62#define sample_valid(samples) ((samples) > 80) 59#define sample_valid(samples) ((samples) > 80)
63 60
64/* 61/*
@@ -83,6 +80,14 @@ struct cfq_data {
83 * rr list of queues with requests and the count of them 80 * rr list of queues with requests and the count of them
84 */ 81 */
85 struct cfq_rb_root service_tree; 82 struct cfq_rb_root service_tree;
83
84 /*
85 * Each priority tree is sorted by next_request position. These
86 * trees are used when determining if two or more queues are
87 * interleaving requests (see cfq_close_cooperator).
88 */
89 struct rb_root prio_trees[CFQ_PRIO_LISTS];
90
86 unsigned int busy_queues; 91 unsigned int busy_queues;
87 /* 92 /*
88 * Used to track any pending rt requests so we can pre-empt current 93 * Used to track any pending rt requests so we can pre-empt current
@@ -147,6 +152,8 @@ struct cfq_queue {
147 struct rb_node rb_node; 152 struct rb_node rb_node;
148 /* service_tree key */ 153 /* service_tree key */
149 unsigned long rb_key; 154 unsigned long rb_key;
155 /* prio tree member */
156 struct rb_node p_node;
150 /* sorted list of pending requests */ 157 /* sorted list of pending requests */
151 struct rb_root sort_list; 158 struct rb_root sort_list;
152 /* if fifo isn't expired, next request to serve */ 159 /* if fifo isn't expired, next request to serve */
@@ -185,6 +192,7 @@ enum cfqq_state_flags {
185 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ 192 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
186 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 193 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
187 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 194 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
195 CFQ_CFQQ_FLAG_coop, /* has done a coop jump of the queue */
188}; 196};
189 197
190#define CFQ_CFQQ_FNS(name) \ 198#define CFQ_CFQQ_FNS(name) \
@@ -211,6 +219,7 @@ CFQ_CFQQ_FNS(idle_window);
211CFQ_CFQQ_FNS(prio_changed); 219CFQ_CFQQ_FNS(prio_changed);
212CFQ_CFQQ_FNS(slice_new); 220CFQ_CFQQ_FNS(slice_new);
213CFQ_CFQQ_FNS(sync); 221CFQ_CFQQ_FNS(sync);
222CFQ_CFQQ_FNS(coop);
214#undef CFQ_CFQQ_FNS 223#undef CFQ_CFQQ_FNS
215 224
216#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ 225#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
@@ -419,13 +428,17 @@ static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
419 return NULL; 428 return NULL;
420} 429}
421 430
431static void rb_erase_init(struct rb_node *n, struct rb_root *root)
432{
433 rb_erase(n, root);
434 RB_CLEAR_NODE(n);
435}
436
422static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) 437static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
423{ 438{
424 if (root->left == n) 439 if (root->left == n)
425 root->left = NULL; 440 root->left = NULL;
426 441 rb_erase_init(n, &root->rb);
427 rb_erase(n, &root->rb);
428 RB_CLEAR_NODE(n);
429} 442}
430 443
431/* 444/*
@@ -470,8 +483,8 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
470 * requests waiting to be processed. It is sorted in the order that 483 * requests waiting to be processed. It is sorted in the order that
471 * we will service the queues. 484 * we will service the queues.
472 */ 485 */
473static void cfq_service_tree_add(struct cfq_data *cfqd, 486static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
474 struct cfq_queue *cfqq, int add_front) 487 int add_front)
475{ 488{
476 struct rb_node **p, *parent; 489 struct rb_node **p, *parent;
477 struct cfq_queue *__cfqq; 490 struct cfq_queue *__cfqq;
@@ -544,6 +557,63 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
544 rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb); 557 rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
545} 558}
546 559
560static struct cfq_queue *
561cfq_prio_tree_lookup(struct cfq_data *cfqd, int ioprio, sector_t sector,
562 struct rb_node **ret_parent, struct rb_node ***rb_link)
563{
564 struct rb_root *root = &cfqd->prio_trees[ioprio];
565 struct rb_node **p, *parent;
566 struct cfq_queue *cfqq = NULL;
567
568 parent = NULL;
569 p = &root->rb_node;
570 while (*p) {
571 struct rb_node **n;
572
573 parent = *p;
574 cfqq = rb_entry(parent, struct cfq_queue, p_node);
575
576 /*
577 * Sort strictly based on sector. Smallest to the left,
578 * largest to the right.
579 */
580 if (sector > cfqq->next_rq->sector)
581 n = &(*p)->rb_right;
582 else if (sector < cfqq->next_rq->sector)
583 n = &(*p)->rb_left;
584 else
585 break;
586 p = n;
587 }
588
589 *ret_parent = parent;
590 if (rb_link)
591 *rb_link = p;
592 return NULL;
593}
594
595static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
596{
597 struct rb_root *root = &cfqd->prio_trees[cfqq->ioprio];
598 struct rb_node **p, *parent;
599 struct cfq_queue *__cfqq;
600
601 if (!RB_EMPTY_NODE(&cfqq->p_node))
602 rb_erase_init(&cfqq->p_node, root);
603
604 if (cfq_class_idle(cfqq))
605 return;
606 if (!cfqq->next_rq)
607 return;
608
609 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->ioprio, cfqq->next_rq->sector,
610 &parent, &p);
611 BUG_ON(__cfqq);
612
613 rb_link_node(&cfqq->p_node, parent, p);
614 rb_insert_color(&cfqq->p_node, root);
615}
616
547/* 617/*
548 * Update cfqq's position in the service tree. 618 * Update cfqq's position in the service tree.
549 */ 619 */
@@ -552,8 +622,10 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
552 /* 622 /*
553 * Resorting requires the cfqq to be on the RR list already. 623 * Resorting requires the cfqq to be on the RR list already.
554 */ 624 */
555 if (cfq_cfqq_on_rr(cfqq)) 625 if (cfq_cfqq_on_rr(cfqq)) {
556 cfq_service_tree_add(cfqd, cfqq, 0); 626 cfq_service_tree_add(cfqd, cfqq, 0);
627 cfq_prio_tree_add(cfqd, cfqq);
628 }
557} 629}
558 630
559/* 631/*
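
The prio_trees[] added above keep one tree per ioprio, keyed by the sector of each queue's next_rq; because the key is derived from the queue's pending I/O, a queue must be erased and re-inserted whenever its next_rq changes (which is exactly what cfq_prio_tree_add() does after the RB_EMPTY_NODE check). A stand-alone sketch of the same "re-sort on key change" idea using a sorted array instead of the kernel rbtree (all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a cfq_queue: only the key we sort on. */
struct toy_queue { unsigned long next_sector; };

static int cmp_by_sector(const void *a, const void *b)
{
	const struct toy_queue * const *qa = a;
	const struct toy_queue * const *qb = b;

	if ((*qa)->next_sector < (*qb)->next_sector) return -1;
	if ((*qa)->next_sector > (*qb)->next_sector) return 1;
	return 0;
}

/* The real code erases and re-links the rb_node; here we simply re-sort. */
static void prio_tree_resort(struct toy_queue **tree, int nr)
{
	qsort(tree, nr, sizeof(*tree), cmp_by_sector);
}

int main(void)
{
	struct toy_queue a = { 100 }, b = { 4000 }, c = { 250 };
	struct toy_queue *tree[] = { &a, &b, &c };
	int i;

	prio_tree_resort(tree, 3);
	/* queue b's next request moves: its key changes, so it must be re-positioned */
	b.next_sector = 120;
	prio_tree_resort(tree, 3);

	for (i = 0; i < 3; i++)
		printf("%lu\n", tree[i]->next_sector);   /* 100 120 250 */
	return 0;
}
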
@@ -584,6 +656,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
584 656
585 if (!RB_EMPTY_NODE(&cfqq->rb_node)) 657 if (!RB_EMPTY_NODE(&cfqq->rb_node))
586 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 658 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
659 if (!RB_EMPTY_NODE(&cfqq->p_node))
660 rb_erase_init(&cfqq->p_node, &cfqd->prio_trees[cfqq->ioprio]);
587 661
588 BUG_ON(!cfqd->busy_queues); 662 BUG_ON(!cfqd->busy_queues);
589 cfqd->busy_queues--; 663 cfqd->busy_queues--;
@@ -613,7 +687,7 @@ static void cfq_add_rq_rb(struct request *rq)
613{ 687{
614 struct cfq_queue *cfqq = RQ_CFQQ(rq); 688 struct cfq_queue *cfqq = RQ_CFQQ(rq);
615 struct cfq_data *cfqd = cfqq->cfqd; 689 struct cfq_data *cfqd = cfqq->cfqd;
616 struct request *__alias; 690 struct request *__alias, *prev;
617 691
618 cfqq->queued[rq_is_sync(rq)]++; 692 cfqq->queued[rq_is_sync(rq)]++;
619 693
@@ -630,7 +704,15 @@ static void cfq_add_rq_rb(struct request *rq)
630 /* 704 /*
631 * check if this request is a better next-serve candidate 705 * check if this request is a better next-serve candidate
632 */ 706 */
707 prev = cfqq->next_rq;
633 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq); 708 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
709
710 /*
711 * adjust priority tree position, if ->next_rq changes
712 */
713 if (prev != cfqq->next_rq)
714 cfq_prio_tree_add(cfqd, cfqq);
715
634 BUG_ON(!cfqq->next_rq); 716 BUG_ON(!cfqq->next_rq);
635} 717}
636 718
@@ -843,11 +925,15 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
843/* 925/*
844 * Get and set a new active queue for service. 926 * Get and set a new active queue for service.
845 */ 927 */
846static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) 928static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
929 struct cfq_queue *cfqq)
847{ 930{
848 struct cfq_queue *cfqq; 931 if (!cfqq) {
932 cfqq = cfq_get_next_queue(cfqd);
933 if (cfqq)
934 cfq_clear_cfqq_coop(cfqq);
935 }
849 936
850 cfqq = cfq_get_next_queue(cfqd);
851 __cfq_set_active_queue(cfqd, cfqq); 937 __cfq_set_active_queue(cfqd, cfqq);
852 return cfqq; 938 return cfqq;
853} 939}
@@ -871,17 +957,89 @@ static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
871 return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean; 957 return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
872} 958}
873 959
874static int cfq_close_cooperator(struct cfq_data *cfq_data, 960static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
875 struct cfq_queue *cfqq) 961 struct cfq_queue *cur_cfqq)
962{
963 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->ioprio];
964 struct rb_node *parent, *node;
965 struct cfq_queue *__cfqq;
966 sector_t sector = cfqd->last_position;
967
968 if (RB_EMPTY_ROOT(root))
969 return NULL;
970
971 /*
972 * First, if we find a request starting at the end of the last
973 * request, choose it.
974 */
975 __cfqq = cfq_prio_tree_lookup(cfqd, cur_cfqq->ioprio,
976 sector, &parent, NULL);
977 if (__cfqq)
978 return __cfqq;
979
980 /*
981 * If the exact sector wasn't found, the parent of the NULL leaf
982 * will contain the closest sector.
983 */
984 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
985 if (cfq_rq_close(cfqd, __cfqq->next_rq))
986 return __cfqq;
987
988 if (__cfqq->next_rq->sector < sector)
989 node = rb_next(&__cfqq->p_node);
990 else
991 node = rb_prev(&__cfqq->p_node);
992 if (!node)
993 return NULL;
994
995 __cfqq = rb_entry(node, struct cfq_queue, p_node);
996 if (cfq_rq_close(cfqd, __cfqq->next_rq))
997 return __cfqq;
998
999 return NULL;
1000}
1001
1002/*
1003 * cfqd - obvious
1004 * cur_cfqq - passed in so that we don't decide that the current queue is
1005 * closely cooperating with itself.
1006 *
1007 * So, basically we're assuming that cur_cfqq has dispatched at least
1008 * one request, and that cfqd->last_position reflects a position on the disk
1009 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1010 * assumption.
1011 */
1012static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1013 struct cfq_queue *cur_cfqq,
1014 int probe)
876{ 1015{
1016 struct cfq_queue *cfqq;
1017
1018 /*
1019 * A valid cfq_io_context is necessary to compare requests against
1020 * the seek_mean of the current cfqq.
1021 */
1022 if (!cfqd->active_cic)
1023 return NULL;
1024
877 /* 1025 /*
878 * We should notice if some of the queues are cooperating, eg 1026 * We should notice if some of the queues are cooperating, eg
879 * working closely on the same area of the disk. In that case, 1027 * working closely on the same area of the disk. In that case,
880 * we can group them together and don't waste time idling. 1028 * we can group them together and don't waste time idling.
881 */ 1029 */
882 return 0; 1030 cfqq = cfqq_close(cfqd, cur_cfqq);
1031 if (!cfqq)
1032 return NULL;
1033
1034 if (cfq_cfqq_coop(cfqq))
1035 return NULL;
1036
1037 if (!probe)
1038 cfq_mark_cfqq_coop(cfqq);
1039 return cfqq;
883} 1040}
884 1041
1042
885#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024)) 1043#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
886 1044
887static void cfq_arm_slice_timer(struct cfq_data *cfqd) 1045static void cfq_arm_slice_timer(struct cfq_data *cfqd)
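
cfqq_close() only accepts a neighbour whose next request passes cfq_rq_close(), i.e. one that lies within the current io context's mean seek distance of cfqd->last_position; that is what makes a queue a "close cooperator" worth switching to instead of idling. A stand-alone sketch of that distance test (names and the sample values are illustrative):

#include <stdio.h>

typedef unsigned long long toy_sector_t;

/* Absolute sector distance between a candidate request and the last head position. */
static toy_sector_t dist_from_last(toy_sector_t last_position, toy_sector_t rq_sector)
{
	return rq_sector >= last_position ? rq_sector - last_position
					  : last_position - rq_sector;
}

/*
 * A request counts as "close" when it is no further from the last completed
 * position than the submitting task's observed mean seek distance.
 */
static int rq_is_close(toy_sector_t last_position, toy_sector_t rq_sector,
		       toy_sector_t seek_mean)
{
	return dist_from_last(last_position, rq_sector) <= seek_mean;
}

int main(void)
{
	toy_sector_t last = 10000, mean = 64;

	printf("%d\n", rq_is_close(last, 10040, mean));  /* 1: within mean seek */
	printf("%d\n", rq_is_close(last, 90000, mean));  /* 0: far away         */
	return 0;
}
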
@@ -920,13 +1078,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
920 if (!cic || !atomic_read(&cic->ioc->nr_tasks)) 1078 if (!cic || !atomic_read(&cic->ioc->nr_tasks))
921 return; 1079 return;
922 1080
923 /*
924 * See if this prio level has a good candidate
925 */
926 if (cfq_close_cooperator(cfqd, cfqq) &&
927 (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
928 return;
929
930 cfq_mark_cfqq_wait_request(cfqq); 1081 cfq_mark_cfqq_wait_request(cfqq);
931 1082
932 /* 1083 /*
@@ -939,7 +1090,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
939 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT)); 1090 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
940 1091
941 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 1092 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
942 cfq_log(cfqd, "arm_idle: %lu", sl); 1093 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
943} 1094}
944 1095
945/* 1096/*
@@ -1003,7 +1154,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1003 */ 1154 */
1004static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 1155static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1005{ 1156{
1006 struct cfq_queue *cfqq; 1157 struct cfq_queue *cfqq, *new_cfqq = NULL;
1007 1158
1008 cfqq = cfqd->active_queue; 1159 cfqq = cfqd->active_queue;
1009 if (!cfqq) 1160 if (!cfqq)
@@ -1037,6 +1188,16 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1037 goto keep_queue; 1188 goto keep_queue;
1038 1189
1039 /* 1190 /*
1191 * If another queue has a request waiting within our mean seek
1192 * distance, let it run. The expire code will check for close
1193 * cooperators and put the close queue at the front of the service
1194 * tree.
1195 */
1196 new_cfqq = cfq_close_cooperator(cfqd, cfqq, 0);
1197 if (new_cfqq)
1198 goto expire;
1199
1200 /*
1040 * No requests pending. If the active queue still has requests in 1201 * No requests pending. If the active queue still has requests in
1041 * flight or is idling for a new request, allow either of these 1202 * flight or is idling for a new request, allow either of these
1042 * conditions to happen (or time out) before selecting a new queue. 1203 * conditions to happen (or time out) before selecting a new queue.
@@ -1050,7 +1211,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1050expire: 1211expire:
1051 cfq_slice_expired(cfqd, 0); 1212 cfq_slice_expired(cfqd, 0);
1052new_queue: 1213new_queue:
1053 cfqq = cfq_set_active_queue(cfqd); 1214 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
1054keep_queue: 1215keep_queue:
1055 return cfqq; 1216 return cfqq;
1056} 1217}
@@ -1333,14 +1494,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1333 if (ioc->ioc_data == cic) 1494 if (ioc->ioc_data == cic)
1334 rcu_assign_pointer(ioc->ioc_data, NULL); 1495 rcu_assign_pointer(ioc->ioc_data, NULL);
1335 1496
1336 if (cic->cfqq[ASYNC]) { 1497 if (cic->cfqq[BLK_RW_ASYNC]) {
1337 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]); 1498 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1338 cic->cfqq[ASYNC] = NULL; 1499 cic->cfqq[BLK_RW_ASYNC] = NULL;
1339 } 1500 }
1340 1501
1341 if (cic->cfqq[SYNC]) { 1502 if (cic->cfqq[BLK_RW_SYNC]) {
1342 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]); 1503 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1343 cic->cfqq[SYNC] = NULL; 1504 cic->cfqq[BLK_RW_SYNC] = NULL;
1344 } 1505 }
1345} 1506}
1346 1507
@@ -1449,17 +1610,18 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
1449 1610
1450 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1611 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1451 1612
1452 cfqq = cic->cfqq[ASYNC]; 1613 cfqq = cic->cfqq[BLK_RW_ASYNC];
1453 if (cfqq) { 1614 if (cfqq) {
1454 struct cfq_queue *new_cfqq; 1615 struct cfq_queue *new_cfqq;
1455 new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC); 1616 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
1617 GFP_ATOMIC);
1456 if (new_cfqq) { 1618 if (new_cfqq) {
1457 cic->cfqq[ASYNC] = new_cfqq; 1619 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
1458 cfq_put_queue(cfqq); 1620 cfq_put_queue(cfqq);
1459 } 1621 }
1460 } 1622 }
1461 1623
1462 cfqq = cic->cfqq[SYNC]; 1624 cfqq = cic->cfqq[BLK_RW_SYNC];
1463 if (cfqq) 1625 if (cfqq)
1464 cfq_mark_cfqq_prio_changed(cfqq); 1626 cfq_mark_cfqq_prio_changed(cfqq);
1465 1627
@@ -1510,6 +1672,7 @@ retry:
1510 } 1672 }
1511 1673
1512 RB_CLEAR_NODE(&cfqq->rb_node); 1674 RB_CLEAR_NODE(&cfqq->rb_node);
1675 RB_CLEAR_NODE(&cfqq->p_node);
1513 INIT_LIST_HEAD(&cfqq->fifo); 1676 INIT_LIST_HEAD(&cfqq->fifo);
1514 1677
1515 atomic_set(&cfqq->ref, 0); 1678 atomic_set(&cfqq->ref, 0);
@@ -1905,10 +2068,20 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1905 * Remember that we saw a request from this process, but 2068 * Remember that we saw a request from this process, but
1906 * don't start queuing just yet. Otherwise we risk seeing lots 2069 * don't start queuing just yet. Otherwise we risk seeing lots
1907 * of tiny requests, because we disrupt the normal plugging 2070 * of tiny requests, because we disrupt the normal plugging
1908 * and merging. 2071 * and merging. If the request is already larger than a single
2072 * page, let it rip immediately. For that case we assume that
2073 * merging is already done. Ditto for a busy system that
2074 * has other work pending, don't risk delaying until the
2075 * idle timer unplug to continue working.
1909 */ 2076 */
1910 if (cfq_cfqq_wait_request(cfqq)) 2077 if (cfq_cfqq_wait_request(cfqq)) {
2078 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2079 cfqd->busy_queues > 1) {
2080 del_timer(&cfqd->idle_slice_timer);
2081 blk_start_queueing(cfqd->queue);
2082 }
1911 cfq_mark_cfqq_must_dispatch(cfqq); 2083 cfq_mark_cfqq_must_dispatch(cfqq);
2084 }
1912 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { 2085 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
1913 /* 2086 /*
1914 * not the active queue - expire current slice if it is 2087 * not the active queue - expire current slice if it is
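
The reworked cfq_rq_enqueued() hunk above stops waiting as soon as the newly queued request is already bigger than one page or other queues are busy: in either case further merging is unlikely to pay off, so the idle timer is cancelled and dispatch is kicked immediately. A stand-alone sketch of that decision (the page-size constant and field names are illustrative):

#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

struct toy_cfqd {
	unsigned int busy_queues;   /* queues with requests pending */
};

/*
 * Return 1 when a request arriving on the queue we are idling for should be
 * dispatched immediately instead of waiting for more merge candidates.
 */
static int should_kick_now(const struct toy_cfqd *cfqd, unsigned int rq_bytes)
{
	return rq_bytes > TOY_PAGE_SIZE || cfqd->busy_queues > 1;
}

int main(void)
{
	struct toy_cfqd one_queue = { 1 }, many_queues = { 3 };

	printf("%d\n", should_kick_now(&one_queue, 512));     /* 0: keep waiting   */
	printf("%d\n", should_kick_now(&one_queue, 8192));    /* 1: already large  */
	printf("%d\n", should_kick_now(&many_queues, 512));   /* 1: system is busy */
	return 0;
}
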
@@ -1992,16 +2165,24 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
1992 * or if we want to idle in case it has no pending requests. 2165 * or if we want to idle in case it has no pending requests.
1993 */ 2166 */
1994 if (cfqd->active_queue == cfqq) { 2167 if (cfqd->active_queue == cfqq) {
2168 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2169
1995 if (cfq_cfqq_slice_new(cfqq)) { 2170 if (cfq_cfqq_slice_new(cfqq)) {
1996 cfq_set_prio_slice(cfqd, cfqq); 2171 cfq_set_prio_slice(cfqd, cfqq);
1997 cfq_clear_cfqq_slice_new(cfqq); 2172 cfq_clear_cfqq_slice_new(cfqq);
1998 } 2173 }
2174 /*
2175 * If there are no requests waiting in this queue, and
2176 * there are other queues ready to issue requests, AND
2177 * those other queues are issuing requests within our
2178 * mean seek distance, give them a chance to run instead
2179 * of idling.
2180 */
1999 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) 2181 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
2000 cfq_slice_expired(cfqd, 1); 2182 cfq_slice_expired(cfqd, 1);
2001 else if (sync && !rq_noidle(rq) && 2183 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq, 1) &&
2002 RB_EMPTY_ROOT(&cfqq->sort_list)) { 2184 sync && !rq_noidle(rq))
2003 cfq_arm_slice_timer(cfqd); 2185 cfq_arm_slice_timer(cfqd);
2004 }
2005 } 2186 }
2006 2187
2007 if (!cfqd->rq_in_driver) 2188 if (!cfqd->rq_in_driver)
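
With this hunk the idle timer is armed after a completion only when the active queue has drained, the completed request was sync and not marked no-idle, and a probe of the priority tree finds no close cooperator that could be serviced instead. A stand-alone sketch of the combined condition (all names are illustrative):

#include <stdio.h>

/* Inputs the completion path looks at before arming the idle timer. */
struct completion_state {
	int queue_empty;          /* no more requests sorted on this queue     */
	int has_close_cooperator; /* probe found another queue working nearby  */
	int rq_was_sync;          /* completed request was synchronous         */
	int rq_noidle;            /* request asked us not to idle after it     */
};

static int should_arm_idle_timer(const struct completion_state *s)
{
	return s->queue_empty && !s->has_close_cooperator &&
	       s->rq_was_sync && !s->rq_noidle;
}

int main(void)
{
	struct completion_state idle_ok   = { 1, 0, 1, 0 };
	struct completion_state coop_near = { 1, 1, 1, 0 };

	printf("%d\n", should_arm_idle_timer(&idle_ok));    /* 1: wait for more I/O  */
	printf("%d\n", should_arm_idle_timer(&coop_near));  /* 0: let cooperator run */
	return 0;
}
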
@@ -2062,7 +2243,7 @@ static int cfq_may_queue(struct request_queue *q, int rw)
2062 if (!cic) 2243 if (!cic)
2063 return ELV_MQUEUE_MAY; 2244 return ELV_MQUEUE_MAY;
2064 2245
2065 cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); 2246 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2066 if (cfqq) { 2247 if (cfqq) {
2067 cfq_init_prio_data(cfqq, cic->ioc); 2248 cfq_init_prio_data(cfqq, cic->ioc);
2068 cfq_prio_boost(cfqq); 2249 cfq_prio_boost(cfqq);
@@ -2152,11 +2333,10 @@ static void cfq_kick_queue(struct work_struct *work)
2152 struct cfq_data *cfqd = 2333 struct cfq_data *cfqd =
2153 container_of(work, struct cfq_data, unplug_work); 2334 container_of(work, struct cfq_data, unplug_work);
2154 struct request_queue *q = cfqd->queue; 2335 struct request_queue *q = cfqd->queue;
2155 unsigned long flags;
2156 2336
2157 spin_lock_irqsave(q->queue_lock, flags); 2337 spin_lock_irq(q->queue_lock);
2158 blk_start_queueing(q); 2338 blk_start_queueing(q);
2159 spin_unlock_irqrestore(q->queue_lock, flags); 2339 spin_unlock_irq(q->queue_lock);
2160} 2340}
2161 2341
2162/* 2342/*
diff --git a/block/elevator.c b/block/elevator.c
index fb81bcc14a8c..7073a9072577 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -590,7 +590,7 @@ void elv_drain_elevator(struct request_queue *q)
590/* 590/*
591 * Call with queue lock held, interrupts disabled 591 * Call with queue lock held, interrupts disabled
592 */ 592 */
593void elv_quisce_start(struct request_queue *q) 593void elv_quiesce_start(struct request_queue *q)
594{ 594{
595 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); 595 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
596 596
@@ -607,7 +607,7 @@ void elv_quisce_start(struct request_queue *q)
607 } 607 }
608} 608}
609 609
610void elv_quisce_end(struct request_queue *q) 610void elv_quiesce_end(struct request_queue *q)
611{ 611{
612 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); 612 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
613} 613}
@@ -1126,7 +1126,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1126 * Turn on BYPASS and drain all requests w/ elevator private data 1126 * Turn on BYPASS and drain all requests w/ elevator private data
1127 */ 1127 */
1128 spin_lock_irq(q->queue_lock); 1128 spin_lock_irq(q->queue_lock);
1129 elv_quisce_start(q); 1129 elv_quiesce_start(q);
1130 1130
1131 /* 1131 /*
1132 * Remember old elevator. 1132 * Remember old elevator.
@@ -1150,7 +1150,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1150 */ 1150 */
1151 elevator_exit(old_elevator); 1151 elevator_exit(old_elevator);
1152 spin_lock_irq(q->queue_lock); 1152 spin_lock_irq(q->queue_lock);
1153 elv_quisce_end(q); 1153 elv_quiesce_end(q);
1154 spin_unlock_irq(q->queue_lock); 1154 spin_unlock_irq(q->queue_lock);
1155 1155
1156 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); 1156 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
diff --git a/block/ioctl.c b/block/ioctl.c
index 0f22e629b13c..ad474d4bbcce 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -146,8 +146,6 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
146 struct bio *bio; 146 struct bio *bio;
147 147
148 bio = bio_alloc(GFP_KERNEL, 0); 148 bio = bio_alloc(GFP_KERNEL, 0);
149 if (!bio)
150 return -ENOMEM;
151 149
152 bio->bi_end_io = blk_ioc_discard_endio; 150 bio->bi_end_io = blk_ioc_discard_endio;
153 bio->bi_bdev = bdev; 151 bio->bi_bdev = bdev;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 626ee274c5c4..84b7f8709f41 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -217,7 +217,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
217static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, 217static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
218 struct bio *bio) 218 struct bio *bio)
219{ 219{
220 int ret = 0; 220 int r, ret = 0;
221 221
222 /* 222 /*
223 * fill in all the output members 223 * fill in all the output members
@@ -242,7 +242,9 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
242 ret = -EFAULT; 242 ret = -EFAULT;
243 } 243 }
244 244
245 blk_rq_unmap_user(bio); 245 r = blk_rq_unmap_user(bio);
246 if (!ret)
247 ret = r;
246 blk_put_request(rq); 248 blk_put_request(rq);
247 249
248 return ret; 250 return ret;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 065507c46644..17c5d48a75d2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1231,6 +1231,9 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1231 * 1231 *
1232 * We follow the current spec and consider that 0x69/0x96 1232 * We follow the current spec and consider that 0x69/0x96
1233 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. 1233 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1234 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1235 * SEMB signature. This is worked around in
1236 * ata_dev_read_id().
1234 */ 1237 */
1235 if ((tf->lbam == 0) && (tf->lbah == 0)) { 1238 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1236 DPRINTK("found ATA device by sig\n"); 1239 DPRINTK("found ATA device by sig\n");
@@ -1248,8 +1251,8 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1248 } 1251 }
1249 1252
1250 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 1253 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1251 printk(KERN_INFO "ata: SEMB device ignored\n"); 1254 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1252 return ATA_DEV_SEMB_UNSUP; /* not yet */ 1255 return ATA_DEV_SEMB;
1253 } 1256 }
1254 1257
1255 DPRINTK("unknown device\n"); 1258 DPRINTK("unknown device\n");
@@ -1653,8 +1656,8 @@ unsigned long ata_id_xfermask(const u16 *id)
1653 /* 1656 /*
1654 * Process compact flash extended modes 1657 * Process compact flash extended modes
1655 */ 1658 */
1656 int pio = id[163] & 0x7; 1659 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1657 int dma = (id[163] >> 3) & 7; 1660 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1658 1661
1659 if (pio) 1662 if (pio)
1660 pio_mask |= (1 << 5); 1663 pio_mask |= (1 << 5);
@@ -2080,6 +2083,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2080 struct ata_taskfile tf; 2083 struct ata_taskfile tf;
2081 unsigned int err_mask = 0; 2084 unsigned int err_mask = 0;
2082 const char *reason; 2085 const char *reason;
2086 bool is_semb = class == ATA_DEV_SEMB;
2083 int may_fallback = 1, tried_spinup = 0; 2087 int may_fallback = 1, tried_spinup = 0;
2084 int rc; 2088 int rc;
2085 2089
@@ -2090,6 +2094,8 @@ retry:
2090 ata_tf_init(dev, &tf); 2094 ata_tf_init(dev, &tf);
2091 2095
2092 switch (class) { 2096 switch (class) {
2097 case ATA_DEV_SEMB:
2098 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
2093 case ATA_DEV_ATA: 2099 case ATA_DEV_ATA:
2094 tf.command = ATA_CMD_ID_ATA; 2100 tf.command = ATA_CMD_ID_ATA;
2095 break; 2101 break;
@@ -2126,6 +2132,14 @@ retry:
2126 return -ENOENT; 2132 return -ENOENT;
2127 } 2133 }
2128 2134
2135 if (is_semb) {
2136 ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
2137 "device w/ SEMB sig, disabled\n");
2138 /* SEMB is not supported yet */
2139 *p_class = ATA_DEV_SEMB_UNSUP;
2140 return 0;
2141 }
2142
2129 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 2143 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2130 /* Device or controller might have reported 2144 /* Device or controller might have reported
2131 * the wrong device class. Give a shot at the 2145 * the wrong device class. Give a shot at the
@@ -2412,7 +2426,8 @@ int ata_dev_configure(struct ata_device *dev)
2412 /* ATA-specific feature tests */ 2426 /* ATA-specific feature tests */
2413 if (dev->class == ATA_DEV_ATA) { 2427 if (dev->class == ATA_DEV_ATA) {
2414 if (ata_id_is_cfa(id)) { 2428 if (ata_id_is_cfa(id)) {
2415 if (id[162] & 1) /* CPRM may make this media unusable */ 2429 /* CPRM may make this media unusable */
2430 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2416 ata_dev_printk(dev, KERN_WARNING, 2431 ata_dev_printk(dev, KERN_WARNING,
2417 "supports DRM functions and may " 2432 "supports DRM functions and may "
2418 "not be fully accessable.\n"); 2433 "not be fully accessable.\n");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b9747fa59e54..2733b0c90b75 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -647,23 +647,45 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
647 return rc; 647 return rc;
648} 648}
649 649
650static int ata_ioc32(struct ata_port *ap)
651{
652 if (ap->flags & ATA_FLAG_PIO_DMA)
653 return 1;
654 if (ap->pflags & ATA_PFLAG_PIO32)
655 return 1;
656 return 0;
657}
658
650int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, 659int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
651 int cmd, void __user *arg) 660 int cmd, void __user *arg)
652{ 661{
653 int val = -EINVAL, rc = -EINVAL; 662 int val = -EINVAL, rc = -EINVAL;
663 unsigned long flags;
654 664
655 switch (cmd) { 665 switch (cmd) {
656 case ATA_IOC_GET_IO32: 666 case ATA_IOC_GET_IO32:
657 val = 0; 667 spin_lock_irqsave(ap->lock, flags);
668 val = ata_ioc32(ap);
669 spin_unlock_irqrestore(ap->lock, flags);
658 if (copy_to_user(arg, &val, 1)) 670 if (copy_to_user(arg, &val, 1))
659 return -EFAULT; 671 return -EFAULT;
660 return 0; 672 return 0;
661 673
662 case ATA_IOC_SET_IO32: 674 case ATA_IOC_SET_IO32:
663 val = (unsigned long) arg; 675 val = (unsigned long) arg;
664 if (val != 0) 676 rc = 0;
665 return -EINVAL; 677 spin_lock_irqsave(ap->lock, flags);
666 return 0; 678 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
679 if (val)
680 ap->pflags |= ATA_PFLAG_PIO32;
681 else
682 ap->pflags &= ~ATA_PFLAG_PIO32;
683 } else {
684 if (val != ata_ioc32(ap))
685 rc = -EINVAL;
686 }
687 spin_unlock_irqrestore(ap->lock, flags);
688 return rc;
667 689
668 case HDIO_GET_IDENTITY: 690 case HDIO_GET_IDENTITY:
669 return ata_get_identity(ap, scsidev, arg); 691 return ata_get_identity(ap, scsidev, arg);
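
In the ioctl hunk above, ATA_IOC_GET_IO32 now reports whether 32-bit PIO is active (forced by ATA_FLAG_PIO_DMA or enabled via the per-port pflags), and ATA_IOC_SET_IO32 only toggles the setting when the port advertises ATA_PFLAG_PIO32CHANGE; otherwise any value that differs from the current state is rejected. A stand-alone sketch of that state handling, without the locking (flag names mirror the patch but the structure is illustrative):

#include <stdio.h>
#include <errno.h>

#define TOY_PIO32        (1u << 0)   /* 32-bit PIO currently enabled      */
#define TOY_PIO32CHANGE  (1u << 1)   /* driver allows toggling at runtime */

struct toy_port { unsigned int pflags; };

static int toy_get_io32(const struct toy_port *ap)
{
	return !!(ap->pflags & TOY_PIO32);
}

static int toy_set_io32(struct toy_port *ap, int val)
{
	if (ap->pflags & TOY_PIO32CHANGE) {
		if (val)
			ap->pflags |= TOY_PIO32;
		else
			ap->pflags &= ~TOY_PIO32;
		return 0;
	}
	/* Not changeable: only accept a no-op request. */
	return val == toy_get_io32(ap) ? 0 : -EINVAL;
}

int main(void)
{
	struct toy_port fixed = { 0 }, tunable = { TOY_PIO32CHANGE };

	printf("%d\n", toy_set_io32(&tunable, 1));   /* 0: toggled on       */
	printf("%d\n", toy_get_io32(&tunable));      /* 1                   */
	printf("%d\n", toy_set_io32(&fixed, 1));     /* -22: cannot change  */
	return 0;
}
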
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 8332e97a9de3..bb18415d3d63 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -87,6 +87,7 @@ const struct ata_port_operations ata_bmdma32_port_ops = {
87 .inherits = &ata_bmdma_port_ops, 87 .inherits = &ata_bmdma_port_ops,
88 88
89 .sff_data_xfer = ata_sff_data_xfer32, 89 .sff_data_xfer = ata_sff_data_xfer32,
90 .port_start = ata_sff_port_start32,
90}; 91};
91EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); 92EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
92 93
@@ -769,6 +770,9 @@ unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
769 void __iomem *data_addr = ap->ioaddr.data_addr; 770 void __iomem *data_addr = ap->ioaddr.data_addr;
770 unsigned int words = buflen >> 2; 771 unsigned int words = buflen >> 2;
771 int slop = buflen & 3; 772 int slop = buflen & 3;
773
774 if (!(ap->pflags & ATA_PFLAG_PIO32))
775 return ata_sff_data_xfer(dev, buf, buflen, rw);
772 776
773 /* Transfer multiple of 4 bytes */ 777 /* Transfer multiple of 4 bytes */
774 if (rw == READ) 778 if (rw == READ)
@@ -2402,6 +2406,29 @@ int ata_sff_port_start(struct ata_port *ap)
2402EXPORT_SYMBOL_GPL(ata_sff_port_start); 2406EXPORT_SYMBOL_GPL(ata_sff_port_start);
2403 2407
2404/** 2408/**
2409 * ata_sff_port_start32 - Set port up for dma.
2410 * @ap: Port to initialize
2411 *
2412 * Called just after data structures for each port are
2413 * initialized. Allocates space for PRD table if the device
2414 * is DMA capable SFF.
2415 *
2416 * May be used as the port_start() entry in ata_port_operations for
2417 * devices that are capable of 32bit PIO.
2418 *
2419 * LOCKING:
2420 * Inherited from caller.
2421 */
2422int ata_sff_port_start32(struct ata_port *ap)
2423{
2424 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
2425 if (ap->ioaddr.bmdma_addr)
2426 return ata_port_start(ap);
2427 return 0;
2428}
2429EXPORT_SYMBOL_GPL(ata_sff_port_start32);
2430
2431/**
2405 * ata_sff_std_ports - initialize ioaddr with standard port offsets. 2432 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
2406 * @ioaddr: IO address structure to be initialized 2433 * @ioaddr: IO address structure to be initialized
2407 * 2434 *
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 81ab57003aba..122c786449a9 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -8,7 +8,7 @@
8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 8 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
9 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 9 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
10 * Portions Copyright (C) 2003 Red Hat Inc 10 * Portions Copyright (C) 2003 Red Hat Inc
11 * Portions Copyright (C) 2005-2007 MontaVista Software, Inc. 11 * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
12 * 12 *
13 * TODO 13 * TODO
14 * Look into engine reset on timeout errors. Should not be required. 14 * Look into engine reset on timeout errors. Should not be required.
@@ -24,7 +24,7 @@
24#include <linux/libata.h> 24#include <linux/libata.h>
25 25
26#define DRV_NAME "pata_hpt37x" 26#define DRV_NAME "pata_hpt37x"
27#define DRV_VERSION "0.6.11" 27#define DRV_VERSION "0.6.12"
28 28
29struct hpt_clock { 29struct hpt_clock {
30 u8 xfer_speed; 30 u8 xfer_speed;
@@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
445} 445}
446 446
447/** 447/**
448 * hpt370_bmdma_start - DMA engine begin
449 * @qc: ATA command
450 *
451 * The 370 and 370A want us to reset the DMA engine each time we
452 * use it. The 372 and later are fine.
453 */
454
455static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
456{
457 struct ata_port *ap = qc->ap;
458 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
459 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
460 udelay(10);
461 ata_bmdma_start(qc);
462}
463
464/**
465 * hpt370_bmdma_end - DMA engine stop 448 * hpt370_bmdma_end - DMA engine stop
466 * @qc: ATA command 449 * @qc: ATA command
467 * 450 *
@@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = {
598static struct ata_port_operations hpt370_port_ops = { 581static struct ata_port_operations hpt370_port_ops = {
599 .inherits = &ata_bmdma_port_ops, 582 .inherits = &ata_bmdma_port_ops,
600 583
601 .bmdma_start = hpt370_bmdma_start,
602 .bmdma_stop = hpt370_bmdma_stop, 584 .bmdma_stop = hpt370_bmdma_stop,
603 585
604 .mode_filter = hpt370_filter, 586 .mode_filter = hpt370_filter,
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 3f830f0fe2cc..6f985bed8cbb 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -108,6 +108,7 @@ struct legacy_controller {
108 struct ata_port_operations *ops; 108 struct ata_port_operations *ops;
109 unsigned int pio_mask; 109 unsigned int pio_mask;
110 unsigned int flags; 110 unsigned int flags;
111 unsigned int pflags;
111 int (*setup)(struct platform_device *, struct legacy_probe *probe, 112 int (*setup)(struct platform_device *, struct legacy_probe *probe,
112 struct legacy_data *data); 113 struct legacy_data *data);
113}; 114};
@@ -285,7 +286,8 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
285{ 286{
286 int slop = buflen & 3; 287 int slop = buflen & 3;
287 /* 32bit I/O capable *and* we need to write a whole number of dwords */ 288 /* 32bit I/O capable *and* we need to write a whole number of dwords */
288 if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)) { 289 if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)
290 && (ap->pflags & ATA_PFLAG_PIO32)) {
289 struct ata_port *ap = dev->link->ap; 291 struct ata_port *ap = dev->link->ap;
290 unsigned long flags; 292 unsigned long flags;
291 293
@@ -736,7 +738,8 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf,
736 struct ata_port *ap = adev->link->ap; 738 struct ata_port *ap = adev->link->ap;
737 int slop = buflen & 3; 739 int slop = buflen & 3;
738 740
739 if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)) { 741 if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)
742 && (ap->pflags & ATA_PFLAG_PIO32)) {
740 if (rw == WRITE) 743 if (rw == WRITE)
741 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); 744 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
742 else 745 else
@@ -858,27 +861,30 @@ static struct ata_port_operations winbond_port_ops = {
858 861
859static struct legacy_controller controllers[] = { 862static struct legacy_controller controllers[] = {
860 {"BIOS", &legacy_port_ops, 0x1F, 863 {"BIOS", &legacy_port_ops, 0x1F,
861 ATA_FLAG_NO_IORDY, NULL }, 864 ATA_FLAG_NO_IORDY, 0, NULL },
862 {"Snooping", &simple_port_ops, 0x1F, 865 {"Snooping", &simple_port_ops, 0x1F,
863 0 , NULL }, 866 0, 0, NULL },
864 {"PDC20230", &pdc20230_port_ops, 0x7, 867 {"PDC20230", &pdc20230_port_ops, 0x7,
865 ATA_FLAG_NO_IORDY, NULL }, 868 ATA_FLAG_NO_IORDY,
869 ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, NULL },
866 {"HT6560A", &ht6560a_port_ops, 0x07, 870 {"HT6560A", &ht6560a_port_ops, 0x07,
867 ATA_FLAG_NO_IORDY, NULL }, 871 ATA_FLAG_NO_IORDY, 0, NULL },
868 {"HT6560B", &ht6560b_port_ops, 0x1F, 872 {"HT6560B", &ht6560b_port_ops, 0x1F,
869 ATA_FLAG_NO_IORDY, NULL }, 873 ATA_FLAG_NO_IORDY, 0, NULL },
870 {"OPTI82C611A", &opti82c611a_port_ops, 0x0F, 874 {"OPTI82C611A", &opti82c611a_port_ops, 0x0F,
871 0 , NULL }, 875 0, 0, NULL },
872 {"OPTI82C46X", &opti82c46x_port_ops, 0x0F, 876 {"OPTI82C46X", &opti82c46x_port_ops, 0x0F,
873 0 , NULL }, 877 0, 0, NULL },
874 {"QDI6500", &qdi6500_port_ops, 0x07, 878 {"QDI6500", &qdi6500_port_ops, 0x07,
875 ATA_FLAG_NO_IORDY, qdi_port }, 879 ATA_FLAG_NO_IORDY,
880 ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, qdi_port },
876 {"QDI6580", &qdi6580_port_ops, 0x1F, 881 {"QDI6580", &qdi6580_port_ops, 0x1F,
877 0 , qdi_port }, 882 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, qdi_port },
878 {"QDI6580DP", &qdi6580dp_port_ops, 0x1F, 883 {"QDI6580DP", &qdi6580dp_port_ops, 0x1F,
879 0 , qdi_port }, 884 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, qdi_port },
880 {"W83759A", &winbond_port_ops, 0x1F, 885 {"W83759A", &winbond_port_ops, 0x1F,
881 0 , winbond_port } 886 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE,
887 winbond_port }
882}; 888};
883 889
884/** 890/**
@@ -1008,6 +1014,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
1008 ap->ops = ops; 1014 ap->ops = ops;
1009 ap->pio_mask = pio_modes; 1015 ap->pio_mask = pio_modes;
1010 ap->flags |= ATA_FLAG_SLAVE_POSS | iordy; 1016 ap->flags |= ATA_FLAG_SLAVE_POSS | iordy;
1017 ap->pflags |= controller->pflags;
1011 ap->ioaddr.cmd_addr = io_addr; 1018 ap->ioaddr.cmd_addr = io_addr;
1012 ap->ioaddr.altstatus_addr = ctrl_addr; 1019 ap->ioaddr.altstatus_addr = ctrl_addr;
1013 ap->ioaddr.ctl_addr = ctrl_addr; 1020 ap->ioaddr.ctl_addr = ctrl_addr;
@@ -1032,6 +1039,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
1032 return 0; 1039 return 0;
1033 } 1040 }
1034 } 1041 }
1042 ata_host_detach(host);
1035fail: 1043fail:
1036 platform_device_unregister(pdev); 1044 platform_device_unregister(pdev);
1037 return ret; 1045 return ret;
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 0fb6b1b1e634..dd53a66b19e3 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -44,7 +44,7 @@
44#include <linux/libata.h> 44#include <linux/libata.h>
45 45
46#define DRV_NAME "pata_ninja32" 46#define DRV_NAME "pata_ninja32"
47#define DRV_VERSION "0.1.3" 47#define DRV_VERSION "0.1.5"
48 48
49 49
50/** 50/**
@@ -86,6 +86,7 @@ static struct ata_port_operations ninja32_port_ops = {
86 .sff_dev_select = ninja32_dev_select, 86 .sff_dev_select = ninja32_dev_select,
87 .cable_detect = ata_cable_40wire, 87 .cable_detect = ata_cable_40wire,
88 .set_piomode = ninja32_set_piomode, 88 .set_piomode = ninja32_set_piomode,
89 .sff_data_xfer = ata_sff_data_xfer32
89}; 90};
90 91
91static void ninja32_program(void __iomem *base) 92static void ninja32_program(void __iomem *base)
@@ -144,6 +145,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
144 ap->ioaddr.altstatus_addr = base + 0x1E; 145 ap->ioaddr.altstatus_addr = base + 0x1E;
145 ap->ioaddr.bmdma_addr = base; 146 ap->ioaddr.bmdma_addr = base;
146 ata_sff_std_ports(&ap->ioaddr); 147 ata_sff_std_ports(&ap->ioaddr);
148 ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
147 149
148 ninja32_program(base); 150 ninja32_program(base);
149 /* FIXME: Should we disable them at remove ? */ 151 /* FIXME: Should we disable them at remove ? */
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bdd4f5f45575..5f7e64ba87e5 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -275,8 +275,10 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
275 if (rw == READ) { 275 if (rw == READ) {
276 copy_from_brd(mem + off, brd, sector, len); 276 copy_from_brd(mem + off, brd, sector, len);
277 flush_dcache_page(page); 277 flush_dcache_page(page);
278 } else 278 } else {
279 flush_dcache_page(page);
279 copy_to_brd(brd, mem + off, sector, len); 280 copy_to_brd(brd, mem + off, sector, len);
281 }
280 kunmap_atomic(mem, KM_USER0); 282 kunmap_atomic(mem, KM_USER0);
281 283
282out: 284out:
@@ -436,6 +438,7 @@ static struct brd_device *brd_alloc(int i)
436 if (!brd->brd_queue) 438 if (!brd->brd_queue)
437 goto out_free_dev; 439 goto out_free_dev;
438 blk_queue_make_request(brd->brd_queue, brd_make_request); 440 blk_queue_make_request(brd->brd_queue, brd_make_request);
441 blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
439 blk_queue_max_sectors(brd->brd_queue, 1024); 442 blk_queue_max_sectors(brd->brd_queue, 1024);
440 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); 443 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
441 444
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3750d8003048..473a8f7fbdb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -446,6 +446,9 @@ struct drm_i915_gem_object {
446 uint32_t tiling_mode; 446 uint32_t tiling_mode;
447 uint32_t stride; 447 uint32_t stride;
448 448
449 /** Record of address bit 17 of each page at last unbind. */
450 long *bit_17;
451
449 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 452 /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
450 uint32_t agp_type; 453 uint32_t agp_type;
451 454
@@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
635void i915_gem_detach_phys_object(struct drm_device *dev, 638void i915_gem_detach_phys_object(struct drm_device *dev,
636 struct drm_gem_object *obj); 639 struct drm_gem_object *obj);
637void i915_gem_free_all_phys_object(struct drm_device *dev); 640void i915_gem_free_all_phys_object(struct drm_device *dev);
641int i915_gem_object_get_pages(struct drm_gem_object *obj);
642void i915_gem_object_put_pages(struct drm_gem_object *obj);
638 643
639/* i915_gem_tiling.c */ 644/* i915_gem_tiling.c */
640void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 645void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
646void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
647void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
641 648
642/* i915_gem_debug.c */ 649/* i915_gem_debug.c */
643void i915_gem_dump_object(struct drm_gem_object *obj, int len, 650void i915_gem_dump_object(struct drm_gem_object *obj, int len,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1449b452cc63..4642115902d6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43 uint64_t offset, 43 uint64_t offset,
44 uint64_t size); 44 uint64_t size);
45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 45static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46static int i915_gem_object_get_pages(struct drm_gem_object *obj);
47static void i915_gem_object_put_pages(struct drm_gem_object *obj);
48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 46static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 47static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50 unsigned alignment); 48 unsigned alignment);
@@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages,
143 int length) 141 int length)
144{ 142{
145 char __iomem *vaddr; 143 char __iomem *vaddr;
146 int ret; 144 int unwritten;
147 145
148 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); 146 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
149 if (vaddr == NULL) 147 if (vaddr == NULL)
150 return -ENOMEM; 148 return -ENOMEM;
151 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); 149 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
152 kunmap_atomic(vaddr, KM_USER0); 150 kunmap_atomic(vaddr, KM_USER0);
153 151
154 return ret; 152 if (unwritten)
153 return -EFAULT;
154
155 return 0;
156}
157
158static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159{
160 drm_i915_private_t *dev_priv = obj->dev->dev_private;
161 struct drm_i915_gem_object *obj_priv = obj->driver_private;
162
163 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164 obj_priv->tiling_mode != I915_TILING_NONE;
155} 165}
156 166
157static inline int 167static inline int
@@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page,
181 return 0; 191 return 0;
182} 192}
183 193
194static inline int
195slow_shmem_bit17_copy(struct page *gpu_page,
196 int gpu_offset,
197 struct page *cpu_page,
198 int cpu_offset,
199 int length,
200 int is_read)
201{
202 char *gpu_vaddr, *cpu_vaddr;
203
204 /* Use the unswizzled path if this page isn't affected. */
205 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
206 if (is_read)
207 return slow_shmem_copy(cpu_page, cpu_offset,
208 gpu_page, gpu_offset, length);
209 else
210 return slow_shmem_copy(gpu_page, gpu_offset,
211 cpu_page, cpu_offset, length);
212 }
213
214 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
215 if (gpu_vaddr == NULL)
216 return -ENOMEM;
217
218 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
219 if (cpu_vaddr == NULL) {
220 kunmap_atomic(gpu_vaddr, KM_USER0);
221 return -ENOMEM;
222 }
223
224 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
225 * XORing with the other bits (A9 for Y, A9 and A10 for X)
226 */
227 while (length > 0) {
228 int cacheline_end = ALIGN(gpu_offset + 1, 64);
229 int this_length = min(cacheline_end - gpu_offset, length);
230 int swizzled_gpu_offset = gpu_offset ^ 64;
231
232 if (is_read) {
233 memcpy(cpu_vaddr + cpu_offset,
234 gpu_vaddr + swizzled_gpu_offset,
235 this_length);
236 } else {
237 memcpy(gpu_vaddr + swizzled_gpu_offset,
238 cpu_vaddr + cpu_offset,
239 this_length);
240 }
241 cpu_offset += this_length;
242 gpu_offset += this_length;
243 length -= this_length;
244 }
245
246 kunmap_atomic(cpu_vaddr, KM_USER1);
247 kunmap_atomic(gpu_vaddr, KM_USER0);
248
249 return 0;
250}
251
184/** 252/**
185 * This is the fast shmem pread path, which attempts to copy_from_user directly 253 * This is the fast shmem pread path, which attempts to copy_from_user directly
186 * from the backing pages of the object to the user's address space. On a 254 * from the backing pages of the object to the user's address space. On a
@@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
269 int page_length; 337 int page_length;
270 int ret; 338 int ret;
271 uint64_t data_ptr = args->data_ptr; 339 uint64_t data_ptr = args->data_ptr;
340 int do_bit17_swizzling;
272 341
273 remain = args->size; 342 remain = args->size;
274 343
@@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
286 355
287 down_read(&mm->mmap_sem); 356 down_read(&mm->mmap_sem);
288 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, 357 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
289 num_pages, 0, 0, user_pages, NULL); 358 num_pages, 1, 0, user_pages, NULL);
290 up_read(&mm->mmap_sem); 359 up_read(&mm->mmap_sem);
291 if (pinned_pages < num_pages) { 360 if (pinned_pages < num_pages) {
292 ret = -EFAULT; 361 ret = -EFAULT;
293 goto fail_put_user_pages; 362 goto fail_put_user_pages;
294 } 363 }
295 364
365 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
366
296 mutex_lock(&dev->struct_mutex); 367 mutex_lock(&dev->struct_mutex);
297 368
298 ret = i915_gem_object_get_pages(obj); 369 ret = i915_gem_object_get_pages(obj);
@@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
327 if ((data_page_offset + page_length) > PAGE_SIZE) 398 if ((data_page_offset + page_length) > PAGE_SIZE)
328 page_length = PAGE_SIZE - data_page_offset; 399 page_length = PAGE_SIZE - data_page_offset;
329 400
330 ret = slow_shmem_copy(user_pages[data_page_index], 401 if (do_bit17_swizzling) {
331 data_page_offset, 402 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
332 obj_priv->pages[shmem_page_index], 403 shmem_page_offset,
333 shmem_page_offset, 404 user_pages[data_page_index],
334 page_length); 405 data_page_offset,
406 page_length,
407 1);
408 } else {
409 ret = slow_shmem_copy(user_pages[data_page_index],
410 data_page_offset,
411 obj_priv->pages[shmem_page_index],
412 shmem_page_offset,
413 page_length);
414 }
335 if (ret) 415 if (ret)
336 goto fail_put_pages; 416 goto fail_put_pages;
337 417
@@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
383 return -EINVAL; 463 return -EINVAL;
384 } 464 }
385 465
386 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); 466 if (i915_gem_object_needs_bit17_swizzle(obj)) {
387 if (ret != 0)
388 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); 467 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
468 } else {
469 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
470 if (ret != 0)
471 ret = i915_gem_shmem_pread_slow(dev, obj, args,
472 file_priv);
473 }
389 474
390 drm_gem_object_unreference(obj); 475 drm_gem_object_unreference(obj);
391 476
@@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
727 int page_length; 812 int page_length;
728 int ret; 813 int ret;
729 uint64_t data_ptr = args->data_ptr; 814 uint64_t data_ptr = args->data_ptr;
815 int do_bit17_swizzling;
730 816
731 remain = args->size; 817 remain = args->size;
732 818
@@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
751 goto fail_put_user_pages; 837 goto fail_put_user_pages;
752 } 838 }
753 839
840 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
841
754 mutex_lock(&dev->struct_mutex); 842 mutex_lock(&dev->struct_mutex);
755 843
756 ret = i915_gem_object_get_pages(obj); 844 ret = i915_gem_object_get_pages(obj);
@@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
785 if ((data_page_offset + page_length) > PAGE_SIZE) 873 if ((data_page_offset + page_length) > PAGE_SIZE)
786 page_length = PAGE_SIZE - data_page_offset; 874 page_length = PAGE_SIZE - data_page_offset;
787 875
788 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], 876 if (do_bit17_swizzling) {
789 shmem_page_offset, 877 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
790 user_pages[data_page_index], 878 shmem_page_offset,
791 data_page_offset, 879 user_pages[data_page_index],
792 page_length); 880 data_page_offset,
881 page_length,
882 0);
883 } else {
884 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
885 shmem_page_offset,
886 user_pages[data_page_index],
887 data_page_offset,
888 page_length);
889 }
793 if (ret) 890 if (ret)
794 goto fail_put_pages; 891 goto fail_put_pages;
795 892
@@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
854 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, 951 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
855 file_priv); 952 file_priv);
856 } 953 }
954 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
955 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
857 } else { 956 } else {
858 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); 957 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
859 if (ret == -EFAULT) { 958 if (ret == -EFAULT) {
@@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1285 return 0; 1384 return 0;
1286} 1385}
1287 1386
1288static void 1387void
1289i915_gem_object_put_pages(struct drm_gem_object *obj) 1388i915_gem_object_put_pages(struct drm_gem_object *obj)
1290{ 1389{
1291 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1390 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1297 if (--obj_priv->pages_refcount != 0) 1396 if (--obj_priv->pages_refcount != 0)
1298 return; 1397 return;
1299 1398
1399 if (obj_priv->tiling_mode != I915_TILING_NONE)
1400 i915_gem_object_save_bit_17_swizzle(obj);
1401
1300 for (i = 0; i < page_count; i++) 1402 for (i = 0; i < page_count; i++)
1301 if (obj_priv->pages[i] != NULL) { 1403 if (obj_priv->pages[i] != NULL) {
1302 if (obj_priv->dirty) 1404 if (obj_priv->dirty)
@@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev,
1494 1596
1495 if (obj->write_domain != 0) 1597 if (obj->write_domain != 0)
1496 i915_gem_object_move_to_flushing(obj); 1598 i915_gem_object_move_to_flushing(obj);
1497 else 1599 else {
1600 /* Take a reference on the object so it won't be
1601 * freed while the spinlock is held. The list
1602 * protection for this spinlock is safe when breaking
1603 * the lock like this since the next thing we do
1604 * is just get the head of the list again.
1605 */
1606 drm_gem_object_reference(obj);
1498 i915_gem_object_move_to_inactive(obj); 1607 i915_gem_object_move_to_inactive(obj);
1608 spin_unlock(&dev_priv->mm.active_list_lock);
1609 drm_gem_object_unreference(obj);
1610 spin_lock(&dev_priv->mm.active_list_lock);
1611 }
1499 } 1612 }
1500out: 1613out:
1501 spin_unlock(&dev_priv->mm.active_list_lock); 1614 spin_unlock(&dev_priv->mm.active_list_lock);
@@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev)
1884 return ret; 1997 return ret;
1885} 1998}
1886 1999
1887static int 2000int
1888i915_gem_object_get_pages(struct drm_gem_object *obj) 2001i915_gem_object_get_pages(struct drm_gem_object *obj)
1889{ 2002{
1890 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2003 struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
1922 } 2035 }
1923 obj_priv->pages[i] = page; 2036 obj_priv->pages[i] = page;
1924 } 2037 }
2038
2039 if (obj_priv->tiling_mode != I915_TILING_NONE)
2040 i915_gem_object_do_bit_17_swizzle(obj);
2041
1925 return 0; 2042 return 0;
1926} 2043}
1927 2044
@@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3002 drm_free(*relocs, reloc_count * sizeof(**relocs), 3119 drm_free(*relocs, reloc_count * sizeof(**relocs),
3003 DRM_MEM_DRIVER); 3120 DRM_MEM_DRIVER);
3004 *relocs = NULL; 3121 *relocs = NULL;
3005 return ret; 3122 return -EFAULT;
3006 } 3123 }
3007 3124
3008 reloc_index += exec_list[i].relocation_count; 3125 reloc_index += exec_list[i].relocation_count;
3009 } 3126 }
3010 3127
3011 return ret; 3128 return 0;
3012} 3129}
3013 3130
3014static int 3131static int
@@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3017 struct drm_i915_gem_relocation_entry *relocs) 3134 struct drm_i915_gem_relocation_entry *relocs)
3018{ 3135{
3019 uint32_t reloc_count = 0, i; 3136 uint32_t reloc_count = 0, i;
3020 int ret; 3137 int ret = 0;
3021 3138
3022 for (i = 0; i < buffer_count; i++) { 3139 for (i = 0; i < buffer_count; i++) {
3023 struct drm_i915_gem_relocation_entry __user *user_relocs; 3140 struct drm_i915_gem_relocation_entry __user *user_relocs;
3141 int unwritten;
3024 3142
3025 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; 3143 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3026 3144
3027 if (ret == 0) { 3145 unwritten = copy_to_user(user_relocs,
3028 ret = copy_to_user(user_relocs, 3146 &relocs[reloc_count],
3029 &relocs[reloc_count], 3147 exec_list[i].relocation_count *
3030 exec_list[i].relocation_count * 3148 sizeof(*relocs));
3031 sizeof(*relocs)); 3149
3150 if (unwritten) {
3151 ret = -EFAULT;
3152 goto err;
3032 } 3153 }
3033 3154
3034 reloc_count += exec_list[i].relocation_count; 3155 reloc_count += exec_list[i].relocation_count;
3035 } 3156 }
3036 3157
3158err:
3037 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); 3159 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
3038 3160
3039 return ret; 3161 return ret;
@@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3243 exec_offset = exec_list[args->buffer_count - 1].offset; 3365 exec_offset = exec_list[args->buffer_count - 1].offset;
3244 3366
3245#if WATCH_EXEC 3367#if WATCH_EXEC
3246 i915_gem_dump_object(object_list[args->buffer_count - 1], 3368 i915_gem_dump_object(batch_obj,
3247 args->batch_len, 3369 args->batch_len,
3248 __func__, 3370 __func__,
3249 ~0); 3371 ~0);
@@ -3308,10 +3430,12 @@ err:
3308 (uintptr_t) args->buffers_ptr, 3430 (uintptr_t) args->buffers_ptr,
3309 exec_list, 3431 exec_list,
3310 sizeof(*exec_list) * args->buffer_count); 3432 sizeof(*exec_list) * args->buffer_count);
3311 if (ret) 3433 if (ret) {
3434 ret = -EFAULT;
3312 DRM_ERROR("failed to copy %d exec entries " 3435 DRM_ERROR("failed to copy %d exec entries "
3313 "back to user (%d)\n", 3436 "back to user (%d)\n",
3314 args->buffer_count, ret); 3437 args->buffer_count, ret);
3438 }
3315 } 3439 }
3316 3440
3317 /* Copy the updated relocations out regardless of current error 3441 /* Copy the updated relocations out regardless of current error
@@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
3593 i915_gem_free_mmap_offset(obj); 3717 i915_gem_free_mmap_offset(obj);
3594 3718
3595 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 3719 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
3720 kfree(obj_priv->bit_17);
3596 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 3721 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3597} 3722}
3598 3723
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index a1ac0c5e7307..986f1082c596 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data)
234 return 0; 234 return 0;
235} 235}
236 236
237static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
238{
239 int page, i;
240 uint32_t *mem;
241
242 for (page = 0; page < page_count; page++) {
243 mem = kmap(pages[page]);
244 for (i = 0; i < PAGE_SIZE; i += 4)
245 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
246 kunmap(pages[page]);
247 }
248}
249
250static int i915_batchbuffer_info(struct seq_file *m, void *data)
251{
252 struct drm_info_node *node = (struct drm_info_node *) m->private;
253 struct drm_device *dev = node->minor->dev;
254 drm_i915_private_t *dev_priv = dev->dev_private;
255 struct drm_gem_object *obj;
256 struct drm_i915_gem_object *obj_priv;
257 int ret;
258
259 spin_lock(&dev_priv->mm.active_list_lock);
260
261 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
262 obj = obj_priv->obj;
263 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
264 ret = i915_gem_object_get_pages(obj);
265 if (ret) {
266 DRM_ERROR("Failed to get pages: %d\n", ret);
267 spin_unlock(&dev_priv->mm.active_list_lock);
268 return ret;
269 }
270
271 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
272 i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
273
274 i915_gem_object_put_pages(obj);
275 }
276 }
277
278 spin_unlock(&dev_priv->mm.active_list_lock);
279
280 return 0;
281}
282
283static int i915_ringbuffer_data(struct seq_file *m, void *data)
284{
285 struct drm_info_node *node = (struct drm_info_node *) m->private;
286 struct drm_device *dev = node->minor->dev;
287 drm_i915_private_t *dev_priv = dev->dev_private;
288 u8 *virt;
289 uint32_t *ptr, off;
290
291 if (!dev_priv->ring.ring_obj) {
292 seq_printf(m, "No ringbuffer setup\n");
293 return 0;
294 }
295
296 virt = dev_priv->ring.virtual_start;
297
298 for (off = 0; off < dev_priv->ring.Size; off += 4) {
299 ptr = (uint32_t *)(virt + off);
300 seq_printf(m, "%08x : %08x\n", off, *ptr);
301 }
302
303 return 0;
304}
305
306static int i915_ringbuffer_info(struct seq_file *m, void *data)
307{
308 struct drm_info_node *node = (struct drm_info_node *) m->private;
309 struct drm_device *dev = node->minor->dev;
310 drm_i915_private_t *dev_priv = dev->dev_private;
311 unsigned int head, tail, mask;
312
313 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
314 tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
315 mask = dev_priv->ring.tail_mask;
316
317 seq_printf(m, "RingHead : %08x\n", head);
318 seq_printf(m, "RingTail : %08x\n", tail);
319 seq_printf(m, "RingMask : %08x\n", mask);
320 seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
321 seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
322
323 return 0;
324}
325
326
237static struct drm_info_list i915_gem_debugfs_list[] = { 327static struct drm_info_list i915_gem_debugfs_list[] = {
238 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 328 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
239 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 329 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
@@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
243 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 333 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
244 {"i915_gem_interrupt", i915_interrupt_info, 0}, 334 {"i915_gem_interrupt", i915_interrupt_info, 0},
245 {"i915_gem_hws", i915_hws_info, 0}, 335 {"i915_gem_hws", i915_hws_info, 0},
336 {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
337 {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
338 {"i915_batchbuffers", i915_batchbuffer_info, 0},
246}; 339};
247#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) 340#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
248 341
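
The new i915_batchbuffers, i915_ringbuffer_data and i915_ringbuffer_info entries above all follow the standard DRM debugfs pattern: a drm_info_list entry names a node and a show callback, and the callback gets its drm_info_node back through m->private. A minimal sketch of one such entry follows; the entry name and the value printed are illustrative, and registration is assumed to go through drm_debugfs_create_files() as the existing i915_gem_debugfs_list does.

    /* Hedged sketch of a drm_info_list entry; names are hypothetical. */
    static int i915_example_info(struct seq_file *m, void *data)
    {
            struct drm_info_node *node = (struct drm_info_node *) m->private;
            struct drm_device *dev = node->minor->dev;
            drm_i915_private_t *dev_priv = dev->dev_private;

            seq_printf(m, "bit6 swizzle x: %u\n", dev_priv->mm.bit_6_swizzle_x);
            return 0;
    }

    static struct drm_info_list i915_example_list[] = {
            {"i915_example", i915_example_info, 0},
    };
    /* assumed registration, matching the existing list:
     * drm_debugfs_create_files(i915_example_list, ARRAY_SIZE(i915_example_list),
     *                          minor->debugfs_root, minor);
     */
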
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6be3f927c86a..f27e523c764f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include "linux/string.h"
29#include "linux/bitops.h"
28#include "drmP.h" 30#include "drmP.h"
29#include "drm.h" 31#include "drm.h"
30#include "i915_drm.h" 32#include "i915_drm.h"
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
127 swizzle_y = I915_BIT_6_SWIZZLE_9_11; 129 swizzle_y = I915_BIT_6_SWIZZLE_9_11;
128 } else { 130 } else {
129 /* Bit 17 swizzling by the CPU in addition. */ 131 /* Bit 17 swizzling by the CPU in addition. */
130 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 132 swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
131 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 133 swizzle_y = I915_BIT_6_SWIZZLE_9_17;
132 } 134 }
133 break; 135 break;
134 } 136 }
@@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
288 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 290 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
289 else 291 else
290 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 292 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
293
294 /* Hide bit 17 swizzling from the user. This prevents old Mesa
295 * from aborting the application on sw fallbacks to bit 17,
296 * and we use the pread/pwrite bit17 paths to swizzle for it.
297 * If there was a user that was relying on the swizzle
298 * information for drm_intel_bo_map()ed reads/writes this would
299 * break it, but we don't have any of those.
300 */
301 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
302 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
303 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
304 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
305
291 /* If we can't handle the swizzling, make it untiled. */ 306 /* If we can't handle the swizzling, make it untiled. */
292 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { 307 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
293 args->tiling_mode = I915_TILING_NONE; 308 args->tiling_mode = I915_TILING_NONE;
@@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
354 DRM_ERROR("unknown tiling mode\n"); 369 DRM_ERROR("unknown tiling mode\n");
355 } 370 }
356 371
372 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
373 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
374 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
375 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
376 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
377
357 drm_gem_object_unreference(obj); 378 drm_gem_object_unreference(obj);
358 mutex_unlock(&dev->struct_mutex); 379 mutex_unlock(&dev->struct_mutex);
359 380
360 return 0; 381 return 0;
361} 382}
383
384/**
385 * Swap every 64 bytes of this page around, to account for it having a new
386 * bit 17 of its physical address and therefore being interpreted differently
387 * by the GPU.
388 */
389static int
390i915_gem_swizzle_page(struct page *page)
391{
392 char *vaddr;
393 int i;
394 char temp[64];
395
396 vaddr = kmap(page);
397 if (vaddr == NULL)
398 return -ENOMEM;
399
400 for (i = 0; i < PAGE_SIZE; i += 128) {
401 memcpy(temp, &vaddr[i], 64);
402 memcpy(&vaddr[i], &vaddr[i + 64], 64);
403 memcpy(&vaddr[i + 64], temp, 64);
404 }
405
406 kunmap(page);
407
408 return 0;
409}
410
411void
412i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
413{
414 struct drm_device *dev = obj->dev;
415 drm_i915_private_t *dev_priv = dev->dev_private;
416 struct drm_i915_gem_object *obj_priv = obj->driver_private;
417 int page_count = obj->size >> PAGE_SHIFT;
418 int i;
419
420 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
421 return;
422
423 if (obj_priv->bit_17 == NULL)
424 return;
425
426 for (i = 0; i < page_count; i++) {
427 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
428 if ((new_bit_17 & 0x1) !=
429 (test_bit(i, obj_priv->bit_17) != 0)) {
430 int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
431 if (ret != 0) {
432 DRM_ERROR("Failed to swizzle page\n");
433 return;
434 }
435 set_page_dirty(obj_priv->pages[i]);
436 }
437 }
438}
439
440void
441i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
442{
443 struct drm_device *dev = obj->dev;
444 drm_i915_private_t *dev_priv = dev->dev_private;
445 struct drm_i915_gem_object *obj_priv = obj->driver_private;
446 int page_count = obj->size >> PAGE_SHIFT;
447 int i;
448
449 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
450 return;
451
452 if (obj_priv->bit_17 == NULL) {
453 obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
454 sizeof(long), GFP_KERNEL);
455 if (obj_priv->bit_17 == NULL) {
456 DRM_ERROR("Failed to allocate memory for bit 17 "
457 "record\n");
458 return;
459 }
460 }
461
462 for (i = 0; i < page_count; i++) {
463 if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
464 __set_bit(i, obj_priv->bit_17);
465 else
466 __clear_bit(i, obj_priv->bit_17);
467 }
468}
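
The save/do pair above records, per page, whether bit 17 of the physical address was set when the object's pages were last populated, and re-swizzles any page that later comes back at an address with the opposite bit 17. The data movement itself is just the 64-byte swap from i915_gem_swizzle_page(); a standalone sketch on an arbitrary buffer makes it concrete (buffer name and length are illustrative, length assumed to be a multiple of 128, memcpy from <linux/string.h>).

    /* Within every 128-byte block, the two 64-byte halves trade places. */
    static void swizzle_bit17_buffer(char *buf, size_t len)
    {
            char tmp[64];
            size_t i;

            for (i = 0; i < len; i += 128) {
                    memcpy(tmp, buf + i, 64);
                    memcpy(buf + i, buf + i + 64, 64);
                    memcpy(buf + i + 64, tmp, 64);
            }
    }
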
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 64773ce52964..c2c8e95ff14d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = {
367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 367 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 368 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, 369 .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
370 .find_pll = intel_find_best_PLL,
370 }, 371 },
371 { /* INTEL_LIMIT_IGD_LVDS */ 372 { /* INTEL_LIMIT_IGD_LVDS */
372 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 373 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = {
380 /* IGD only supports single-channel mode. */ 381 /* IGD only supports single-channel mode. */
381 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 382 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
382 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 383 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
384 .find_pll = intel_find_best_PLL,
383 }, 385 },
384 386
385}; 387};
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b7f0ebe9f810..3e094beecb99 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
864 864
865static struct sysrq_key_op sysrq_intelfb_restore_op = { 865static struct sysrq_key_op sysrq_intelfb_restore_op = {
866 .handler = intelfb_sysrq, 866 .handler = intelfb_sysrq,
867 .help_msg = "force fb", 867 .help_msg = "force-fb(G)",
868 .action_msg = "force restore of fb console", 868 .action_msg = "Restore framebuffer console",
869}; 869};
870 870
871int intelfb_probe(struct drm_device *dev) 871int intelfb_probe(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b06a4a3ff08d..550374225388 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -38,7 +38,7 @@
38struct intel_hdmi_priv { 38struct intel_hdmi_priv {
39 u32 sdvox_reg; 39 u32 sdvox_reg;
40 u32 save_SDVOX; 40 u32 save_SDVOX;
41 int has_hdmi_sink; 41 bool has_hdmi_sink;
42}; 42};
43 43
44static void intel_hdmi_mode_set(struct drm_encoder *encoder, 44static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
128 return true; 128 return true;
129} 129}
130 130
131static void
132intel_hdmi_sink_detect(struct drm_connector *connector)
133{
134 struct intel_output *intel_output = to_intel_output(connector);
135 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
136 struct edid *edid = NULL;
137
138 edid = drm_get_edid(&intel_output->base,
139 &intel_output->ddc_bus->adapter);
140 if (edid != NULL) {
141 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
142 kfree(edid);
143 intel_output->base.display_info.raw_edid = NULL;
144 }
145}
146
131static enum drm_connector_status 147static enum drm_connector_status
132intel_hdmi_detect(struct drm_connector *connector) 148intel_hdmi_detect(struct drm_connector *connector)
133{ 149{
@@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector)
158 return connector_status_unknown; 174 return connector_status_unknown;
159 } 175 }
160 176
161 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) 177 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
178 intel_hdmi_sink_detect(connector);
162 return connector_status_connected; 179 return connector_status_connected;
163 else 180 } else
164 return connector_status_disconnected; 181 return connector_status_disconnected;
165} 182}
166 183
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7b31f55f55c8..9913651c1e17 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1357 intel_sdvo_read_response(intel_output, &response, 2); 1357 intel_sdvo_read_response(intel_output, &response, 2);
1358} 1358}
1359 1359
1360static void
1361intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1362{
1363 struct intel_output *intel_output = to_intel_output(connector);
1364 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1365 struct edid *edid = NULL;
1366
1367 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1368 edid = drm_get_edid(&intel_output->base,
1369 &intel_output->ddc_bus->adapter);
1370 if (edid != NULL) {
1371 sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
1372 kfree(edid);
1373 intel_output->base.display_info.raw_edid = NULL;
1374 }
1375}
1376
1360static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1377static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
1361{ 1378{
1362 u8 response[2]; 1379 u8 response[2];
@@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1371 if (status != SDVO_CMD_STATUS_SUCCESS) 1388 if (status != SDVO_CMD_STATUS_SUCCESS)
1372 return connector_status_unknown; 1389 return connector_status_unknown;
1373 1390
1374 if ((response[0] != 0) || (response[1] != 0)) 1391 if ((response[0] != 0) || (response[1] != 0)) {
1392 intel_sdvo_hdmi_sink_detect(connector);
1375 return connector_status_connected; 1393 return connector_status_connected;
1376 else 1394 } else
1377 return connector_status_disconnected; 1395 return connector_status_disconnected;
1378} 1396}
1379 1397
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
deleted file mode 100644
index 345098b4ca77..000000000000
--- a/drivers/md/dm-bio-list.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright (C) 2004 Red Hat UK Ltd.
3 *
4 * This file is released under the GPL.
5 */
6
7#ifndef DM_BIO_LIST_H
8#define DM_BIO_LIST_H
9
10#include <linux/bio.h>
11
12#ifdef CONFIG_BLOCK
13
14struct bio_list {
15 struct bio *head;
16 struct bio *tail;
17};
18
19static inline int bio_list_empty(const struct bio_list *bl)
20{
21 return bl->head == NULL;
22}
23
24static inline void bio_list_init(struct bio_list *bl)
25{
26 bl->head = bl->tail = NULL;
27}
28
29#define bio_list_for_each(bio, bl) \
30 for (bio = (bl)->head; bio; bio = bio->bi_next)
31
32static inline unsigned bio_list_size(const struct bio_list *bl)
33{
34 unsigned sz = 0;
35 struct bio *bio;
36
37 bio_list_for_each(bio, bl)
38 sz++;
39
40 return sz;
41}
42
43static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
44{
45 bio->bi_next = NULL;
46
47 if (bl->tail)
48 bl->tail->bi_next = bio;
49 else
50 bl->head = bio;
51
52 bl->tail = bio;
53}
54
55static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
56{
57 bio->bi_next = bl->head;
58
59 bl->head = bio;
60
61 if (!bl->tail)
62 bl->tail = bio;
63}
64
65static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
66{
67 if (!bl2->head)
68 return;
69
70 if (bl->tail)
71 bl->tail->bi_next = bl2->head;
72 else
73 bl->head = bl2->head;
74
75 bl->tail = bl2->tail;
76}
77
78static inline void bio_list_merge_head(struct bio_list *bl,
79 struct bio_list *bl2)
80{
81 if (!bl2->head)
82 return;
83
84 if (bl->head)
85 bl2->tail->bi_next = bl->head;
86 else
87 bl->tail = bl2->tail;
88
89 bl->head = bl2->head;
90}
91
92static inline struct bio *bio_list_pop(struct bio_list *bl)
93{
94 struct bio *bio = bl->head;
95
96 if (bio) {
97 bl->head = bl->head->bi_next;
98 if (!bl->head)
99 bl->tail = NULL;
100
101 bio->bi_next = NULL;
102 }
103
104 return bio;
105}
106
107static inline struct bio *bio_list_get(struct bio_list *bl)
108{
109 struct bio *bio = bl->head;
110
111 bl->head = bl->tail = NULL;
112
113 return bio;
114}
115
116#endif /* CONFIG_BLOCK */
117#endif
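
The private dm-bio-list.h copy is removed, presumably because the same bio_list type and helpers are now provided by the generic block headers, so the md/dm users below can drop the include without changing behaviour. A hedged sketch of how a caller drains such a list using the generic helpers, assuming they now come from <linux/bio.h>:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* pop queued bios off the head and resubmit them */
    static void drain_bio_list(struct bio_list *bl)
    {
            struct bio *bio;

            while ((bio = bio_list_pop(bl)) != NULL)
                    generic_make_request(bio);
    }
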
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 59ee1b015d2d..559dbb52bc85 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -15,8 +15,6 @@
15 15
16#include <linux/device-mapper.h> 16#include <linux/device-mapper.h>
17 17
18#include "dm-bio-list.h"
19
20#define DM_MSG_PREFIX "delay" 18#define DM_MSG_PREFIX "delay"
21 19
22struct delay_c { 20struct delay_c {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 095f77bf9681..6a386ab4f7eb 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -8,7 +8,6 @@
8#include <linux/device-mapper.h> 8#include <linux/device-mapper.h>
9 9
10#include "dm-path-selector.h" 10#include "dm-path-selector.h"
11#include "dm-bio-list.h"
12#include "dm-bio-record.h" 11#include "dm-bio-record.h"
13#include "dm-uevent.h" 12#include "dm-uevent.h"
14 13
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 536ef0bef154..076fbb4e967a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -5,7 +5,6 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include "dm-bio-list.h"
9#include "dm-bio-record.h" 8#include "dm-bio-record.h"
10 9
11#include <linux/init.h> 10#include <linux/init.h>
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 59f8d9df9e1a..7b899be0b087 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -14,7 +14,6 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15 15
16#include "dm.h" 16#include "dm.h"
17#include "dm-bio-list.h"
18 17
19#define DM_MSG_PREFIX "region hash" 18#define DM_MSG_PREFIX "region hash"
20 19
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 981a0413068f..d73f17fc7778 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -22,7 +22,6 @@
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23 23
24#include "dm-exception-store.h" 24#include "dm-exception-store.h"
25#include "dm-bio-list.h"
26 25
27#define DM_MSG_PREFIX "snapshots" 26#define DM_MSG_PREFIX "snapshots"
28 27
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a994be035ba..424f7b048c30 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -6,7 +6,6 @@
6 */ 6 */
7 7
8#include "dm.h" 8#include "dm.h"
9#include "dm-bio-list.h"
10#include "dm-uevent.h" 9#include "dm-uevent.h"
11 10
12#include <linux/init.h> 11#include <linux/init.h>
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 274b491a11c1..36df9109cde1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -35,7 +35,6 @@
35#include <linux/blkdev.h> 35#include <linux/blkdev.h>
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include "md.h" 37#include "md.h"
38#include "dm-bio-list.h"
39#include "raid1.h" 38#include "raid1.h"
40#include "bitmap.h" 39#include "bitmap.h"
41 40
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e293d92641ac..81a54f17417e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -22,7 +22,6 @@
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/seq_file.h> 23#include <linux/seq_file.h>
24#include "md.h" 24#include "md.h"
25#include "dm-bio-list.h"
26#include "raid10.h" 25#include "raid10.h"
27#include "bitmap.h" 26#include "bitmap.h"
28 27
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index d0d0c2fee054..02f64d578641 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -692,6 +692,17 @@ static struct zorro_driver a2065_driver = {
692 .remove = __devexit_p(a2065_remove_one), 692 .remove = __devexit_p(a2065_remove_one),
693}; 693};
694 694
695static const struct net_device_ops lance_netdev_ops = {
696 .ndo_open = lance_open,
697 .ndo_stop = lance_close,
698 .ndo_start_xmit = lance_start_xmit,
699 .ndo_tx_timeout = lance_tx_timeout,
700 .ndo_set_multicast_list = lance_set_multicast,
701 .ndo_validate_addr = eth_validate_addr,
702 .ndo_change_mtu = eth_change_mtu,
703 .ndo_set_mac_address = eth_mac_addr,
704};
705
695static int __devinit a2065_init_one(struct zorro_dev *z, 706static int __devinit a2065_init_one(struct zorro_dev *z,
696 const struct zorro_device_id *ent) 707 const struct zorro_device_id *ent)
697{ 708{
@@ -753,12 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
753 priv->rx_ring_mod_mask = RX_RING_MOD_MASK; 764 priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
754 priv->tx_ring_mod_mask = TX_RING_MOD_MASK; 765 priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
755 766
756 dev->open = &lance_open; 767 dev->netdev_ops = &lance_netdev_ops;
757 dev->stop = &lance_close;
758 dev->hard_start_xmit = &lance_start_xmit;
759 dev->tx_timeout = &lance_tx_timeout;
760 dev->watchdog_timeo = 5*HZ; 768 dev->watchdog_timeo = 5*HZ;
761 dev->set_multicast_list = &lance_set_multicast;
762 dev->dma = 0; 769 dev->dma = 0;
763 770
764 init_timer(&priv->multicast_timer); 771 init_timer(&priv->multicast_timer);
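
This a2065 change, and the ariadne, am79c961, at91_ether, ep93xx, ether1/ether3, atarilance, au1000, bfin_mac, cris and declance hunks that follow, all apply the same mechanical conversion: the per-instance function pointers on struct net_device are replaced by one shared, const struct net_device_ops, assigned once in probe. A generic sketch of the pattern, with a hypothetical "foo" driver and callbacks:

    static const struct net_device_ops foo_netdev_ops = {
            .ndo_open               = foo_open,
            .ndo_stop               = foo_close,
            .ndo_start_xmit         = foo_start_xmit,
            .ndo_set_multicast_list = foo_set_multicast,
            .ndo_validate_addr      = eth_validate_addr,
            .ndo_change_mtu         = eth_change_mtu,
            .ndo_set_mac_address    = eth_mac_addr,
    };

    static void foo_setup(struct net_device *dev)
    {
            /* one assignment replaces the old dev->open, dev->stop, ... writes */
            dev->netdev_ops = &foo_netdev_ops;
            dev->watchdog_timeo = 5 * HZ;
    }
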
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index e1d72e06f3e1..58e8d522e5bc 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -155,6 +155,18 @@ static struct zorro_driver ariadne_driver = {
155 .remove = __devexit_p(ariadne_remove_one), 155 .remove = __devexit_p(ariadne_remove_one),
156}; 156};
157 157
158static const struct net_device_ops ariadne_netdev_ops = {
159 .ndo_open = ariadne_open,
160 .ndo_stop = ariadne_close,
161 .ndo_start_xmit = ariadne_start_xmit,
162 .ndo_tx_timeout = ariadne_tx_timeout,
163 .ndo_get_stats = ariadne_get_stats,
164 .ndo_set_multicast_list = set_multicast_list,
165 .ndo_validate_addr = eth_validate_addr,
166 .ndo_change_mtu = eth_change_mtu,
167 .ndo_set_mac_address = eth_mac_addr,
168};
169
158static int __devinit ariadne_init_one(struct zorro_dev *z, 170static int __devinit ariadne_init_one(struct zorro_dev *z,
159 const struct zorro_device_id *ent) 171 const struct zorro_device_id *ent)
160{ 172{
@@ -197,13 +209,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
197 dev->mem_start = ZTWO_VADDR(mem_start); 209 dev->mem_start = ZTWO_VADDR(mem_start);
198 dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE; 210 dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE;
199 211
200 dev->open = &ariadne_open; 212 dev->netdev_ops = &ariadne_netdev_ops;
201 dev->stop = &ariadne_close;
202 dev->hard_start_xmit = &ariadne_start_xmit;
203 dev->tx_timeout = &ariadne_tx_timeout;
204 dev->watchdog_timeo = 5*HZ; 213 dev->watchdog_timeo = 5*HZ;
205 dev->get_stats = &ariadne_get_stats;
206 dev->set_multicast_list = &set_multicast_list;
207 214
208 err = register_netdev(dev); 215 err = register_netdev(dev);
209 if (err) { 216 if (err) {
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 4bc6901b3819..627bc75da17d 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -665,6 +665,20 @@ static void __init am79c961_banner(void)
665 if (net_debug && version_printed++ == 0) 665 if (net_debug && version_printed++ == 0)
666 printk(KERN_INFO "%s", version); 666 printk(KERN_INFO "%s", version);
667} 667}
668static const struct net_device_ops am79c961_netdev_ops = {
669 .ndo_open = am79c961_open,
670 .ndo_stop = am79c961_close,
671 .ndo_start_xmit = am79c961_sendpacket,
672 .ndo_get_stats = am79c961_getstats,
673 .ndo_set_multicast_list = am79c961_setmulticastlist,
674 .ndo_tx_timeout = am79c961_timeout,
675 .ndo_validate_addr = eth_validate_addr,
676 .ndo_change_mtu = eth_change_mtu,
677 .ndo_set_mac_address = eth_mac_addr,
678#ifdef CONFIG_NET_POLL_CONTROLLER
679 .ndo_poll_controller = am79c961_poll_controller,
680#endif
681};
668 682
669static int __init am79c961_probe(struct platform_device *pdev) 683static int __init am79c961_probe(struct platform_device *pdev)
670{ 684{
@@ -732,15 +746,7 @@ static int __init am79c961_probe(struct platform_device *pdev)
732 if (am79c961_hw_init(dev)) 746 if (am79c961_hw_init(dev))
733 goto release; 747 goto release;
734 748
735 dev->open = am79c961_open; 749 dev->netdev_ops = &am79c961_netdev_ops;
736 dev->stop = am79c961_close;
737 dev->hard_start_xmit = am79c961_sendpacket;
738 dev->get_stats = am79c961_getstats;
739 dev->set_multicast_list = am79c961_setmulticastlist;
740 dev->tx_timeout = am79c961_timeout;
741#ifdef CONFIG_NET_POLL_CONTROLLER
742 dev->poll_controller = am79c961_poll_controller;
743#endif
744 750
745 ret = register_netdev(dev); 751 ret = register_netdev(dev);
746 if (ret == 0) { 752 if (ret == 0) {
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 442938d50380..7f4bc8ae5462 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -577,7 +577,7 @@ static void at91ether_sethashtable(struct net_device *dev)
577/* 577/*
578 * Enable/Disable promiscuous and multicast modes. 578 * Enable/Disable promiscuous and multicast modes.
579 */ 579 */
580static void at91ether_set_rx_mode(struct net_device *dev) 580static void at91ether_set_multicast_list(struct net_device *dev)
581{ 581{
582 unsigned long cfg; 582 unsigned long cfg;
583 583
@@ -808,7 +808,7 @@ static int at91ether_close(struct net_device *dev)
808/* 808/*
809 * Transmit packet. 809 * Transmit packet.
810 */ 810 */
811static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) 811static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
812{ 812{
813 struct at91_private *lp = netdev_priv(dev); 813 struct at91_private *lp = netdev_priv(dev);
814 814
@@ -828,7 +828,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
828 828
829 dev->trans_start = jiffies; 829 dev->trans_start = jiffies;
830 } else { 830 } else {
831 printk(KERN_ERR "at91_ether.c: at91ether_tx() called, but device is busy!\n"); 831 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
832 return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) 832 return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
833 on this skb, he also reports -ENETDOWN and printk's, so either 833 on this skb, he also reports -ENETDOWN and printk's, so either
834 we free and return(0) or don't free and return 1 */ 834 we free and return(0) or don't free and return 1 */
@@ -965,6 +965,21 @@ static void at91ether_poll_controller(struct net_device *dev)
965} 965}
966#endif 966#endif
967 967
968static const struct net_device_ops at91ether_netdev_ops = {
969 .ndo_open = at91ether_open,
970 .ndo_stop = at91ether_close,
971 .ndo_start_xmit = at91ether_start_xmit,
972 .ndo_get_stats = at91ether_stats,
973 .ndo_set_multicast_list = at91ether_set_multicast_list,
974 .ndo_set_mac_address = set_mac_address,
975 .ndo_do_ioctl = at91ether_ioctl,
976 .ndo_validate_addr = eth_validate_addr,
977 .ndo_change_mtu = eth_change_mtu,
978#ifdef CONFIG_NET_POLL_CONTROLLER
979 .ndo_poll_controller = at91ether_poll_controller,
980#endif
981};
982
968/* 983/*
969 * Initialize the ethernet interface 984 * Initialize the ethernet interface
970 */ 985 */
@@ -1005,17 +1020,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
1005 spin_lock_init(&lp->lock); 1020 spin_lock_init(&lp->lock);
1006 1021
1007 ether_setup(dev); 1022 ether_setup(dev);
1008 dev->open = at91ether_open; 1023 dev->netdev_ops = &at91ether_netdev_ops;
1009 dev->stop = at91ether_close;
1010 dev->hard_start_xmit = at91ether_tx;
1011 dev->get_stats = at91ether_stats;
1012 dev->set_multicast_list = at91ether_set_rx_mode;
1013 dev->set_mac_address = set_mac_address;
1014 dev->ethtool_ops = &at91ether_ethtool_ops; 1024 dev->ethtool_ops = &at91ether_ethtool_ops;
1015 dev->do_ioctl = at91ether_ioctl;
1016#ifdef CONFIG_NET_POLL_CONTROLLER
1017 dev->poll_controller = at91ether_poll_controller;
1018#endif
1019 1025
1020 SET_NETDEV_DEV(dev, &pdev->dev); 1026 SET_NETDEV_DEV(dev, &pdev->dev);
1021 1027
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index cc7708775da0..41736772c1dd 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -770,7 +770,18 @@ static struct ethtool_ops ep93xx_ethtool_ops = {
770 .get_link = ep93xx_get_link, 770 .get_link = ep93xx_get_link,
771}; 771};
772 772
773struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) 773static const struct net_device_ops ep93xx_netdev_ops = {
774 .ndo_open = ep93xx_open,
775 .ndo_stop = ep93xx_close,
776 .ndo_start_xmit = ep93xx_xmit,
777 .ndo_get_stats = ep93xx_get_stats,
778 .ndo_do_ioctl = ep93xx_ioctl,
779 .ndo_validate_addr = eth_validate_addr,
780 .ndo_change_mtu = eth_change_mtu,
781 .ndo_set_mac_address = eth_mac_addr,
782};
783
784static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
774{ 785{
775 struct net_device *dev; 786 struct net_device *dev;
776 787
@@ -780,12 +791,8 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
780 791
781 memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); 792 memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
782 793
783 dev->get_stats = ep93xx_get_stats;
784 dev->ethtool_ops = &ep93xx_ethtool_ops; 794 dev->ethtool_ops = &ep93xx_ethtool_ops;
785 dev->hard_start_xmit = ep93xx_xmit; 795 dev->netdev_ops = &ep93xx_netdev_ops;
786 dev->open = ep93xx_open;
787 dev->stop = ep93xx_close;
788 dev->do_ioctl = ep93xx_ioctl;
789 796
790 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 797 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
791 798
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index e380de454463..edf770f639fa 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -991,6 +991,18 @@ static void __devinit ether1_banner(void)
991 printk(KERN_INFO "%s", version); 991 printk(KERN_INFO "%s", version);
992} 992}
993 993
994static const struct net_device_ops ether1_netdev_ops = {
995 .ndo_open = ether1_open,
996 .ndo_stop = ether1_close,
997 .ndo_start_xmit = ether1_sendpacket,
998 .ndo_get_stats = ether1_getstats,
999 .ndo_set_multicast_list = ether1_setmulticastlist,
1000 .ndo_tx_timeout = ether1_timeout,
1001 .ndo_validate_addr = eth_validate_addr,
1002 .ndo_change_mtu = eth_change_mtu,
1003 .ndo_set_mac_address = eth_mac_addr,
1004};
1005
994static int __devinit 1006static int __devinit
995ether1_probe(struct expansion_card *ec, const struct ecard_id *id) 1007ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
996{ 1008{
@@ -1031,12 +1043,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
1031 goto free; 1043 goto free;
1032 } 1044 }
1033 1045
1034 dev->open = ether1_open; 1046 dev->netdev_ops = &ether1_netdev_ops;
1035 dev->stop = ether1_close;
1036 dev->hard_start_xmit = ether1_sendpacket;
1037 dev->get_stats = ether1_getstats;
1038 dev->set_multicast_list = ether1_setmulticastlist;
1039 dev->tx_timeout = ether1_timeout;
1040 dev->watchdog_timeo = 5 * HZ / 100; 1047 dev->watchdog_timeo = 5 * HZ / 100;
1041 1048
1042 ret = register_netdev(dev); 1049 ret = register_netdev(dev);
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 21a7bef12d3b..ec8a1ae1e887 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -770,6 +770,18 @@ static void __devinit ether3_banner(void)
770 printk(KERN_INFO "%s", version); 770 printk(KERN_INFO "%s", version);
771} 771}
772 772
773static const struct net_device_ops ether3_netdev_ops = {
774 .ndo_open = ether3_open,
775 .ndo_stop = ether3_close,
776 .ndo_start_xmit = ether3_sendpacket,
777 .ndo_get_stats = ether3_getstats,
778 .ndo_set_multicast_list = ether3_setmulticastlist,
779 .ndo_tx_timeout = ether3_timeout,
780 .ndo_validate_addr = eth_validate_addr,
781 .ndo_change_mtu = eth_change_mtu,
782 .ndo_set_mac_address = eth_mac_addr,
783};
784
773static int __devinit 785static int __devinit
774ether3_probe(struct expansion_card *ec, const struct ecard_id *id) 786ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
775{ 787{
@@ -846,12 +858,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
846 goto free; 858 goto free;
847 } 859 }
848 860
849 dev->open = ether3_open; 861 dev->netdev_ops = &ether3_netdev_ops;
850 dev->stop = ether3_close;
851 dev->hard_start_xmit = ether3_sendpacket;
852 dev->get_stats = ether3_getstats;
853 dev->set_multicast_list = ether3_setmulticastlist;
854 dev->tx_timeout = ether3_timeout;
855 dev->watchdog_timeo = 5 * HZ / 100; 862 dev->watchdog_timeo = 5 * HZ / 100;
856 863
857 ret = register_netdev(dev); 864 ret = register_netdev(dev);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 2d81f6afcb58..5425ab0c38c0 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -453,6 +453,16 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
453 return( ret ); 453 return( ret );
454} 454}
455 455
456static const struct net_device_ops lance_netdev_ops = {
457 .ndo_open = lance_open,
458 .ndo_stop = lance_close,
459 .ndo_start_xmit = lance_start_xmit,
460 .ndo_set_multicast_list = set_multicast_list,
461 .ndo_set_mac_address = lance_set_mac_address,
462 .ndo_tx_timeout = lance_tx_timeout,
463 .ndo_validate_addr = eth_validate_addr,
464 .ndo_change_mtu = eth_change_mtu,
465};
456 466
457static unsigned long __init lance_probe1( struct net_device *dev, 467static unsigned long __init lance_probe1( struct net_device *dev,
458 struct lance_addr *init_rec ) 468 struct lance_addr *init_rec )
@@ -623,15 +633,9 @@ static unsigned long __init lance_probe1( struct net_device *dev,
623 if (did_version++ == 0) 633 if (did_version++ == 0)
624 DPRINTK( 1, ( version )); 634 DPRINTK( 1, ( version ));
625 635
626 /* The LANCE-specific entries in the device structure. */ 636 dev->netdev_ops = &lance_netdev_ops;
627 dev->open = &lance_open;
628 dev->hard_start_xmit = &lance_start_xmit;
629 dev->stop = &lance_close;
630 dev->set_multicast_list = &set_multicast_list;
631 dev->set_mac_address = &lance_set_mac_address;
632 637
633 /* XXX MSch */ 638 /* XXX MSch */
634 dev->tx_timeout = lance_tx_timeout;
635 dev->watchdog_timeo = TX_TIMEOUT; 639 dev->watchdog_timeo = TX_TIMEOUT;
636 640
637 return( 1 ); 641 return( 1 );
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4274e4ac963b..d58c105fc779 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1004,12 +1004,12 @@ static void au1000_tx_timeout(struct net_device *dev)
1004 netif_wake_queue(dev); 1004 netif_wake_queue(dev);
1005} 1005}
1006 1006
1007static void set_rx_mode(struct net_device *dev) 1007static void au1000_multicast_list(struct net_device *dev)
1008{ 1008{
1009 struct au1000_private *aup = netdev_priv(dev); 1009 struct au1000_private *aup = netdev_priv(dev);
1010 1010
1011 if (au1000_debug > 4) 1011 if (au1000_debug > 4)
1012 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags); 1012 printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
1013 1013
1014 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1014 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1015 aup->mac->control |= MAC_PROMISCUOUS; 1015 aup->mac->control |= MAC_PROMISCUOUS;
@@ -1047,6 +1047,18 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1047 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); 1047 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
1048} 1048}
1049 1049
1050static const struct net_device_ops au1000_netdev_ops = {
1051 .ndo_open = au1000_open,
1052 .ndo_stop = au1000_close,
1053 .ndo_start_xmit = au1000_tx,
1054 .ndo_set_multicast_list = au1000_multicast_list,
1055 .ndo_do_ioctl = au1000_ioctl,
1056 .ndo_tx_timeout = au1000_tx_timeout,
1057 .ndo_set_mac_address = eth_mac_addr,
1058 .ndo_validate_addr = eth_validate_addr,
1059 .ndo_change_mtu = eth_change_mtu,
1060};
1061
1050static struct net_device * au1000_probe(int port_num) 1062static struct net_device * au1000_probe(int port_num)
1051{ 1063{
1052 static unsigned version_printed = 0; 1064 static unsigned version_printed = 0;
@@ -1197,13 +1209,8 @@ static struct net_device * au1000_probe(int port_num)
1197 1209
1198 dev->base_addr = base; 1210 dev->base_addr = base;
1199 dev->irq = irq; 1211 dev->irq = irq;
1200 dev->open = au1000_open; 1212 dev->netdev_ops = &au1000_netdev_ops;
1201 dev->hard_start_xmit = au1000_tx;
1202 dev->stop = au1000_close;
1203 dev->set_multicast_list = &set_rx_mode;
1204 dev->do_ioctl = &au1000_ioctl;
1205 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); 1213 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1206 dev->tx_timeout = au1000_tx_timeout;
1207 dev->watchdog_timeo = ETH_TX_TIMEOUT; 1214 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1208 1215
1209 /* 1216 /*
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 04f4b73fa8d8..9592f22e4c8c 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -319,7 +319,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
319 319
320 be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause, 320 be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause,
321 &ecmd->rx_pause); 321 &ecmd->rx_pause);
322 ecmd->autoneg = AUTONEG_ENABLE; 322 ecmd->autoneg = 0;
323} 323}
324 324
325static int 325static int
@@ -328,7 +328,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
328 struct be_adapter *adapter = netdev_priv(netdev); 328 struct be_adapter *adapter = netdev_priv(netdev);
329 int status; 329 int status;
330 330
331 if (ecmd->autoneg != AUTONEG_ENABLE) 331 if (ecmd->autoneg != 0)
332 return -EINVAL; 332 return -EINVAL;
333 333
334 status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause, 334 status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause,
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 9afe8092dfc4..9f971ed6b58d 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -979,6 +979,20 @@ static int bfin_mac_open(struct net_device *dev)
979 return 0; 979 return 0;
980} 980}
981 981
982static const struct net_device_ops bfin_mac_netdev_ops = {
983 .ndo_open = bfin_mac_open,
984 .ndo_stop = bfin_mac_close,
985 .ndo_start_xmit = bfin_mac_hard_start_xmit,
986 .ndo_set_mac_address = bfin_mac_set_mac_address,
987 .ndo_tx_timeout = bfin_mac_timeout,
988 .ndo_set_multicast_list = bfin_mac_set_multicast_list,
989 .ndo_validate_addr = eth_validate_addr,
990 .ndo_change_mtu = eth_change_mtu,
991#ifdef CONFIG_NET_POLL_CONTROLLER
992 .ndo_poll_controller = bfin_mac_poll,
993#endif
994};
995
982/* 996/*
983 * 997 *
984 * this makes the board clean up everything that it can 998 * this makes the board clean up everything that it can
@@ -1086,15 +1100,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1086 /* Fill in the fields of the device structure with ethernet values. */ 1100 /* Fill in the fields of the device structure with ethernet values. */
1087 ether_setup(ndev); 1101 ether_setup(ndev);
1088 1102
1089 ndev->open = bfin_mac_open; 1103 ndev->netdev_ops = &bfin_mac_netdev_ops;
1090 ndev->stop = bfin_mac_close;
1091 ndev->hard_start_xmit = bfin_mac_hard_start_xmit;
1092 ndev->set_mac_address = bfin_mac_set_mac_address;
1093 ndev->tx_timeout = bfin_mac_timeout;
1094 ndev->set_multicast_list = bfin_mac_set_multicast_list;
1095#ifdef CONFIG_NET_POLL_CONTROLLER
1096 ndev->poll_controller = bfin_mac_poll;
1097#endif
1098 ndev->ethtool_ops = &bfin_mac_ethtool_ops; 1104 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1099 1105
1100 spin_lock_init(&lp->lock); 1106 spin_lock_init(&lp->lock);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 8dc6fbb9a41e..553a89919778 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
370 370
371 if (arp->op_code == htons(ARPOP_REPLY)) { 371 if (arp->op_code == htons(ARPOP_REPLY)) {
372 /* update rx hash table for this ARP */ 372 /* update rx hash table for this ARP */
373 printk("rar: update orig %s bond_dev %s\n", orig_dev->name,
374 bond_dev->name);
375 bond = netdev_priv(bond_dev); 373 bond = netdev_priv(bond_dev);
376 rlb_update_entry_from_arp(bond, arp); 374 rlb_update_entry_from_arp(bond, arp);
377 pr_debug("Server received an ARP Reply from client\n"); 375 pr_debug("Server received an ARP Reply from client\n");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 99610f358c40..63369b6b14d4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2570,7 +2570,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2570 2570
2571 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 2571 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
2572 if (!targets[i]) 2572 if (!targets[i])
2573 continue; 2573 break;
2574 pr_debug("basa: target %x\n", targets[i]); 2574 pr_debug("basa: target %x\n", targets[i]);
2575 if (list_empty(&bond->vlan_list)) { 2575 if (list_empty(&bond->vlan_list)) {
2576 pr_debug("basa: empty vlan: arp_send\n"); 2576 pr_debug("basa: empty vlan: arp_send\n");
@@ -2677,7 +2677,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2677 int i; 2677 int i;
2678 __be32 *targets = bond->params.arp_targets; 2678 __be32 *targets = bond->params.arp_targets;
2679 2679
2680 targets = bond->params.arp_targets;
2681 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { 2680 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
2682 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", 2681 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
2683 &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); 2682 &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip));
@@ -3303,7 +3302,7 @@ static void bond_info_show_master(struct seq_file *seq)
3303 3302
3304 for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) { 3303 for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) {
3305 if (!bond->params.arp_targets[i]) 3304 if (!bond->params.arp_targets[i])
3306 continue; 3305 break;
3307 if (printed) 3306 if (printed)
3308 seq_printf(seq, ","); 3307 seq_printf(seq, ",");
3309 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); 3308 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 18cf4787874c..d28731535226 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
684 goto out; 684 goto out;
685 } 685 }
686 /* look for an empty slot to put the target in, and check for dupes */ 686 /* look for an empty slot to put the target in, and check for dupes */
687 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 687 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
688 if (targets[i] == newtarget) { /* duplicate */ 688 if (targets[i] == newtarget) { /* duplicate */
689 printk(KERN_ERR DRV_NAME 689 printk(KERN_ERR DRV_NAME
690 ": %s: ARP target %pI4 is already present\n", 690 ": %s: ARP target %pI4 is already present\n",
691 bond->dev->name, &newtarget); 691 bond->dev->name, &newtarget);
692 if (done)
693 targets[i] = 0;
694 ret = -EINVAL; 692 ret = -EINVAL;
695 goto out; 693 goto out;
696 } 694 }
697 if (targets[i] == 0 && !done) { 695 if (targets[i] == 0) {
698 printk(KERN_INFO DRV_NAME 696 printk(KERN_INFO DRV_NAME
699 ": %s: adding ARP target %pI4.\n", 697 ": %s: adding ARP target %pI4.\n",
700 bond->dev->name, &newtarget); 698 bond->dev->name, &newtarget);
@@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d,
720 goto out; 718 goto out;
721 } 719 }
722 720
723 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 721 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) {
724 if (targets[i] == newtarget) { 722 if (targets[i] == newtarget) {
723 int j;
725 printk(KERN_INFO DRV_NAME 724 printk(KERN_INFO DRV_NAME
726 ": %s: removing ARP target %pI4.\n", 725 ": %s: removing ARP target %pI4.\n",
727 bond->dev->name, &newtarget); 726 bond->dev->name, &newtarget);
728 targets[i] = 0; 727 for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++)
728 targets[j] = targets[j+1];
729
730 targets[j] = 0;
729 done = 1; 731 done = 1;
730 } 732 }
731 } 733 }
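
With this change the arp_targets[] array is kept packed: removing a target shifts the remaining entries down instead of leaving a hole, which is why the readers in bond_main.c above can now stop at the first zero entry (continue becomes break). A simplified, userspace-style C sketch of the compaction, showing only the index arithmetic:

    #define MAX_TARGETS 16

    static void remove_target(unsigned int *targets, unsigned int victim)
    {
            int i, j;

            for (i = 0; i < MAX_TARGETS && targets[i]; i++) {
                    if (targets[i] != victim)
                            continue;
                    /* shift the tail left so the first zero still marks the end */
                    for (j = i; j < MAX_TARGETS - 1 && targets[j + 1]; j++)
                            targets[j] = targets[j + 1];
                    targets[j] = 0;
                    break;
            }
    }
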
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index c9806c58b2fd..7a18dc7e5c7f 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -257,6 +257,23 @@ struct transceiver_ops transceivers[] =
257 257
258struct transceiver_ops* transceiver = &transceivers[0]; 258struct transceiver_ops* transceiver = &transceivers[0];
259 259
260static const struct net_device_ops e100_netdev_ops = {
261 .ndo_open = e100_open,
262 .ndo_stop = e100_close,
263 .ndo_start_xmit = e100_send_packet,
264 .ndo_tx_timeout = e100_tx_timeout,
265 .ndo_get_stats = e100_get_stats,
266 .ndo_set_multicast_list = set_multicast_list,
267 .ndo_do_ioctl = e100_ioctl,
268 .ndo_set_mac_address = e100_set_mac_address,
269 .ndo_validate_addr = eth_validate_addr,
270 .ndo_change_mtu = eth_change_mtu,
271 .ndo_set_config = e100_set_config,
272#ifdef CONFIG_NET_POLL_CONTROLLER
273 .ndo_poll_controller = e100_netpoll,
274#endif
275};
276
260#define tx_done(dev) (*R_DMA_CH0_CMD == 0) 277#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
261 278
262/* 279/*
@@ -300,19 +317,8 @@ etrax_ethernet_init(void)
300 317
301 /* fill in our handlers so the network layer can talk to us in the future */ 318 /* fill in our handlers so the network layer can talk to us in the future */
302 319
303 dev->open = e100_open;
304 dev->hard_start_xmit = e100_send_packet;
305 dev->stop = e100_close;
306 dev->get_stats = e100_get_stats;
307 dev->set_multicast_list = set_multicast_list;
308 dev->set_mac_address = e100_set_mac_address;
309 dev->ethtool_ops = &e100_ethtool_ops; 320 dev->ethtool_ops = &e100_ethtool_ops;
310 dev->do_ioctl = e100_ioctl; 321 dev->netdev_ops = &e100_netdev_ops;
311 dev->set_config = e100_set_config;
312 dev->tx_timeout = e100_tx_timeout;
313#ifdef CONFIG_NET_POLL_CONTROLLER
314 dev->poll_controller = e100_netpoll;
315#endif
316 322
317 spin_lock_init(&np->lock); 323 spin_lock_init(&np->lock);
318 spin_lock_init(&np->led_lock); 324 spin_lock_init(&np->led_lock);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 861c867fca87..b62405a69180 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1010,6 +1010,17 @@ static void lance_set_multicast_retry(unsigned long _opaque)
1010 lance_set_multicast(dev); 1010 lance_set_multicast(dev);
1011} 1011}
1012 1012
1013static const struct net_device_ops lance_netdev_ops = {
1014 .ndo_open = lance_open,
1015 .ndo_stop = lance_close,
1016 .ndo_start_xmit = lance_start_xmit,
1017 .ndo_tx_timeout = lance_tx_timeout,
1018 .ndo_set_multicast_list = lance_set_multicast,
1019 .ndo_change_mtu = eth_change_mtu,
1020 .ndo_validate_addr = eth_validate_addr,
1021 .ndo_set_mac_address = eth_mac_addr,
1022};
1023
1013static int __init dec_lance_probe(struct device *bdev, const int type) 1024static int __init dec_lance_probe(struct device *bdev, const int type)
1014{ 1025{
1015 static unsigned version_printed; 1026 static unsigned version_printed;
@@ -1223,12 +1234,8 @@ static int __init dec_lance_probe(struct device *bdev, const int type)
1223 1234
1224 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); 1235 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1225 1236
1226 dev->open = &lance_open; 1237 dev->netdev_ops = &lance_netdev_ops;
1227 dev->stop = &lance_close;
1228 dev->hard_start_xmit = &lance_start_xmit;
1229 dev->tx_timeout = &lance_tx_timeout;
1230 dev->watchdog_timeo = 5*HZ; 1238 dev->watchdog_timeo = 5*HZ;
1231 dev->set_multicast_list = &lance_set_multicast;
1232 1239
1233 /* lp->ll is the location of the registers for lance card */ 1240 /* lp->ll is the location of the registers for lance card */
1234 lp->ll = ll; 1241 lp->ll = ll;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ddc5c533e89c..ef12931d302a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -156,8 +156,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
156static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); 156static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
157static void e1000_restore_vlan(struct e1000_adapter *adapter); 157static void e1000_restore_vlan(struct e1000_adapter *adapter);
158 158
159static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
160#ifdef CONFIG_PM 159#ifdef CONFIG_PM
160static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
161static int e1000_resume(struct pci_dev *pdev); 161static int e1000_resume(struct pci_dev *pdev);
162#endif 162#endif
163static void e1000_shutdown(struct pci_dev *pdev); 163static void e1000_shutdown(struct pci_dev *pdev);
@@ -3834,7 +3834,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3834 struct e1000_buffer *buffer_info; 3834 struct e1000_buffer *buffer_info;
3835 unsigned int i, eop; 3835 unsigned int i, eop;
3836 unsigned int count = 0; 3836 unsigned int count = 0;
3837 bool cleaned; 3837 bool cleaned = false;
3838 unsigned int total_tx_bytes=0, total_tx_packets=0; 3838 unsigned int total_tx_bytes=0, total_tx_packets=0;
3839 3839
3840 i = tx_ring->next_to_clean; 3840 i = tx_ring->next_to_clean;
@@ -4601,7 +4601,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4601 return 0; 4601 return 0;
4602} 4602}
4603 4603
4604static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4604static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4605{ 4605{
4606 struct net_device *netdev = pci_get_drvdata(pdev); 4606 struct net_device *netdev = pci_get_drvdata(pdev);
4607 struct e1000_adapter *adapter = netdev_priv(netdev); 4607 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4664,22 +4664,18 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4664 4664
4665 ew32(WUC, E1000_WUC_PME_EN); 4665 ew32(WUC, E1000_WUC_PME_EN);
4666 ew32(WUFC, wufc); 4666 ew32(WUFC, wufc);
4667 pci_enable_wake(pdev, PCI_D3hot, 1);
4668 pci_enable_wake(pdev, PCI_D3cold, 1);
4669 } else { 4667 } else {
4670 ew32(WUC, 0); 4668 ew32(WUC, 0);
4671 ew32(WUFC, 0); 4669 ew32(WUFC, 0);
4672 pci_enable_wake(pdev, PCI_D3hot, 0);
4673 pci_enable_wake(pdev, PCI_D3cold, 0);
4674 } 4670 }
4675 4671
4676 e1000_release_manageability(adapter); 4672 e1000_release_manageability(adapter);
4677 4673
4674 *enable_wake = !!wufc;
4675
4678 /* make sure adapter isn't asleep if manageability is enabled */ 4676 /* make sure adapter isn't asleep if manageability is enabled */
4679 if (adapter->en_mng_pt) { 4677 if (adapter->en_mng_pt)
4680 pci_enable_wake(pdev, PCI_D3hot, 1); 4678 *enable_wake = true;
4681 pci_enable_wake(pdev, PCI_D3cold, 1);
4682 }
4683 4679
4684 if (hw->phy_type == e1000_phy_igp_3) 4680 if (hw->phy_type == e1000_phy_igp_3)
4685 e1000_phy_powerdown_workaround(hw); 4681 e1000_phy_powerdown_workaround(hw);
@@ -4693,12 +4689,29 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4693 4689
4694 pci_disable_device(pdev); 4690 pci_disable_device(pdev);
4695 4691
4696 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4697
4698 return 0; 4692 return 0;
4699} 4693}
4700 4694
4701#ifdef CONFIG_PM 4695#ifdef CONFIG_PM
4696static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4697{
4698 int retval;
4699 bool wake;
4700
4701 retval = __e1000_shutdown(pdev, &wake);
4702 if (retval)
4703 return retval;
4704
4705 if (wake) {
4706 pci_prepare_to_sleep(pdev);
4707 } else {
4708 pci_wake_from_d3(pdev, false);
4709 pci_set_power_state(pdev, PCI_D3hot);
4710 }
4711
4712 return 0;
4713}
4714
4702static int e1000_resume(struct pci_dev *pdev) 4715static int e1000_resume(struct pci_dev *pdev)
4703{ 4716{
4704 struct net_device *netdev = pci_get_drvdata(pdev); 4717 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4753,7 +4766,14 @@ static int e1000_resume(struct pci_dev *pdev)
4753 4766
4754static void e1000_shutdown(struct pci_dev *pdev) 4767static void e1000_shutdown(struct pci_dev *pdev)
4755{ 4768{
4756 e1000_suspend(pdev, PMSG_SUSPEND); 4769 bool wake;
4770
4771 __e1000_shutdown(pdev, &wake);
4772
4773 if (system_state == SYSTEM_POWER_OFF) {
4774 pci_wake_from_d3(pdev, wake);
4775 pci_set_power_state(pdev, PCI_D3hot);
4776 }
4757} 4777}
4758 4778
4759#ifdef CONFIG_NET_POLL_CONTROLLER 4779#ifdef CONFIG_NET_POLL_CONTROLLER
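
The refactor above splits the old e1000_suspend() in two: __e1000_shutdown() only quiesces the hardware and reports through *enable_wake whether wake-up should be armed, while the callers decide the power policy, pci_prepare_to_sleep() when wake is wanted versus plain D3hot with wake disabled. A trimmed sketch of that caller-side pattern, matching the hunk above (error handling and the function name are illustrative):

    static int example_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            bool wake;
            int err = __e1000_shutdown(pdev, &wake);   /* quiesce, report WoL need */

            if (err)
                    return err;

            if (wake) {
                    pci_prepare_to_sleep(pdev);        /* platform picks the state */
            } else {
                    pci_wake_from_d3(pdev, false);     /* no wake sources armed */
                    pci_set_power_state(pdev, PCI_D3hot);
            }
            return 0;
    }
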
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 409b58cad0e5..1693ed116b16 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -621,7 +621,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
621 struct e1000_buffer *buffer_info; 621 struct e1000_buffer *buffer_info;
622 unsigned int i, eop; 622 unsigned int i, eop;
623 unsigned int count = 0; 623 unsigned int count = 0;
624 bool cleaned; 624 bool cleaned = false;
625 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 625 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
626 626
627 i = tx_ring->next_to_clean; 627 i = tx_ring->next_to_clean;
@@ -4346,7 +4346,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4346 } 4346 }
4347} 4347}
4348 4348
4349static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4349static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4350{ 4350{
4351 struct net_device *netdev = pci_get_drvdata(pdev); 4351 struct net_device *netdev = pci_get_drvdata(pdev);
4352 struct e1000_adapter *adapter = netdev_priv(netdev); 4352 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4409,20 +4409,16 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4409 4409
4410 ew32(WUC, E1000_WUC_PME_EN); 4410 ew32(WUC, E1000_WUC_PME_EN);
4411 ew32(WUFC, wufc); 4411 ew32(WUFC, wufc);
4412 pci_enable_wake(pdev, PCI_D3hot, 1);
4413 pci_enable_wake(pdev, PCI_D3cold, 1);
4414 } else { 4412 } else {
4415 ew32(WUC, 0); 4413 ew32(WUC, 0);
4416 ew32(WUFC, 0); 4414 ew32(WUFC, 0);
4417 pci_enable_wake(pdev, PCI_D3hot, 0);
4418 pci_enable_wake(pdev, PCI_D3cold, 0);
4419 } 4415 }
4420 4416
4417 *enable_wake = !!wufc;
4418
4421 /* make sure adapter isn't asleep if manageability is enabled */ 4419 /* make sure adapter isn't asleep if manageability is enabled */
4422 if (adapter->flags & FLAG_MNG_PT_ENABLED) { 4420 if (adapter->flags & FLAG_MNG_PT_ENABLED)
4423 pci_enable_wake(pdev, PCI_D3hot, 1); 4421 *enable_wake = true;
4424 pci_enable_wake(pdev, PCI_D3cold, 1);
4425 }
4426 4422
4427 if (adapter->hw.phy.type == e1000_phy_igp_3) 4423 if (adapter->hw.phy.type == e1000_phy_igp_3)
4428 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 4424 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
@@ -4435,6 +4431,26 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4435 4431
4436 pci_disable_device(pdev); 4432 pci_disable_device(pdev);
4437 4433
4434 return 0;
4435}
4436
4437static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
4438{
4439 if (sleep && wake) {
4440 pci_prepare_to_sleep(pdev);
4441 return;
4442 }
4443
4444 pci_wake_from_d3(pdev, wake);
4445 pci_set_power_state(pdev, PCI_D3hot);
4446}
4447
4448static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
4449 bool wake)
4450{
4451 struct net_device *netdev = pci_get_drvdata(pdev);
4452 struct e1000_adapter *adapter = netdev_priv(netdev);
4453
4438 /* 4454 /*
4439 * The pci-e switch on some quad port adapters will report a 4455 * The pci-e switch on some quad port adapters will report a
4440 * correctable error when the MAC transitions from D0 to D3. To 4456 * correctable error when the MAC transitions from D0 to D3. To
@@ -4450,14 +4466,12 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4450 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, 4466 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
4451 (devctl & ~PCI_EXP_DEVCTL_CERE)); 4467 (devctl & ~PCI_EXP_DEVCTL_CERE));
4452 4468
4453 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4469 e1000_power_off(pdev, sleep, wake);
4454 4470
4455 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); 4471 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
4456 } else { 4472 } else {
4457 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4473 e1000_power_off(pdev, sleep, wake);
4458 } 4474 }
4459
4460 return 0;
4461} 4475}
4462 4476
4463static void e1000e_disable_l1aspm(struct pci_dev *pdev) 4477static void e1000e_disable_l1aspm(struct pci_dev *pdev)
@@ -4486,6 +4500,18 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev)
4486} 4500}
4487 4501
4488#ifdef CONFIG_PM 4502#ifdef CONFIG_PM
4503static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4504{
4505 int retval;
4506 bool wake;
4507
4508 retval = __e1000_shutdown(pdev, &wake);
4509 if (!retval)
4510 e1000_complete_shutdown(pdev, true, wake);
4511
4512 return retval;
4513}
4514
4489static int e1000_resume(struct pci_dev *pdev) 4515static int e1000_resume(struct pci_dev *pdev)
4490{ 4516{
4491 struct net_device *netdev = pci_get_drvdata(pdev); 4517 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4549,7 +4575,12 @@ static int e1000_resume(struct pci_dev *pdev)
4549 4575
4550static void e1000_shutdown(struct pci_dev *pdev) 4576static void e1000_shutdown(struct pci_dev *pdev)
4551{ 4577{
4552 e1000_suspend(pdev, PMSG_SUSPEND); 4578 bool wake = false;
4579
4580 __e1000_shutdown(pdev, &wake);
4581
4582 if (system_state == SYSTEM_POWER_OFF)
4583 e1000_complete_shutdown(pdev, false, wake);
4553} 4584}
4554 4585
4555#ifdef CONFIG_NET_POLL_CONTROLLER 4586#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index ac0c5b438e0a..604c844d0769 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3080,7 +3080,8 @@ static const struct net_device_ops ehea_netdev_ops = {
 	.ndo_change_mtu		= ehea_change_mtu,
 	.ndo_vlan_rx_register	= ehea_vlan_rx_register,
 	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid
+	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
+	.ndo_tx_timeout		= ehea_tx_watchdog,
 };
 
 struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
@@ -3142,7 +3143,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
 		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
 		      | NETIF_F_LLTX;
-	dev->tx_timeout = &ehea_tx_watchdog;
 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
 	INIT_WORK(&port->reset_task, ehea_reset_port);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d37465020bcc..11d5db16ed9c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3745,14 +3745,14 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
 			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 		}
 		spin_unlock_irqrestore(&np->lock, flags);
-		__napi_complete(napi);
+		napi_complete(napi);
 		return rx_work;
 	}
 
 	if (rx_work < budget) {
 		/* re-enable interrupts
 		   (msix not enabled in napi) */
-		__napi_complete(napi);
+		napi_complete(napi);
 
 		writel(np->irqmask, base + NvRegIrqMask);
 	}
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index b037ce9857bf..a9cbc3191a2a 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1019,6 +1019,22 @@ out_put_phy:
1019#define IS_FEC(match) 0 1019#define IS_FEC(match) 0
1020#endif 1020#endif
1021 1021
1022static const struct net_device_ops fs_enet_netdev_ops = {
1023 .ndo_open = fs_enet_open,
1024 .ndo_stop = fs_enet_close,
1025 .ndo_get_stats = fs_enet_get_stats,
1026 .ndo_start_xmit = fs_enet_start_xmit,
1027 .ndo_tx_timeout = fs_timeout,
1028 .ndo_set_multicast_list = fs_set_multicast_list,
1029 .ndo_do_ioctl = fs_ioctl,
1030 .ndo_validate_addr = eth_validate_addr,
1031 .ndo_set_mac_address = eth_mac_addr,
1032 .ndo_change_mtu = eth_change_mtu,
1033#ifdef CONFIG_NET_POLL_CONTROLLER
1034 .ndo_poll_controller = fs_enet_netpoll,
1035#endif
1036};
1037
1022static int __devinit fs_enet_probe(struct of_device *ofdev, 1038static int __devinit fs_enet_probe(struct of_device *ofdev,
1023 const struct of_device_id *match) 1039 const struct of_device_id *match)
1024{ 1040{
@@ -1093,22 +1109,13 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
 	fep->tx_ring = fpi->tx_ring;
 	fep->rx_ring = fpi->rx_ring;
 
-	ndev->open = fs_enet_open;
-	ndev->hard_start_xmit = fs_enet_start_xmit;
-	ndev->tx_timeout = fs_timeout;
+	ndev->netdev_ops = &fs_enet_netdev_ops;
 	ndev->watchdog_timeo = 2 * HZ;
-	ndev->stop = fs_enet_close;
-	ndev->get_stats = fs_enet_get_stats;
-	ndev->set_multicast_list = fs_set_multicast_list;
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	ndev->poll_controller = fs_enet_netpoll;
-#endif
 	if (fpi->use_napi)
 		netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
 			       fpi->napi_weight);
 
 	ndev->ethtool_ops = &fs_ethtool_ops;
-	ndev->do_ioctl = fs_ioctl;
 
 	init_timer(&fep->phy_timer_list);
 
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 65f55877be95..b2c49679bba7 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1583,8 +1583,10 @@ static void gfar_reset_task(struct work_struct *work)
1583 struct net_device *dev = priv->ndev; 1583 struct net_device *dev = priv->ndev;
1584 1584
1585 if (dev->flags & IFF_UP) { 1585 if (dev->flags & IFF_UP) {
1586 netif_stop_queue(dev);
1586 stop_gfar(dev); 1587 stop_gfar(dev);
1587 startup_gfar(dev); 1588 startup_gfar(dev);
1589 netif_start_queue(dev);
1588 } 1590 }
1589 1591
1590 netif_tx_schedule_all(dev); 1592 netif_tx_schedule_all(dev);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 77e4b5b52fc8..806533c831c7 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2686,6 +2686,32 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2686 return 0; 2686 return 0;
2687} 2687}
2688 2688
2689static const struct net_device_ops emac_netdev_ops = {
2690 .ndo_open = emac_open,
2691 .ndo_stop = emac_close,
2692 .ndo_get_stats = emac_stats,
2693 .ndo_set_multicast_list = emac_set_multicast_list,
2694 .ndo_do_ioctl = emac_ioctl,
2695 .ndo_tx_timeout = emac_tx_timeout,
2696 .ndo_validate_addr = eth_validate_addr,
2697 .ndo_set_mac_address = eth_mac_addr,
2698 .ndo_start_xmit = emac_start_xmit,
2699 .ndo_change_mtu = eth_change_mtu,
2700};
2701
2702static const struct net_device_ops emac_gige_netdev_ops = {
2703 .ndo_open = emac_open,
2704 .ndo_stop = emac_close,
2705 .ndo_get_stats = emac_stats,
2706 .ndo_set_multicast_list = emac_set_multicast_list,
2707 .ndo_do_ioctl = emac_ioctl,
2708 .ndo_tx_timeout = emac_tx_timeout,
2709 .ndo_validate_addr = eth_validate_addr,
2710 .ndo_set_mac_address = eth_mac_addr,
2711 .ndo_start_xmit = emac_start_xmit_sg,
2712 .ndo_change_mtu = emac_change_mtu,
2713};
2714
2689static int __devinit emac_probe(struct of_device *ofdev, 2715static int __devinit emac_probe(struct of_device *ofdev,
2690 const struct of_device_id *match) 2716 const struct of_device_id *match)
2691{ 2717{
@@ -2827,23 +2853,14 @@ static int __devinit emac_probe(struct of_device *ofdev,
 	if (err != 0)
 		goto err_detach_tah;
 
-	/* Fill in the driver function table */
-	ndev->open = &emac_open;
 	if (dev->tah_dev)
 		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
-	ndev->tx_timeout = &emac_tx_timeout;
 	ndev->watchdog_timeo = 5 * HZ;
-	ndev->stop = &emac_close;
-	ndev->get_stats = &emac_stats;
-	ndev->set_multicast_list = &emac_set_multicast_list;
-	ndev->do_ioctl = &emac_ioctl;
 	if (emac_phy_supports_gige(dev->phy_mode)) {
-		ndev->hard_start_xmit = &emac_start_xmit_sg;
-		ndev->change_mtu = &emac_change_mtu;
+		ndev->netdev_ops = &emac_gige_netdev_ops;
 		dev->commac.ops = &emac_commac_sg_ops;
-	} else {
-		ndev->hard_start_xmit = &emac_start_xmit;
-	}
+	} else
+		ndev->netdev_ops = &emac_netdev_ops;
 	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
 
 	netif_carrier_off(ndev);
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index f4c315b5a900..472f3f124840 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -111,7 +111,7 @@ void igb_clear_vfta(struct e1000_hw *hw)
111 * Writes value at the given offset in the register array which stores 111 * Writes value at the given offset in the register array which stores
112 * the VLAN filter table. 112 * the VLAN filter table.
113 **/ 113 **/
114void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) 114static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
115{ 115{
116 array_wr32(E1000_VFTA, offset, value); 116 array_wr32(E1000_VFTA, offset, value);
117 wrfl(); 117 wrfl();
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index a34de5269637..1d690b4c9ae4 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -66,7 +66,6 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
66s32 igb_check_alt_mac_addr(struct e1000_hw *hw); 66s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
67void igb_reset_adaptive(struct e1000_hw *hw); 67void igb_reset_adaptive(struct e1000_hw *hw);
68void igb_update_adaptive(struct e1000_hw *hw); 68void igb_update_adaptive(struct e1000_hw *hw);
69void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
70 69
71bool igb_enable_mng_pass_thru(struct e1000_hw *hw); 70bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
72 71
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index fe71c7ddaa05..840782fb5736 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -188,7 +188,7 @@ out:
188 * returns SUCCESS if it successfully received a message notification and 188 * returns SUCCESS if it successfully received a message notification and
189 * copied it into the receive buffer. 189 * copied it into the receive buffer.
190 **/ 190 **/
191s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 191static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
192{ 192{
193 struct e1000_mbx_info *mbx = &hw->mbx; 193 struct e1000_mbx_info *mbx = &hw->mbx;
194 s32 ret_val = -E1000_ERR_MBX; 194 s32 ret_val = -E1000_ERR_MBX;
@@ -214,7 +214,7 @@ out:
214 * returns SUCCESS if it successfully copied message into the buffer and 214 * returns SUCCESS if it successfully copied message into the buffer and
215 * received an ack to that message within delay * timeout period 215 * received an ack to that message within delay * timeout period
216 **/ 216 **/
217s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 217static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
218{ 218{
219 struct e1000_mbx_info *mbx = &hw->mbx; 219 struct e1000_mbx_info *mbx = &hw->mbx;
220 s32 ret_val = 0; 220 s32 ret_val = 0;
@@ -232,19 +232,6 @@ out:
232 return ret_val; 232 return ret_val;
233} 233}
234 234
235/**
236 * e1000_init_mbx_ops_generic - Initialize NVM function pointers
237 * @hw: pointer to the HW structure
238 *
239 * Setups up the function pointers to no-op functions
240 **/
241void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
242{
243 struct e1000_mbx_info *mbx = &hw->mbx;
244 mbx->ops.read_posted = igb_read_posted_mbx;
245 mbx->ops.write_posted = igb_write_posted_mbx;
246}
247
248static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) 235static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
249{ 236{
250 u32 mbvficr = rd32(E1000_MBVFICR); 237 u32 mbvficr = rd32(E1000_MBVFICR);
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index 6ec9890a8f7a..ebc02ea3f198 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -67,8 +67,6 @@
67 67
68s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); 68s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
69s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); 69s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
70s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
71s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
72s32 igb_check_for_msg(struct e1000_hw *, u16); 70s32 igb_check_for_msg(struct e1000_hw *, u16);
73s32 igb_check_for_ack(struct e1000_hw *, u16); 71s32 igb_check_for_ack(struct e1000_hw *, u16);
74s32 igb_check_for_rst(struct e1000_hw *, u16); 72s32 igb_check_for_rst(struct e1000_hw *, u16);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index db7274e62228..08c801490c72 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1261,25 +1261,32 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	int i;
 	unsigned char mac_addr[ETH_ALEN];
 
-		if (num_vfs)
+		if (num_vfs) {
 			adapter->vf_data = kcalloc(num_vfs,
 						   sizeof(struct vf_data_storage),
 						   GFP_KERNEL);
 			if (!adapter->vf_data) {
-				dev_err(&pdev->dev, "Could not allocate VF private "
-					"data - IOV enable failed\n");
-			} else {
-				err = pci_enable_sriov(pdev, num_vfs);
-				if (!err) {
-					adapter->vfs_allocated_count = num_vfs;
-					dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs);
-					for (i = 0; i < adapter->vfs_allocated_count; i++) {
-						random_ether_addr(mac_addr);
-						igb_set_vf_mac(adapter, i, mac_addr);
-					}
-				} else {
-					kfree(adapter->vf_data);
-					adapter->vf_data = NULL;
+				dev_err(&pdev->dev,
+					"Could not allocate VF private data - "
+					"IOV enable failed\n");
+			} else {
+				err = pci_enable_sriov(pdev, num_vfs);
+				if (!err) {
+					adapter->vfs_allocated_count = num_vfs;
+					dev_info(&pdev->dev,
+						 "%d vfs allocated\n",
+						 num_vfs);
+					for (i = 0;
+					     i < adapter->vfs_allocated_count;
+					     i++) {
+						random_ether_addr(mac_addr);
+						igb_set_vf_mac(adapter, i,
+							       mac_addr);
+					}
+				} else {
+					kfree(adapter->vf_data);
+					adapter->vf_data = NULL;
+				}
 				}
 			}
 		}
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index 936ed2a9435f..4bff35e46871 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -321,14 +321,11 @@ extern void igbvf_set_ethtool_ops(struct net_device *);
321extern int igbvf_up(struct igbvf_adapter *); 321extern int igbvf_up(struct igbvf_adapter *);
322extern void igbvf_down(struct igbvf_adapter *); 322extern void igbvf_down(struct igbvf_adapter *);
323extern void igbvf_reinit_locked(struct igbvf_adapter *); 323extern void igbvf_reinit_locked(struct igbvf_adapter *);
324extern void igbvf_reset(struct igbvf_adapter *);
325extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); 324extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
326extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); 325extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
327extern void igbvf_free_rx_resources(struct igbvf_ring *); 326extern void igbvf_free_rx_resources(struct igbvf_ring *);
328extern void igbvf_free_tx_resources(struct igbvf_ring *); 327extern void igbvf_free_tx_resources(struct igbvf_ring *);
329extern void igbvf_update_stats(struct igbvf_adapter *); 328extern void igbvf_update_stats(struct igbvf_adapter *);
330extern void igbvf_set_interrupt_capability(struct igbvf_adapter *);
331extern void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
332 329
333extern unsigned int copybreak; 330extern unsigned int copybreak;
334 331
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index c5648420dedf..b774666ad3cf 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -52,6 +52,9 @@ static const char igbvf_driver_string[] =
52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
53 53
54static int igbvf_poll(struct napi_struct *napi, int budget); 54static int igbvf_poll(struct napi_struct *napi, int budget);
55static void igbvf_reset(struct igbvf_adapter *);
56static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
57static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
55 58
56static struct igbvf_info igbvf_vf_info = { 59static struct igbvf_info igbvf_vf_info = {
57 .mac = e1000_vfadapt, 60 .mac = e1000_vfadapt,
@@ -990,7 +993,7 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
990 e1e_flush(); 993 e1e_flush();
991} 994}
992 995
993void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) 996static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
994{ 997{
995 if (adapter->msix_entries) { 998 if (adapter->msix_entries) {
996 pci_disable_msix(adapter->pdev); 999 pci_disable_msix(adapter->pdev);
@@ -1005,7 +1008,7 @@ void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
1005 * Attempt to configure interrupts using the best available 1008 * Attempt to configure interrupts using the best available
1006 * capabilities of the hardware and kernel. 1009 * capabilities of the hardware and kernel.
1007 **/ 1010 **/
1008void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) 1011static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1009{ 1012{
1010 int err = -ENOMEM; 1013 int err = -ENOMEM;
1011 int i; 1014 int i;
@@ -1447,7 +1450,7 @@ static void igbvf_configure(struct igbvf_adapter *adapter)
1447 * set/changed during runtime. After reset the device needs to be 1450 * set/changed during runtime. After reset the device needs to be
1448 * properly configured for Rx, Tx etc. 1451 * properly configured for Rx, Tx etc.
1449 */ 1452 */
1450void igbvf_reset(struct igbvf_adapter *adapter) 1453static void igbvf_reset(struct igbvf_adapter *adapter)
1451{ 1454{
1452 struct e1000_mac_info *mac = &adapter->hw.mac; 1455 struct e1000_mac_info *mac = &adapter->hw.mac;
1453 struct net_device *netdev = adapter->netdev; 1456 struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index aa246c93279d..2a4faf9ade69 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -44,7 +44,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool);
44 * e1000_init_mac_params_vf - Inits MAC params 44 * e1000_init_mac_params_vf - Inits MAC params
45 * @hw: pointer to the HW structure 45 * @hw: pointer to the HW structure
46 **/ 46 **/
47s32 e1000_init_mac_params_vf(struct e1000_hw *hw) 47static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
48{ 48{
49 struct e1000_mac_info *mac = &hw->mac; 49 struct e1000_mac_info *mac = &hw->mac;
50 50
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index ec07228f9478..1e8ce3741a67 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -259,7 +259,6 @@ struct e1000_hw {
259/* These functions must be implemented by drivers */ 259/* These functions must be implemented by drivers */
260void e1000_rlpml_set_vf(struct e1000_hw *, u16); 260void e1000_rlpml_set_vf(struct e1000_hw *, u16);
261void e1000_init_function_pointers_vf(struct e1000_hw *hw); 261void e1000_init_function_pointers_vf(struct e1000_hw *hw);
262s32 e1000_init_mac_params_vf(struct e1000_hw *hw);
263 262
264 263
265#endif /* _E1000_VF_H_ */ 264#endif /* _E1000_VF_H_ */
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index cbc63ff13add..c5593f4665a4 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1214,6 +1214,19 @@ static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
1214} 1214}
1215#endif 1215#endif
1216 1216
1217static const struct net_device_ops ioc3_netdev_ops = {
1218 .ndo_open = ioc3_open,
1219 .ndo_stop = ioc3_close,
1220 .ndo_start_xmit = ioc3_start_xmit,
1221 .ndo_tx_timeout = ioc3_timeout,
1222 .ndo_get_stats = ioc3_get_stats,
1223 .ndo_set_multicast_list = ioc3_set_multicast_list,
1224 .ndo_do_ioctl = ioc3_ioctl,
1225 .ndo_validate_addr = eth_validate_addr,
1226 .ndo_set_mac_address = ioc3_set_mac_address,
1227 .ndo_change_mtu = eth_change_mtu,
1228};
1229
1217static int __devinit ioc3_probe(struct pci_dev *pdev, 1230static int __devinit ioc3_probe(struct pci_dev *pdev,
1218 const struct pci_device_id *ent) 1231 const struct pci_device_id *ent)
1219{ 1232{
@@ -1310,15 +1323,8 @@ static int __devinit ioc3_probe(struct pci_dev *pdev,
 	ioc3_get_eaddr(ip);
 
 	/* The IOC3-specific entries in the device structure. */
-	dev->open = ioc3_open;
-	dev->hard_start_xmit = ioc3_start_xmit;
-	dev->tx_timeout = ioc3_timeout;
 	dev->watchdog_timeo = 5 * HZ;
-	dev->stop = ioc3_close;
-	dev->get_stats = ioc3_get_stats;
-	dev->do_ioctl = ioc3_ioctl;
-	dev->set_multicast_list = ioc3_set_multicast_list;
-	dev->set_mac_address = ioc3_set_mac_address;
+	dev->netdev_ops = &ioc3_netdev_ops;
 	dev->ethtool_ops = &ioc3_ethtool_ops;
 	dev->features = NETIF_F_IP_CSUM;
 
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index 3126678bdd3c..73585fd8f29f 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -181,6 +181,18 @@ out:
181} 181}
182#endif 182#endif
183 183
184static const struct net_device_ops netcard_netdev_ops = {
185 .ndo_open = net_open,
186 .ndo_stop = net_close,
187 .ndo_start_xmit = net_send_packet,
188 .ndo_get_stats = net_get_stats,
189 .ndo_set_multicast_list = set_multicast_list,
190 .ndo_tx_timeout = net_tx_timeout,
191 .ndo_validate_addr = eth_validate_addr,
192 .ndo_set_mac_address = eth_mac_addr,
193 .ndo_change_mtu = eth_change_mtu,
194};
195
184/* 196/*
185 * This is the real probe routine. Linux has a history of friendly device 197 * This is the real probe routine. Linux has a history of friendly device
186 * probes on the ISA bus. A good device probes avoids doing writes, and 198 * probes on the ISA bus. A good device probes avoids doing writes, and
@@ -303,13 +315,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr)
 	np = netdev_priv(dev);
 	spin_lock_init(&np->lock);
 
-	dev->open		= net_open;
-	dev->stop		= net_close;
-	dev->hard_start_xmit	= net_send_packet;
-	dev->get_stats		= net_get_stats;
-	dev->set_multicast_list = &set_multicast_list;
-
-	dev->tx_timeout		= &net_tx_timeout;
+	dev->netdev_ops		= &netcard_netdev_ops;
 	dev->watchdog_timeo	= MY_TX_TIMEOUT;
 
 	err = register_netdev(dev);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index de4db0dc7879..4791238c3f6e 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -885,61 +885,6 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
885} 885}
886 886
887/** 887/**
888 * ixgbe_blink_led_start_82598 - Blink LED based on index.
889 * @hw: pointer to hardware structure
890 * @index: led number to blink
891 **/
892static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
893{
894 ixgbe_link_speed speed = 0;
895 bool link_up = 0;
896 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
897 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
898
899 /*
900 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
901 * force it if link is down.
902 */
903 hw->mac.ops.check_link(hw, &speed, &link_up, false);
904
905 if (!link_up) {
906 autoc_reg |= IXGBE_AUTOC_FLU;
907 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
908 msleep(10);
909 }
910
911 led_reg &= ~IXGBE_LED_MODE_MASK(index);
912 led_reg |= IXGBE_LED_BLINK(index);
913 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
914 IXGBE_WRITE_FLUSH(hw);
915
916 return 0;
917}
918
919/**
920 * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
921 * @hw: pointer to hardware structure
922 * @index: led number to stop blinking
923 **/
924static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
925{
926 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
927 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
928
929 autoc_reg &= ~IXGBE_AUTOC_FLU;
930 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
931 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
932
933 led_reg &= ~IXGBE_LED_MODE_MASK(index);
934 led_reg &= ~IXGBE_LED_BLINK(index);
935 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
936 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
937 IXGBE_WRITE_FLUSH(hw);
938
939 return 0;
940}
941
942/**
943 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register 888 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
944 * @hw: pointer to hardware structure 889 * @hw: pointer to hardware structure
945 * @reg: analog register to read 890 * @reg: analog register to read
@@ -1128,8 +1073,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
 	.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
 	.led_on = &ixgbe_led_on_generic,
 	.led_off = &ixgbe_led_off_generic,
-	.blink_led_start = &ixgbe_blink_led_start_82598,
-	.blink_led_stop = &ixgbe_blink_led_stop_82598,
+	.blink_led_start = &ixgbe_blink_led_start_generic,
+	.blink_led_stop = &ixgbe_blink_led_stop_generic,
 	.set_rar = &ixgbe_set_rar_generic,
 	.clear_rar = &ixgbe_clear_rar_generic,
 	.set_vmdq = &ixgbe_set_vmdq_82598,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index beae7e012609..29771fbaa42d 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -68,8 +68,6 @@ s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
68s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, 68s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan,
69 u32 vind, bool vlan_on); 69 u32 vind, bool vlan_on);
70s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); 70s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
71s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index);
72s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index);
73s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); 71s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
74s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); 72s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
75s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); 73s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
@@ -991,40 +989,6 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
991} 989}
992 990
993/** 991/**
994 * ixgbe_blink_led_start_82599 - Blink LED based on index.
995 * @hw: pointer to hardware structure
996 * @index: led number to blink
997 **/
998s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index)
999{
1000 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1001
1002 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1003 led_reg |= IXGBE_LED_BLINK(index);
1004 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1005 IXGBE_WRITE_FLUSH(hw);
1006
1007 return 0;
1008}
1009
1010/**
1011 * ixgbe_blink_led_stop_82599 - Stop blinking LED based on index.
1012 * @hw: pointer to hardware structure
1013 * @index: led number to stop blinking
1014 **/
1015s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index)
1016{
1017 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1018
1019 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1020 led_reg &= ~IXGBE_LED_BLINK(index);
1021 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1022 IXGBE_WRITE_FLUSH(hw);
1023
1024 return 0;
1025}
1026
1027/**
1028 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array 992 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
1029 * @hw: pointer to hardware structure 993 * @hw: pointer to hardware structure
1030 **/ 994 **/
@@ -1243,8 +1207,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
 	.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
 	.led_on = &ixgbe_led_on_generic,
 	.led_off = &ixgbe_led_off_generic,
-	.blink_led_start = &ixgbe_blink_led_start_82599,
-	.blink_led_stop = &ixgbe_blink_led_stop_82599,
+	.blink_led_start = &ixgbe_blink_led_start_generic,
+	.blink_led_stop = &ixgbe_blink_led_stop_generic,
 	.set_rar = &ixgbe_set_rar_generic,
 	.clear_rar = &ixgbe_clear_rar_generic,
 	.set_vmdq = &ixgbe_set_vmdq_82599,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 63ab6671d08e..5567519676d5 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -2071,3 +2071,58 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2071 2071
2072 return 0; 2072 return 0;
2073} 2073}
2074
2075/**
2076 * ixgbe_blink_led_start_generic - Blink LED based on index.
2077 * @hw: pointer to hardware structure
2078 * @index: led number to blink
2079 **/
2080s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2081{
2082 ixgbe_link_speed speed = 0;
2083 bool link_up = 0;
2084 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2085 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2086
2087 /*
2088 * Link must be up to auto-blink the LEDs;
2089 * Force it if link is down.
2090 */
2091 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2092
2093 if (!link_up) {
2094 autoc_reg |= IXGBE_AUTOC_FLU;
2095 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2096 msleep(10);
2097 }
2098
2099 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2100 led_reg |= IXGBE_LED_BLINK(index);
2101 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2102 IXGBE_WRITE_FLUSH(hw);
2103
2104 return 0;
2105}
2106
2107/**
2108 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2109 * @hw: pointer to hardware structure
2110 * @index: led number to stop blinking
2111 **/
2112s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2113{
2114 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2115 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2116
2117 autoc_reg &= ~IXGBE_AUTOC_FLU;
2118 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2119 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2120
2121 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2122 led_reg &= ~IXGBE_LED_BLINK(index);
2123 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2124 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2125 IXGBE_WRITE_FLUSH(hw);
2126
2127 return 0;
2128}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 24f73e719c3f..dd260890ad0a 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -76,6 +76,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
76s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); 76s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
77s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); 77s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
78 78
79s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
80s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
81
79#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 82#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
80 83
81#ifndef writeq 84#ifndef writeq
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index aafc120f164e..f0a20facc650 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -943,6 +943,24 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
943} 943}
944 944
945 945
946static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
947 struct ethtool_wolinfo *wol)
948{
949 struct ixgbe_hw *hw = &adapter->hw;
950 int retval = 1;
951
952 switch(hw->device_id) {
953 case IXGBE_DEV_ID_82599_KX4:
954 retval = 0;
955 break;
956 default:
957 wol->supported = 0;
958 retval = 0;
959 }
960
961 return retval;
962}
963
946static void ixgbe_get_wol(struct net_device *netdev, 964static void ixgbe_get_wol(struct net_device *netdev,
947 struct ethtool_wolinfo *wol) 965 struct ethtool_wolinfo *wol)
948{ 966{
@@ -952,7 +970,8 @@ static void ixgbe_get_wol(struct net_device *netdev,
 	                 WAKE_BCAST | WAKE_MAGIC;
 	wol->wolopts = 0;
 
-	if (!device_can_wakeup(&adapter->pdev->dev))
+	if (ixgbe_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
 		return;
 
 	if (adapter->wol & IXGBE_WUFC_EX)
@@ -974,6 +993,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
974 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 993 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
975 return -EOPNOTSUPP; 994 return -EOPNOTSUPP;
976 995
996 if (ixgbe_wol_exclusion(adapter, wol))
997 return wol->wolopts ? -EOPNOTSUPP : 0;
998
977 adapter->wol = 0; 999 adapter->wol = 0;
978 1000
979 if (wol->wolopts & WAKE_UCAST) 1001 if (wol->wolopts & WAKE_UCAST)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 9ef128ae6458..febde45cf9fa 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2723,17 +2723,21 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
-	/* Start with base case */
-	adapter->num_rx_queues = 1;
-	adapter->num_tx_queues = 1;
-
 #ifdef CONFIG_IXGBE_DCB
 	if (ixgbe_set_dcb_queues(adapter))
-		return;
+		goto done;
 
 #endif
 	if (ixgbe_set_rss_queues(adapter))
-		return;
+		goto done;
+
+	/* fallback to base case */
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+
+done:
+	/* Notify the stack of the (possibly) reduced Tx Queue count. */
+	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -2992,9 +2996,6 @@ try_msi:
2992 } 2996 }
2993 2997
2994out: 2998out:
2995 /* Notify the stack of the (possibly) reduced Tx Queue count. */
2996 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
2997
2998 return err; 2999 return err;
2999} 3000}
3000 3001
@@ -3611,9 +3612,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	return 0;
 }
-
 #endif /* CONFIG_PM */
-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+
+static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -3672,18 +3673,46 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
 		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
+	*enable_wake = !!wufc;
+
 	ixgbe_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
 
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	int retval;
+	bool wake;
+
+	retval = __ixgbe_shutdown(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 
 	return 0;
 }
+#endif /* CONFIG_PM */
 
 static void ixgbe_shutdown(struct pci_dev *pdev)
 {
-	ixgbe_suspend(pdev, PMSG_SUSPEND);
+	bool wake;
+
+	__ixgbe_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
 }
 
 /**
@@ -4342,7 +4371,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	int count = 0;
 	unsigned int f;
 
-	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
+	r_idx = skb->queue_mapping;
 	tx_ring = &adapter->tx_ring[r_idx];
 
 	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 380a1a54d530..384e072de2e7 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -168,6 +168,17 @@ writereg(struct net_device *dev, int portno, int value)
168 nubus_writew(swab16(value), dev->mem_start + portno); 168 nubus_writew(swab16(value), dev->mem_start + portno);
169} 169}
170 170
171static const struct net_device_ops mac89x0_netdev_ops = {
172 .ndo_open = net_open,
173 .ndo_stop = net_close,
174 .ndo_start_xmit = net_send_packet,
175 .ndo_get_stats = net_get_stats,
176 .ndo_set_multicast_list = set_multicast_list,
177 .ndo_set_mac_address = set_mac_address,
178 .ndo_validate_addr = eth_validate_addr,
179 .ndo_change_mtu = eth_change_mtu,
180};
181
171/* Probe for the CS8900 card in slot E. We won't bother looking 182/* Probe for the CS8900 card in slot E. We won't bother looking
172 anywhere else until we have a really good reason to do so. */ 183 anywhere else until we have a really good reason to do so. */
173struct net_device * __init mac89x0_probe(int unit) 184struct net_device * __init mac89x0_probe(int unit)
@@ -280,12 +291,7 @@ struct net_device * __init mac89x0_probe(int unit)
 
 	printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr);
 
-	dev->open		= net_open;
-	dev->stop		= net_close;
-	dev->hard_start_xmit	= net_send_packet;
-	dev->get_stats		= net_get_stats;
-	dev->set_multicast_list = &set_multicast_list;
-	dev->set_mac_address	= &set_mac_address;
+	dev->netdev_ops		= &mac89x0_netdev_ops;
 
 	err = register_netdev(dev);
 	if (err)
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f50501013b1c..46073de290cf 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -1100,6 +1100,18 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1100 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 1100 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
1101} 1101}
1102 1102
1103static const struct net_device_ops macb_netdev_ops = {
1104 .ndo_open = macb_open,
1105 .ndo_stop = macb_close,
1106 .ndo_start_xmit = macb_start_xmit,
1107 .ndo_set_multicast_list = macb_set_rx_mode,
1108 .ndo_get_stats = macb_get_stats,
1109 .ndo_do_ioctl = macb_ioctl,
1110 .ndo_validate_addr = eth_validate_addr,
1111 .ndo_change_mtu = eth_change_mtu,
1112 .ndo_set_mac_address = eth_mac_addr,
1113};
1114
1103static int __init macb_probe(struct platform_device *pdev) 1115static int __init macb_probe(struct platform_device *pdev)
1104{ 1116{
1105 struct eth_platform_data *pdata; 1117 struct eth_platform_data *pdata;
@@ -1175,12 +1187,7 @@ static int __init macb_probe(struct platform_device *pdev)
 		goto err_out_iounmap;
 	}
 
-	dev->open = macb_open;
-	dev->stop = macb_close;
-	dev->hard_start_xmit = macb_start_xmit;
-	dev->get_stats = macb_get_stats;
-	dev->set_multicast_list = macb_set_rx_mode;
-	dev->do_ioctl = macb_ioctl;
+	dev->netdev_ops = &macb_netdev_ops;
 	netif_napi_add(dev, &bp->napi, macb_poll, 64);
 	dev->ethtool_ops = &macb_ethtool_ops;
 
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 527166e35d56..acd143da161d 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -167,6 +167,18 @@ static int macsonic_close(struct net_device* dev)
167 return err; 167 return err;
168} 168}
169 169
170static const struct net_device_ops macsonic_netdev_ops = {
171 .ndo_open = macsonic_open,
172 .ndo_stop = macsonic_close,
173 .ndo_start_xmit = sonic_send_packet,
174 .ndo_set_multicast_list = sonic_multicast_list,
175 .ndo_tx_timeout = sonic_tx_timeout,
176 .ndo_get_stats = sonic_get_stats,
177 .ndo_validate_addr = eth_validate_addr,
178 .ndo_change_mtu = eth_change_mtu,
179 .ndo_set_mac_address = eth_mac_addr,
180};
181
170static int __init macsonic_init(struct net_device *dev) 182static int __init macsonic_init(struct net_device *dev)
171{ 183{
172 struct sonic_local* lp = netdev_priv(dev); 184 struct sonic_local* lp = netdev_priv(dev);
@@ -198,12 +210,7 @@ static int __init macsonic_init(struct net_device *dev)
 	lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
 	                * SONIC_BUS_SCALE(lp->dma_bitmode));
 
-	dev->open = macsonic_open;
-	dev->stop = macsonic_close;
-	dev->hard_start_xmit = sonic_send_packet;
-	dev->get_stats = sonic_get_stats;
-	dev->set_multicast_list = &sonic_multicast_list;
-	dev->tx_timeout = sonic_tx_timeout;
+	dev->netdev_ops = &macsonic_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	/*
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 9eed126a82f0..f2c4a665e93f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2447,6 +2447,7 @@ static int myri10ge_open(struct net_device *dev)
2447 lro_mgr->lro_arr = ss->rx_done.lro_desc; 2447 lro_mgr->lro_arr = ss->rx_done.lro_desc;
2448 lro_mgr->get_frag_header = myri10ge_get_frag_header; 2448 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2449 lro_mgr->max_aggr = myri10ge_lro_max_pkts; 2449 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2450 lro_mgr->frag_align_pad = 2;
2450 if (lro_mgr->max_aggr > MAX_SKB_FRAGS) 2451 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2451 lro_mgr->max_aggr = MAX_SKB_FRAGS; 2452 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2452 2453
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index cf24cc34debe..e7070515d2e3 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -19,6 +19,7 @@
19#include <linux/mii.h> 19#include <linux/mii.h>
20#include <linux/phy.h> 20#include <linux/phy.h>
21#include <linux/phy_fixed.h> 21#include <linux/phy_fixed.h>
22#include <linux/err.h>
22 23
23#define MII_REGS_NUM 29 24#define MII_REGS_NUM 29
24 25
@@ -207,8 +208,8 @@ static int __init fixed_mdio_bus_init(void)
 	int ret;
 
 	pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
-	if (!pdev) {
-		ret = -ENOMEM;
+	if (IS_ERR(pdev)) {
+		ret = PTR_ERR(pdev);
 		goto err_pdev;
 	}
 
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index eb6411c4694f..7a3ec9d39a9a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -69,6 +69,11 @@
69#define MII_M1111_COPPER 0 69#define MII_M1111_COPPER 0
70#define MII_M1111_FIBER 1 70#define MII_M1111_FIBER 1
71 71
72#define MII_88E1121_PHY_LED_CTRL 16
73#define MII_88E1121_PHY_LED_PAGE 3
74#define MII_88E1121_PHY_LED_DEF 0x0030
75#define MII_88E1121_PHY_PAGE 22
76
72#define MII_M1011_PHY_STATUS 0x11 77#define MII_M1011_PHY_STATUS 0x11
73#define MII_M1011_PHY_STATUS_1000 0x8000 78#define MII_M1011_PHY_STATUS_1000 0x8000
74#define MII_M1011_PHY_STATUS_100 0x4000 79#define MII_M1011_PHY_STATUS_100 0x4000
@@ -154,6 +159,30 @@ static int marvell_config_aneg(struct phy_device *phydev)
154 return err; 159 return err;
155} 160}
156 161
162static int m88e1121_config_aneg(struct phy_device *phydev)
163{
164 int err, temp;
165
166 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
167 if (err < 0)
168 return err;
169
170 err = phy_write(phydev, MII_M1011_PHY_SCR,
171 MII_M1011_PHY_SCR_AUTO_CROSS);
172 if (err < 0)
173 return err;
174
175 temp = phy_read(phydev, MII_88E1121_PHY_PAGE);
176
177 phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
178 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
179 phy_write(phydev, MII_88E1121_PHY_PAGE, temp);
180
181 err = genphy_config_aneg(phydev);
182
183 return err;
184}
185
157static int m88e1111_config_init(struct phy_device *phydev) 186static int m88e1111_config_init(struct phy_device *phydev)
158{ 187{
159 int err; 188 int err;
@@ -429,6 +458,18 @@ static int marvell_read_status(struct phy_device *phydev)
429 return 0; 458 return 0;
430} 459}
431 460
461static int m88e1121_did_interrupt(struct phy_device *phydev)
462{
463 int imask;
464
465 imask = phy_read(phydev, MII_M1011_IEVENT);
466
467 if (imask & MII_M1011_IMASK_INIT)
468 return 1;
469
470 return 0;
471}
472
432static struct phy_driver marvell_drivers[] = { 473static struct phy_driver marvell_drivers[] = {
433 { 474 {
434 .phy_id = 0x01410c60, 475 .phy_id = 0x01410c60,
@@ -482,6 +523,19 @@ static struct phy_driver marvell_drivers[] = {
482 .driver = {.owner = THIS_MODULE,}, 523 .driver = {.owner = THIS_MODULE,},
483 }, 524 },
484 { 525 {
526 .phy_id = 0x01410cb0,
527 .phy_id_mask = 0xfffffff0,
528 .name = "Marvell 88E1121R",
529 .features = PHY_GBIT_FEATURES,
530 .flags = PHY_HAS_INTERRUPT,
531 .config_aneg = &m88e1121_config_aneg,
532 .read_status = &marvell_read_status,
533 .ack_interrupt = &marvell_ack_interrupt,
534 .config_intr = &marvell_config_intr,
535 .did_interrupt = &m88e1121_did_interrupt,
536 .driver = { .owner = THIS_MODULE },
537 },
538 {
485 .phy_id = 0x01410cd0, 539 .phy_id = 0x01410cd0,
486 .phy_id_mask = 0xfffffff0, 540 .phy_id_mask = 0xfffffff0,
487 .name = "Marvell 88E1145", 541 .name = "Marvell 88E1145",
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3ff1f425f1bb..61755cbd978e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -434,7 +434,7 @@ void phy_start_machine(struct phy_device *phydev,
 	phydev->adjust_state = handler;
 
 	INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
-	schedule_delayed_work(&phydev->state_queue, jiffies + HZ);
+	schedule_delayed_work(&phydev->state_queue, HZ);
 }
 
 /**
@@ -655,6 +655,10 @@ static void phy_change(struct work_struct *work)
655 struct phy_device *phydev = 655 struct phy_device *phydev =
656 container_of(work, struct phy_device, phy_queue); 656 container_of(work, struct phy_device, phy_queue);
657 657
658 if (phydev->drv->did_interrupt &&
659 !phydev->drv->did_interrupt(phydev))
660 goto ignore;
661
658 err = phy_disable_interrupts(phydev); 662 err = phy_disable_interrupts(phydev);
659 663
660 if (err) 664 if (err)
@@ -681,6 +685,11 @@ static void phy_change(struct work_struct *work)
681 685
682 return; 686 return;
683 687
688ignore:
689 atomic_dec(&phydev->irq_disable);
690 enable_irq(phydev->irq);
691 return;
692
684irq_enable_err: 693irq_enable_err:
685 disable_irq(phydev->irq); 694 disable_irq(phydev->irq);
686 atomic_inc(&phydev->irq_disable); 695 atomic_inc(&phydev->irq_disable);
@@ -937,6 +946,5 @@ static void phy_state_machine(struct work_struct *work)
 	if (err < 0)
 		phy_error(phydev);
 
-	schedule_delayed_work(&phydev->state_queue,
-			jiffies + PHY_STATE_TIME * HZ);
+	schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
 }
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index dee23b159df2..7269a426051c 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -448,9 +448,6 @@ static void efx_init_channels(struct efx_nic *efx)
448 448
449 WARN_ON(channel->rx_pkt != NULL); 449 WARN_ON(channel->rx_pkt != NULL);
450 efx_rx_strategy(channel); 450 efx_rx_strategy(channel);
451
452 netif_napi_add(channel->napi_dev, &channel->napi_str,
453 efx_poll, napi_weight);
454 } 451 }
455} 452}
456 453
@@ -1321,6 +1318,8 @@ static int efx_init_napi(struct efx_nic *efx)
1321 1318
1322 efx_for_each_channel(channel, efx) { 1319 efx_for_each_channel(channel, efx) {
1323 channel->napi_dev = efx->net_dev; 1320 channel->napi_dev = efx->net_dev;
1321 netif_napi_add(channel->napi_dev, &channel->napi_str,
1322 efx_poll, napi_weight);
1324 } 1323 }
1325 return 0; 1324 return 0;
1326} 1325}
@@ -1330,6 +1329,8 @@ static void efx_fini_napi(struct efx_nic *efx)
1330 struct efx_channel *channel; 1329 struct efx_channel *channel;
1331 1330
1332 efx_for_each_channel(channel, efx) { 1331 efx_for_each_channel(channel, efx) {
1332 if (channel->napi_dev)
1333 netif_napi_del(&channel->napi_str);
1333 channel->napi_dev = NULL; 1334 channel->napi_dev = NULL;
1334 } 1335 }
1335} 1336}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d4629ab2c614..466a8abb0053 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1176,9 +1176,9 @@ void falcon_sim_phy_event(struct efx_nic *efx)
 
 	EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
 	if (EFX_IS10G(efx))
-		EFX_SET_OWORD_FIELD(phy_event, XG_PHY_INTR, 1);
+		EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
 	else
-		EFX_SET_OWORD_FIELD(phy_event, G_PHY0_INTR, 1);
+		EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
 
 	falcon_generate_event(&efx->channel[0], &phy_event);
 }
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7b1882765a0c..3ab28bb00c12 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1188,6 +1188,19 @@ out:
1188 return ret; 1188 return ret;
1189} 1189}
1190 1190
1191static const struct net_device_ops sh_eth_netdev_ops = {
1192 .ndo_open = sh_eth_open,
1193 .ndo_stop = sh_eth_close,
1194 .ndo_start_xmit = sh_eth_start_xmit,
1195 .ndo_get_stats = sh_eth_get_stats,
1196 .ndo_set_multicast_list = sh_eth_set_multicast_list,
1197 .ndo_tx_timeout = sh_eth_tx_timeout,
1198 .ndo_do_ioctl = sh_eth_do_ioctl,
1199 .ndo_validate_addr = eth_validate_addr,
1200 .ndo_set_mac_address = eth_mac_addr,
1201 .ndo_change_mtu = eth_change_mtu,
1202};
1203
1191static int sh_eth_drv_probe(struct platform_device *pdev) 1204static int sh_eth_drv_probe(struct platform_device *pdev)
1192{ 1205{
1193 int ret, i, devno = 0; 1206 int ret, i, devno = 0;
@@ -1240,13 +1253,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	mdp->edmac_endian = pd->edmac_endian;
 
 	/* set function */
-	ndev->open = sh_eth_open;
-	ndev->hard_start_xmit = sh_eth_start_xmit;
-	ndev->stop = sh_eth_close;
-	ndev->get_stats = sh_eth_get_stats;
-	ndev->set_multicast_list = sh_eth_set_multicast_list;
-	ndev->do_ioctl = sh_eth_do_ioctl;
-	ndev->tx_timeout = sh_eth_tx_timeout;
+	ndev->netdev_ops = &sh_eth_netdev_ops;
 	ndev->watchdog_timeo = TX_TIMEOUT;
 
 	mdp->post_rx = POST_RX >> (devno << 1);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b8978d4af1b7..c11cdd08ec57 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev)
 	if (netif_msg_ifdown(skge))
 		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
 
-	netif_stop_queue(dev);
+	netif_tx_disable(dev);
 
 	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
 		del_timer_sync(&skge->link_timer);
@@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev)
2881 } 2881 }
2882 2882
2883 skge->tx_ring.to_clean = e; 2883 skge->tx_ring.to_clean = e;
2884 netif_wake_queue(dev);
2885} 2884}
2886 2885
2887static void skge_tx_timeout(struct net_device *dev) 2886static void skge_tx_timeout(struct net_device *dev)
@@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev)
2893 2892
2894 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); 2893 skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
2895 skge_tx_clean(dev); 2894 skge_tx_clean(dev);
2895 netif_wake_queue(dev);
2896} 2896}
2897 2897
2898static int skge_change_mtu(struct net_device *dev, int new_mtu) 2898static int skge_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index e0d84772771c..a39c0b9ba8b6 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -331,6 +331,18 @@ out:
331 return ERR_PTR(err); 331 return ERR_PTR(err);
332} 332}
333 333
334static const struct net_device_ops sun3_82586_netdev_ops = {
335 .ndo_open = sun3_82586_open,
336 .ndo_stop = sun3_82586_close,
337 .ndo_start_xmit = sun3_82586_send_packet,
338 .ndo_set_multicast_list = set_multicast_list,
339 .ndo_tx_timeout = sun3_82586_timeout,
340 .ndo_get_stats = sun3_82586_get_stats,
341 .ndo_validate_addr = eth_validate_addr,
342 .ndo_set_mac_address = eth_mac_addr,
343 .ndo_change_mtu = eth_change_mtu,
344};
345
334static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) 346static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
335{ 347{
336 int i, size, retval; 348 int i, size, retval;
@@ -381,13 +393,8 @@ static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
 
 	printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq);
 
-	dev->open		= sun3_82586_open;
-	dev->stop		= sun3_82586_close;
-	dev->get_stats		= sun3_82586_get_stats;
-	dev->tx_timeout 	= sun3_82586_timeout;
+	dev->netdev_ops		= &sun3_82586_netdev_ops;
 	dev->watchdog_timeo	= HZ/20;
-	dev->hard_start_xmit 	= sun3_82586_send_packet;
-	dev->set_multicast_list = set_multicast_list;
 
 	dev->if_port 		= 0;
 	return 0;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d91e95b237b7..0ce2db6ce2bf 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -862,6 +862,22 @@ static int __devinit tc35815_init_dev_addr(struct net_device *dev)
862 return 0; 862 return 0;
863} 863}
864 864
865static const struct net_device_ops tc35815_netdev_ops = {
866 .ndo_open = tc35815_open,
867 .ndo_stop = tc35815_close,
868 .ndo_start_xmit = tc35815_send_packet,
869 .ndo_get_stats = tc35815_get_stats,
870 .ndo_set_multicast_list = tc35815_set_multicast_list,
871 .ndo_tx_timeout = tc35815_tx_timeout,
872 .ndo_do_ioctl = tc35815_ioctl,
873 .ndo_validate_addr = eth_validate_addr,
874 .ndo_change_mtu = eth_change_mtu,
875 .ndo_set_mac_address = eth_mac_addr,
876#ifdef CONFIG_NET_POLL_CONTROLLER
877 .ndo_poll_controller = tc35815_poll_controller,
878#endif
879};
880
865static int __devinit tc35815_init_one(struct pci_dev *pdev, 881static int __devinit tc35815_init_one(struct pci_dev *pdev,
866 const struct pci_device_id *ent) 882 const struct pci_device_id *ent)
867{ 883{
@@ -904,21 +920,12 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
 	ioaddr = pcim_iomap_table(pdev)[1];
 
 	/* Initialize the device structure. */
-	dev->open = tc35815_open;
-	dev->hard_start_xmit = tc35815_send_packet;
-	dev->stop = tc35815_close;
-	dev->get_stats = tc35815_get_stats;
-	dev->set_multicast_list = tc35815_set_multicast_list;
-	dev->do_ioctl = tc35815_ioctl;
+	dev->netdev_ops = &tc35815_netdev_ops;
 	dev->ethtool_ops = &tc35815_ethtool_ops;
-	dev->tx_timeout = tc35815_tx_timeout;
 	dev->watchdog_timeo = TC35815_TX_TIMEOUT;
 #ifdef TC35815_NAPI
 	netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	dev->poll_controller = tc35815_poll_controller;
-#endif
 
 	dev->irq = pdev->irq;
 	dev->base_addr = (unsigned long)ioaddr;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 6a736dda3ee2..7a837c465960 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -12443,8 +12443,13 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
12443 /* Next, try NVRAM. */ 12443 /* Next, try NVRAM. */
12444 if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && 12444 if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
12445 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { 12445 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
12446 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); 12446 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12447 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); 12447 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12448 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12449 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12450 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12451 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12452
12448 } 12453 }
12449 /* Finally just fetch it out of the MAC control regs. */ 12454 /* Finally just fetch it out of the MAC control regs. */
12450 else { 12455 else {
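
Note: the tg3 change swaps memcpy()s of the raw hi/lo words for explicit shift-and-mask extraction, so the MAC address bytes are picked out by value rather than by the words' endian-dependent in-memory layout. A small illustrative sketch of the same idea follows; the byte positions simply mirror the hunk and say nothing about other hardware.

#include <linux/types.h>

/*
 * Pull the six address bytes out of two 32-bit words with shifts and
 * masks instead of copying their in-memory representation.
 */
static void mac_from_words(u8 addr[6], u32 hi, u32 lo)
{
        addr[0] = (hi >> 16) & 0xff;
        addr[1] = (hi >> 24) & 0xff;
        addr[2] = (lo >>  0) & 0xff;
        addr[3] = (lo >>  8) & 0xff;
        addr[4] = (lo >> 16) & 0xff;
        addr[5] = (lo >> 24) & 0xff;
}
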
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index bb43e7fb2a50..0f78f99f9b20 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1561,6 +1561,18 @@ static const struct ethtool_ops tsi108_ethtool_ops = {
1561 .set_settings = tsi108_set_settings, 1561 .set_settings = tsi108_set_settings,
1562}; 1562};
1563 1563
1564static const struct net_device_ops tsi108_netdev_ops = {
1565 .ndo_open = tsi108_open,
1566 .ndo_stop = tsi108_close,
1567 .ndo_start_xmit = tsi108_send_packet,
1568 .ndo_set_multicast_list = tsi108_set_rx_mode,
1569 .ndo_get_stats = tsi108_get_stats,
1570 .ndo_do_ioctl = tsi108_do_ioctl,
1571 .ndo_set_mac_address = tsi108_set_mac,
1572 .ndo_validate_addr = eth_validate_addr,
1573 .ndo_change_mtu = eth_change_mtu,
1574};
1575
1564static int 1576static int
1565tsi108_init_one(struct platform_device *pdev) 1577tsi108_init_one(struct platform_device *pdev)
1566{ 1578{
@@ -1616,14 +1628,8 @@ tsi108_init_one(struct platform_device *pdev)
1616 data->phy_type = einfo->phy_type; 1628 data->phy_type = einfo->phy_type;
1617 data->irq_num = einfo->irq_num; 1629 data->irq_num = einfo->irq_num;
1618 data->id = pdev->id; 1630 data->id = pdev->id;
1619 dev->open = tsi108_open;
1620 dev->stop = tsi108_close;
1621 dev->hard_start_xmit = tsi108_send_packet;
1622 dev->set_mac_address = tsi108_set_mac;
1623 dev->set_multicast_list = tsi108_set_rx_mode;
1624 dev->get_stats = tsi108_get_stats;
1625 netif_napi_add(dev, &data->napi, tsi108_poll, 64); 1631 netif_napi_add(dev, &data->napi, tsi108_poll, 64);
1626 dev->do_ioctl = tsi108_do_ioctl; 1632 dev->netdev_ops = &tsi108_netdev_ops;
1627 dev->ethtool_ops = &tsi108_ethtool_ops; 1633 dev->ethtool_ops = &tsi108_ethtool_ops;
1628 1634
1629 /* Apparently, the Linux networking code won't use scatter-gather 1635 /* Apparently, the Linux networking code won't use scatter-gather
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a1b0697340ba..16716aef184c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -518,7 +518,7 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
518 int err; 518 int err;
519 519
520 /* Under a page? Don't bother with paged skb. */ 520 /* Under a page? Don't bother with paged skb. */
521 if (prepad + len < PAGE_SIZE) 521 if (prepad + len < PAGE_SIZE || !linear)
522 linear = len; 522 linear = len;
523 523
524 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 524 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
@@ -565,7 +565,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
565 565
566 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { 566 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
567 align = NET_IP_ALIGN; 567 align = NET_IP_ALIGN;
568 if (unlikely(len < ETH_HLEN)) 568 if (unlikely(len < ETH_HLEN ||
569 (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
569 return -EINVAL; 570 return -EINVAL;
570 } 571 }
571 572
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index fb53ef872df3..754a4b182c1d 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -377,7 +377,7 @@ static void velocity_print_info(struct velocity_info *vptr);
377static int velocity_open(struct net_device *dev); 377static int velocity_open(struct net_device *dev);
378static int velocity_change_mtu(struct net_device *dev, int mtu); 378static int velocity_change_mtu(struct net_device *dev, int mtu);
379static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); 379static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
380static int velocity_intr(int irq, void *dev_instance); 380static irqreturn_t velocity_intr(int irq, void *dev_instance);
381static void velocity_set_multi(struct net_device *dev); 381static void velocity_set_multi(struct net_device *dev);
382static struct net_device_stats *velocity_get_stats(struct net_device *dev); 382static struct net_device_stats *velocity_get_stats(struct net_device *dev);
383static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 383static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -2215,7 +2215,7 @@ out:
2215 * efficiently as possible. 2215 * efficiently as possible.
2216 */ 2216 */
2217 2217
2218static int velocity_intr(int irq, void *dev_instance) 2218static irqreturn_t velocity_intr(int irq, void *dev_instance)
2219{ 2219{
2220 struct net_device *dev = dev_instance; 2220 struct net_device *dev = dev_instance;
2221 struct velocity_info *vptr = netdev_priv(dev); 2221 struct velocity_info *vptr = netdev_priv(dev);
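
Note: via-velocity's interrupt handler is re-declared to return irqreturn_t rather than int, matching the prototype request_irq() expects. A minimal sketch of that handler shape; foo_* is a placeholder and the status check merely stands in for reading the chip's interrupt status register.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical status check -- stands in for reading the chip's ISR. */
static bool foo_irq_pending(struct net_device *dev)
{
        return true;
}

static irqreturn_t foo_intr(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;

        if (!foo_irq_pending(dev))
                return IRQ_NONE;        /* not ours; let other handlers run */

        /* ...acknowledge and service the device here... */
        return IRQ_HANDLED;
}
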
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index a12a7211c982..5a4ad156f63e 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -108,6 +108,18 @@ static int xtsonic_close(struct net_device *dev)
108 return err; 108 return err;
109} 109}
110 110
111static const struct net_device_ops xtsonic_netdev_ops = {
112 .ndo_open = xtsonic_open,
113 .ndo_stop = xtsonic_close,
114 .ndo_start_xmit = sonic_send_packet,
115 .ndo_get_stats = sonic_get_stats,
116 .ndo_set_multicast_list = sonic_multicast_list,
117 .ndo_tx_timeout = sonic_tx_timeout,
118 .ndo_validate_addr = eth_validate_addr,
119 .ndo_change_mtu = eth_change_mtu,
120 .ndo_set_mac_address = eth_mac_addr,
121};
122
111static int __init sonic_probe1(struct net_device *dev) 123static int __init sonic_probe1(struct net_device *dev)
112{ 124{
113 static unsigned version_printed = 0; 125 static unsigned version_printed = 0;
@@ -205,12 +217,7 @@ static int __init sonic_probe1(struct net_device *dev)
205 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS 217 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
206 * SONIC_BUS_SCALE(lp->dma_bitmode)); 218 * SONIC_BUS_SCALE(lp->dma_bitmode));
207 219
208 dev->open = xtsonic_open; 220 dev->netdev_ops = &xtsonic_netdev_ops;
209 dev->stop = xtsonic_close;
210 dev->hard_start_xmit = sonic_send_packet;
211 dev->get_stats = sonic_get_stats;
212 dev->set_multicast_list = &sonic_multicast_list;
213 dev->tx_timeout = sonic_tx_timeout;
214 dev->watchdog_timeo = TX_TIMEOUT; 221 dev->watchdog_timeo = TX_TIMEOUT;
215 222
216 /* 223 /*
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 4fa3bb2ddfe4..33e5ade774ca 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -434,7 +434,8 @@ static void __init superio_parport_init(void)
434 0 /*base_hi*/, 434 0 /*base_hi*/,
435 PAR_IRQ, 435 PAR_IRQ,
436 PARPORT_DMA_NONE /* dma */, 436 PARPORT_DMA_NONE /* dma */,
437 NULL /*struct pci_dev* */) ) 437 NULL /*struct pci_dev* */),
438 0 /* shared irq flags */ )
438 439
439 printk(KERN_WARNING PFX "Probing parallel port failed.\n"); 440 printk(KERN_WARNING PFX "Probing parallel port failed.\n");
440#endif /* CONFIG_PARPORT_PC */ 441#endif /* CONFIG_PARPORT_PC */
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0570794ccf1c..d1815272c435 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -20,6 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/buffer_head.h> 21#include <linux/buffer_head.h>
22#include <linux/hdreg.h> 22#include <linux/hdreg.h>
23#include <linux/async.h>
23 24
24#include <asm/ccwdev.h> 25#include <asm/ccwdev.h>
25#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
@@ -480,8 +481,10 @@ static void dasd_change_state(struct dasd_device *device)
480 if (rc && rc != -EAGAIN) 481 if (rc && rc != -EAGAIN)
481 device->target = device->state; 482 device->target = device->state;
482 483
483 if (device->state == device->target) 484 if (device->state == device->target) {
484 wake_up(&dasd_init_waitq); 485 wake_up(&dasd_init_waitq);
486 dasd_put_device(device);
487 }
485 488
486 /* let user-space know that the device status changed */ 489 /* let user-space know that the device status changed */
487 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 490 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
@@ -513,12 +516,15 @@ void dasd_kick_device(struct dasd_device *device)
513 */ 516 */
514void dasd_set_target_state(struct dasd_device *device, int target) 517void dasd_set_target_state(struct dasd_device *device, int target)
515{ 518{
519 dasd_get_device(device);
516 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 520 /* If we are in probeonly mode stop at DASD_STATE_READY. */
517 if (dasd_probeonly && target > DASD_STATE_READY) 521 if (dasd_probeonly && target > DASD_STATE_READY)
518 target = DASD_STATE_READY; 522 target = DASD_STATE_READY;
519 if (device->target != target) { 523 if (device->target != target) {
520 if (device->state == target) 524 if (device->state == target) {
521 wake_up(&dasd_init_waitq); 525 wake_up(&dasd_init_waitq);
526 dasd_put_device(device);
527 }
522 device->target = target; 528 device->target = target;
523 } 529 }
524 if (device->state != device->target) 530 if (device->state != device->target)
@@ -2148,6 +2154,22 @@ dasd_exit(void)
2148 * SECTION: common functions for ccw_driver use 2154 * SECTION: common functions for ccw_driver use
2149 */ 2155 */
2150 2156
2157static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2158{
2159 struct ccw_device *cdev = data;
2160 int ret;
2161
2162 ret = ccw_device_set_online(cdev);
2163 if (ret)
2164 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2165 dev_name(&cdev->dev), ret);
2166 else {
2167 struct dasd_device *device = dasd_device_from_cdev(cdev);
2168 wait_event(dasd_init_waitq, _wait_for_device(device));
2169 dasd_put_device(device);
2170 }
2171}
2172
2151/* 2173/*
2152 * Initial attempt at a probe function. this can be simplified once 2174 * Initial attempt at a probe function. this can be simplified once
2153 * the other detection code is gone. 2175 * the other detection code is gone.
@@ -2180,10 +2202,7 @@ int dasd_generic_probe(struct ccw_device *cdev,
2180 */ 2202 */
2181 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 2203 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
2182 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 2204 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2183 ret = ccw_device_set_online(cdev); 2205 async_schedule(dasd_generic_auto_online, cdev);
2184 if (ret)
2185 pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2186 dev_name(&cdev->dev), ret);
2187 return 0; 2206 return 0;
2188} 2207}
2189 2208
@@ -2290,13 +2309,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
2290 } else 2309 } else
2291 pr_debug("dasd_generic device %s found\n", 2310 pr_debug("dasd_generic device %s found\n",
2292 dev_name(&cdev->dev)); 2311 dev_name(&cdev->dev));
2293
2294 /* FIXME: we have to wait for the root device but we don't want
2295 * to wait for each single device but for all at once. */
2296 wait_event(dasd_init_waitq, _wait_for_device(device));
2297
2298 dasd_put_device(device); 2312 dasd_put_device(device);
2299
2300 return rc; 2313 return rc;
2301} 2314}
2302 2315
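
Note: the dasd.c hunks stop waiting for each device in the probe path and instead queue dasd_generic_auto_online() through async_schedule(), so slow DASD bring-up overlaps with the rest of boot. A minimal sketch of that deferral pattern with made-up names:

#include <linux/async.h>
#include <linux/device.h>

/* Hypothetical slow bring-up step, run from the async thread pool. */
static void foo_auto_online(void *data, async_cookie_t cookie)
{
        struct device *dev = data;

        /* ...anything that may block for a long time goes here... */
        dev_info(dev, "device is now online\n");
}

static int foo_probe(struct device *dev)
{
        /* Hand the slow part off; probe itself returns immediately. */
        async_schedule(foo_auto_online, dev);
        return 0;
}
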
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 21254793c604..cb52da033f06 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2019,15 +2019,23 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2019 ccw++; 2019 ccw++;
2020 recid += count; 2020 recid += count;
2021 new_track = 0; 2021 new_track = 0;
2022 /* first idaw for a ccw may start anywhere */
2023 if (!idaw_dst)
2024 idaw_dst = dst;
2022 } 2025 }
2023 /* If we start a new idaw, everything is fine and the 2026 /* If we start a new idaw, we must make sure that it
2024 * start of the new idaw is the start of this segment. 2027 * starts on an IDA_BLOCK_SIZE boundary.
2025 * If we continue an idaw, we must make sure that the 2028 * If we continue an idaw, we must make sure that the
2026 * current segment begins where the so far accumulated 2029 * current segment begins where the so far accumulated
2027 * idaw ends 2030 * idaw ends
2028 */ 2031 */
2029 if (!idaw_dst) 2032 if (!idaw_dst) {
2030 idaw_dst = dst; 2033 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
2034 dasd_sfree_request(cqr, startdev);
2035 return ERR_PTR(-ERANGE);
2036 } else
2037 idaw_dst = dst;
2038 }
2031 if ((idaw_dst + idaw_len) != dst) { 2039 if ((idaw_dst + idaw_len) != dst) {
2032 dasd_sfree_request(cqr, startdev); 2040 dasd_sfree_request(cqr, startdev);
2033 return ERR_PTR(-ERANGE); 2041 return ERR_PTR(-ERANGE);
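
Note: the dasd_eckd hunk lets the first IDAW of a CCW start anywhere, but an IDAW list begun later in the transfer must start on an IDA_BLOCK_SIZE boundary, checked against the physical address via __pa(). A minimal sketch of that alignment test, assuming the s390 IDAL definitions:

#include <linux/types.h>
#include <asm/idals.h>          /* IDA_BLOCK_SIZE */
#include <asm/page.h>           /* __pa() */

/* True if dst may start a new IDAW, i.e. it is IDA-block aligned. */
static inline bool ida_block_aligned(void *dst)
{
        return (__pa(dst) & (IDA_BLOCK_SIZE - 1)) == 0;
}
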
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9e8a2914259b..accd957454e7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -881,42 +881,6 @@ no_handler:
881 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 881 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
882} 882}
883 883
884static void qdio_call_shutdown(struct work_struct *work)
885{
886 struct ccw_device_private *priv;
887 struct ccw_device *cdev;
888
889 priv = container_of(work, struct ccw_device_private, kick_work);
890 cdev = priv->cdev;
891 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
892 put_device(&cdev->dev);
893}
894
895static void qdio_int_error(struct ccw_device *cdev)
896{
897 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
898
899 switch (irq_ptr->state) {
900 case QDIO_IRQ_STATE_INACTIVE:
901 case QDIO_IRQ_STATE_CLEANUP:
902 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
903 break;
904 case QDIO_IRQ_STATE_ESTABLISHED:
905 case QDIO_IRQ_STATE_ACTIVE:
906 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
907 if (get_device(&cdev->dev)) {
908 /* Can't call shutdown from interrupt context. */
909 PREPARE_WORK(&cdev->private->kick_work,
910 qdio_call_shutdown);
911 queue_work(ccw_device_work, &cdev->private->kick_work);
912 }
913 break;
914 default:
915 WARN_ON(1);
916 }
917 wake_up(&cdev->private->wait_q);
918}
919
920static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, 884static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
921 int dstat) 885 int dstat)
922{ 886{
@@ -973,10 +937,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
973 switch (PTR_ERR(irb)) { 937 switch (PTR_ERR(irb)) {
974 case -EIO: 938 case -EIO:
975 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); 939 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
976 return; 940 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
977 case -ETIMEDOUT: 941 wake_up(&cdev->private->wait_q);
978 DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
979 qdio_int_error(cdev);
980 return; 942 return;
981 default: 943 default:
982 WARN_ON(1); 944 WARN_ON(1);
@@ -1001,7 +963,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1001 case QDIO_IRQ_STATE_ACTIVE: 963 case QDIO_IRQ_STATE_ACTIVE:
1002 if (cstat & SCHN_STAT_PCI) { 964 if (cstat & SCHN_STAT_PCI) {
1003 qdio_int_handler_pci(irq_ptr); 965 qdio_int_handler_pci(irq_ptr);
1004 /* no state change so no need to wake up wait_q */
1005 return; 966 return;
1006 } 967 }
1007 if ((cstat & ~SCHN_STAT_PCI) || dstat) { 968 if ((cstat & ~SCHN_STAT_PCI) || dstat) {
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index e6d1fc8c54f1..a85ad05e8548 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -383,18 +383,22 @@ static int jsf_ioctl_program(void __user *arg)
383 return 0; 383 return 0;
384} 384}
385 385
386static int jsf_ioctl(struct inode *inode, struct file *f, unsigned int cmd, 386static long jsf_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
387 unsigned long arg)
388{ 387{
388 lock_kernel();
389 int error = -ENOTTY; 389 int error = -ENOTTY;
390 void __user *argp = (void __user *)arg; 390 void __user *argp = (void __user *)arg;
391 391
392 if (!capable(CAP_SYS_ADMIN)) 392 if (!capable(CAP_SYS_ADMIN)) {
393 unlock_kernel();
393 return -EPERM; 394 return -EPERM;
395 }
394 switch (cmd) { 396 switch (cmd) {
395 case JSFLASH_IDENT: 397 case JSFLASH_IDENT:
396 if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) 398 if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) {
399 unlock_kernel();
397 return -EFAULT; 400 return -EFAULT;
401 }
398 break; 402 break;
399 case JSFLASH_ERASE: 403 case JSFLASH_ERASE:
400 error = jsf_ioctl_erase(arg); 404 error = jsf_ioctl_erase(arg);
@@ -404,6 +408,7 @@ static int jsf_ioctl(struct inode *inode, struct file *f, unsigned int cmd,
404 break; 408 break;
405 } 409 }
406 410
411 unlock_kernel();
407 return error; 412 return error;
408} 413}
409 414
@@ -439,7 +444,7 @@ static const struct file_operations jsf_fops = {
439 .llseek = jsf_lseek, 444 .llseek = jsf_lseek,
440 .read = jsf_read, 445 .read = jsf_read,
441 .write = jsf_write, 446 .write = jsf_write,
442 .ioctl = jsf_ioctl, 447 .unlocked_ioctl = jsf_ioctl,
443 .mmap = jsf_mmap, 448 .mmap = jsf_mmap,
444 .open = jsf_open, 449 .open = jsf_open,
445 .release = jsf_release, 450 .release = jsf_release,
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 27993c37775d..2c56fd56ec63 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -197,9 +197,8 @@ static struct uctrl_driver {
197static void uctrl_get_event_status(struct uctrl_driver *); 197static void uctrl_get_event_status(struct uctrl_driver *);
198static void uctrl_get_external_status(struct uctrl_driver *); 198static void uctrl_get_external_status(struct uctrl_driver *);
199 199
200static int 200static long
201uctrl_ioctl(struct inode *inode, struct file *file, 201uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
202 unsigned int cmd, unsigned long arg)
203{ 202{
204 switch (cmd) { 203 switch (cmd) {
205 default: 204 default:
@@ -226,7 +225,7 @@ static irqreturn_t uctrl_interrupt(int irq, void *dev_id)
226static const struct file_operations uctrl_fops = { 225static const struct file_operations uctrl_fops = {
227 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
228 .llseek = no_llseek, 227 .llseek = no_llseek,
229 .ioctl = uctrl_ioctl, 228 .unlocked_ioctl = uctrl_ioctl,
230 .open = uctrl_open, 229 .open = uctrl_open,
231}; 230};
232 231
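
Note: jsflash and uctrl both move from the old .ioctl file operation to .unlocked_ioctl. The VFS no longer takes the big kernel lock around the latter, which is why jsflash now brackets its ioctl body with lock_kernel()/unlock_kernel() itself. A minimal sketch of that conversion for a placeholder driver:

#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/smp_lock.h>     /* lock_kernel()/unlock_kernel() */

static long foo_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        long error = -ENOTTY;

        lock_kernel();          /* keep the old BKL semantics locally */
        switch (cmd) {
        /* ...per-command handling sets error... */
        default:
                break;
        }
        unlock_kernel();
        return error;
}

static const struct file_operations foo_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = foo_ioctl,    /* was: .ioctl = foo_ioctl */
};
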
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index b1bd3fc7bae8..36fd2e75da1c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1394,7 +1394,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1394 */ 1394 */
1395 cmd->sense_buffer[8] = 0; /* Information */ 1395 cmd->sense_buffer[8] = 0; /* Information */
1396 cmd->sense_buffer[9] = 0xa; /* Add. length */ 1396 cmd->sense_buffer[9] = 0xa; /* Add. length */
1397 do_div(bghm, cmd->device->sector_size); 1397 bghm /= cmd->device->sector_size;
1398 1398
1399 failing_sector = scsi_get_lba(cmd); 1399 failing_sector = scsi_get_lba(cmd);
1400 failing_sector += bghm; 1400 failing_sector += bghm;
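
Note: for context on the lpfc change, do_div() is the helper for dividing a 64-bit dividend by a 32-bit divisor (it modifies the dividend in place and returns the remainder); when both operands are ordinary integers, plain '/' is all that is needed, which is what the hunk switches to. An illustrative sketch of the two cases:

#include <linux/types.h>
#include <asm/div64.h>          /* do_div() */

static u32 sectors_from_bytes64(u64 nbytes, u32 sector_size)
{
        /* 64-by-32 division: do_div() updates nbytes, returns remainder. */
        do_div(nbytes, sector_size);
        return (u32)nbytes;
}

static u32 sectors_from_bytes32(u32 nbytes, u32 sector_size)
{
        /* Plain 32-bit operands: ordinary division suffices. */
        return nbytes / sector_size;
}
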
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 7fb9b5c4669a..12d13d99b6f0 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -44,6 +44,7 @@ struct intc_handle_int {
44struct intc_desc_int { 44struct intc_desc_int {
45 struct list_head list; 45 struct list_head list;
46 struct sys_device sysdev; 46 struct sys_device sysdev;
47 pm_message_t state;
47 unsigned long *reg; 48 unsigned long *reg;
48#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
49 unsigned long *smp; 50 unsigned long *smp;
@@ -786,18 +787,44 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
786 /* get intc controller associated with this sysdev */ 787 /* get intc controller associated with this sysdev */
787 d = container_of(dev, struct intc_desc_int, sysdev); 788 d = container_of(dev, struct intc_desc_int, sysdev);
788 789
789 /* enable wakeup irqs belonging to this intc controller */ 790 switch (state.event) {
790 for_each_irq_desc(irq, desc) { 791 case PM_EVENT_ON:
791 if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip)) 792 if (d->state.event != PM_EVENT_FREEZE)
792 intc_enable(irq); 793 break;
794 for_each_irq_desc(irq, desc) {
795 if (desc->chip != &d->chip)
796 continue;
797 if (desc->status & IRQ_DISABLED)
798 intc_disable(irq);
799 else
800 intc_enable(irq);
801 }
802 break;
803 case PM_EVENT_FREEZE:
804 /* nothing has to be done */
805 break;
806 case PM_EVENT_SUSPEND:
807 /* enable wakeup irqs belonging to this intc controller */
808 for_each_irq_desc(irq, desc) {
809 if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
810 intc_enable(irq);
811 }
812 break;
793 } 813 }
814 d->state = state;
794 815
795 return 0; 816 return 0;
796} 817}
797 818
819static int intc_resume(struct sys_device *dev)
820{
821 return intc_suspend(dev, PMSG_ON);
822}
823
798static struct sysdev_class intc_sysdev_class = { 824static struct sysdev_class intc_sysdev_class = {
799 .name = "intc", 825 .name = "intc",
800 .suspend = intc_suspend, 826 .suspend = intc_suspend,
827 .resume = intc_resume,
801}; 828};
802 829
803/* register this intc as sysdev to allow suspend/resume */ 830/* register this intc as sysdev to allow suspend/resume */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 869d47cb6db3..0a69c0977e3f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -546,10 +546,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
546 tty->driver_data = acm; 546 tty->driver_data = acm;
547 acm->tty = tty; 547 acm->tty = tty;
548 548
549 /* force low_latency on so that our tty_push actually forces the data through,
550 otherwise it is scheduled, and with high data rates data can get lost. */
551 tty->low_latency = 1;
552
553 if (usb_autopm_get_interface(acm->control) < 0) 549 if (usb_autopm_get_interface(acm->control) < 0)
554 goto early_bail; 550 goto early_bail;
555 else 551 else
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 2620bf6fe5e1..9c4c700c7cc6 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb)
1215 } 1215 }
1216 1216
1217 tty = tty_port_tty_get(&port->port); 1217 tty = tty_port_tty_get(&port->port);
1218 if (tty && urb->actual_length) { 1218 if (tty) {
1219 usb_serial_debug_data(debug, dev, __func__, 1219 if (urb->actual_length) {
1220 urb->actual_length, urb->transfer_buffer); 1220 usb_serial_debug_data(debug, dev, __func__,
1221 1221 urb->actual_length, urb->transfer_buffer);
1222 if (!tport->tp_is_open) 1222
1223 dbg("%s - port closed, dropping data", __func__); 1223 if (!tport->tp_is_open)
1224 else 1224 dbg("%s - port closed, dropping data",
1225 ti_recv(&urb->dev->dev, tty, 1225 __func__);
1226 else
1227 ti_recv(&urb->dev->dev, tty,
1226 urb->transfer_buffer, 1228 urb->transfer_buffer,
1227 urb->actual_length); 1229 urb->actual_length);
1228 1230 spin_lock(&tport->tp_lock);
1229 spin_lock(&tport->tp_lock); 1231 tport->tp_icount.rx += urb->actual_length;
1230 tport->tp_icount.rx += urb->actual_length; 1232 spin_unlock(&tport->tp_lock);
1231 spin_unlock(&tport->tp_lock); 1233 }
1232 tty_kref_put(tty); 1234 tty_kref_put(tty);
1233 } 1235 }
1234 1236
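
Note: the ti_usb_3410_5052 rework makes sure tty_kref_put() is reached even when the URB completed with no data; previously the reference taken by tty_port_tty_get() leaked on zero-length completions. A minimal sketch of the get/put pairing (the foo_* name and the flip-buffer push are illustrative, not taken from the driver):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void foo_rx_complete(struct tty_port *port, const unsigned char *buf,
                            size_t len)
{
        struct tty_struct *tty = tty_port_tty_get(port);

        if (!tty)
                return;
        if (len) {
                tty_insert_flip_string(tty, buf, len);
                tty_flip_buffer_push(tty);
        }
        tty_kref_put(tty);      /* matched with tty_port_tty_get() above */
}
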
diff --git a/fs/bio.c b/fs/bio.c
index e0c9e545bbfa..cd42bb882f30 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -348,6 +348,24 @@ err:
348 return NULL; 348 return NULL;
349} 349}
350 350
351/**
352 * bio_alloc - allocate a bio for I/O
353 * @gfp_mask: the GFP_ mask given to the slab allocator
354 * @nr_iovecs: number of iovecs to pre-allocate
355 *
356 * Description:
357 * bio_alloc will allocate a bio and associated bio_vec array that can hold
358 * at least @nr_iovecs entries. Allocations will be done from the
359 * fs_bio_set. Also see @bio_alloc_bioset.
360 *
361 * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
362 * a bio. This is due to the mempool guarantees. To make this work, callers
363 * must never allocate more than 1 bio at the time from this pool. Callers
364 * that need to allocate more than 1 bio must always submit the previously
365 * allocate bio for IO before attempting to allocate a new one. Failure to
366 * do so can cause livelocks under memory pressure.
367 *
368 **/
351struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) 369struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
352{ 370{
353 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 371 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
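
Note: the kernel-doc added to bio_alloc() spells out the mempool guarantee: with __GFP_WAIT set the allocation cannot fail, provided the caller never holds more than one unsubmitted bio from the pool. That guarantee is why the direct-io, ext4 and gfs2 hunks later in this diff drop their NULL checks. A hedged sketch of a caller relying on it:

#include <linux/bio.h>
#include <linux/fs.h>

static void one_page_end_io(struct bio *bio, int error)
{
        bio_put(bio);
}

/*
 * GFP_NOIO includes __GFP_WAIT, so bio_alloc() cannot return NULL here --
 * as long as this caller submits each bio before allocating the next one.
 */
static void submit_one_page(struct block_device *bdev, struct page *page,
                            sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = one_page_end_io;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        submit_bio(WRITE, bio);
}
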
diff --git a/fs/buffer.c b/fs/buffer.c
index 13edf7ad3ff1..b3e5be7514f5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,7 +360,7 @@ still_busy:
360 * Completion handler for block_write_full_page() - pages which are unlocked 360 * Completion handler for block_write_full_page() - pages which are unlocked
361 * during I/O, and which have PageWriteback cleared upon I/O completion. 361 * during I/O, and which have PageWriteback cleared upon I/O completion.
362 */ 362 */
363static void end_buffer_async_write(struct buffer_head *bh, int uptodate) 363void end_buffer_async_write(struct buffer_head *bh, int uptodate)
364{ 364{
365 char b[BDEVNAME_SIZE]; 365 char b[BDEVNAME_SIZE];
366 unsigned long flags; 366 unsigned long flags;
@@ -438,11 +438,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
438 set_buffer_async_read(bh); 438 set_buffer_async_read(bh);
439} 439}
440 440
441void mark_buffer_async_write(struct buffer_head *bh) 441void mark_buffer_async_write_endio(struct buffer_head *bh,
442 bh_end_io_t *handler)
442{ 443{
443 bh->b_end_io = end_buffer_async_write; 444 bh->b_end_io = handler;
444 set_buffer_async_write(bh); 445 set_buffer_async_write(bh);
445} 446}
447
448void mark_buffer_async_write(struct buffer_head *bh)
449{
450 mark_buffer_async_write_endio(bh, end_buffer_async_write);
451}
446EXPORT_SYMBOL(mark_buffer_async_write); 452EXPORT_SYMBOL(mark_buffer_async_write);
447 453
448 454
@@ -547,7 +553,7 @@ repeat:
547 return err; 553 return err;
548} 554}
549 555
550void do_thaw_all(unsigned long unused) 556void do_thaw_all(struct work_struct *work)
551{ 557{
552 struct super_block *sb; 558 struct super_block *sb;
553 char b[BDEVNAME_SIZE]; 559 char b[BDEVNAME_SIZE];
@@ -567,6 +573,7 @@ restart:
567 goto restart; 573 goto restart;
568 } 574 }
569 spin_unlock(&sb_lock); 575 spin_unlock(&sb_lock);
576 kfree(work);
570 printk(KERN_WARNING "Emergency Thaw complete\n"); 577 printk(KERN_WARNING "Emergency Thaw complete\n");
571} 578}
572 579
@@ -577,7 +584,13 @@ restart:
577 */ 584 */
578void emergency_thaw_all(void) 585void emergency_thaw_all(void)
579{ 586{
580 pdflush_operation(do_thaw_all, 0); 587 struct work_struct *work;
588
589 work = kmalloc(sizeof(*work), GFP_ATOMIC);
590 if (work) {
591 INIT_WORK(work, do_thaw_all);
592 schedule_work(work);
593 }
581} 594}
582 595
583/** 596/**
@@ -1608,7 +1621,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
1608 * unplugging the device queue. 1621 * unplugging the device queue.
1609 */ 1622 */
1610static int __block_write_full_page(struct inode *inode, struct page *page, 1623static int __block_write_full_page(struct inode *inode, struct page *page,
1611 get_block_t *get_block, struct writeback_control *wbc) 1624 get_block_t *get_block, struct writeback_control *wbc,
1625 bh_end_io_t *handler)
1612{ 1626{
1613 int err; 1627 int err;
1614 sector_t block; 1628 sector_t block;
@@ -1693,7 +1707,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1693 continue; 1707 continue;
1694 } 1708 }
1695 if (test_clear_buffer_dirty(bh)) { 1709 if (test_clear_buffer_dirty(bh)) {
1696 mark_buffer_async_write(bh); 1710 mark_buffer_async_write_endio(bh, handler);
1697 } else { 1711 } else {
1698 unlock_buffer(bh); 1712 unlock_buffer(bh);
1699 } 1713 }
@@ -1746,7 +1760,7 @@ recover:
1746 if (buffer_mapped(bh) && buffer_dirty(bh) && 1760 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1747 !buffer_delay(bh)) { 1761 !buffer_delay(bh)) {
1748 lock_buffer(bh); 1762 lock_buffer(bh);
1749 mark_buffer_async_write(bh); 1763 mark_buffer_async_write_endio(bh, handler);
1750 } else { 1764 } else {
1751 /* 1765 /*
1752 * The buffer may have been set dirty during 1766 * The buffer may have been set dirty during
@@ -2672,7 +2686,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
2672out: 2686out:
2673 ret = mpage_writepage(page, get_block, wbc); 2687 ret = mpage_writepage(page, get_block, wbc);
2674 if (ret == -EAGAIN) 2688 if (ret == -EAGAIN)
2675 ret = __block_write_full_page(inode, page, get_block, wbc); 2689 ret = __block_write_full_page(inode, page, get_block, wbc,
2690 end_buffer_async_write);
2676 return ret; 2691 return ret;
2677} 2692}
2678EXPORT_SYMBOL(nobh_writepage); 2693EXPORT_SYMBOL(nobh_writepage);
@@ -2830,9 +2845,10 @@ out:
2830 2845
2831/* 2846/*
2832 * The generic ->writepage function for buffer-backed address_spaces 2847 * The generic ->writepage function for buffer-backed address_spaces
2848 * this form passes in the end_io handler used to finish the IO.
2833 */ 2849 */
2834int block_write_full_page(struct page *page, get_block_t *get_block, 2850int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2835 struct writeback_control *wbc) 2851 struct writeback_control *wbc, bh_end_io_t *handler)
2836{ 2852{
2837 struct inode * const inode = page->mapping->host; 2853 struct inode * const inode = page->mapping->host;
2838 loff_t i_size = i_size_read(inode); 2854 loff_t i_size = i_size_read(inode);
@@ -2841,7 +2857,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2841 2857
2842 /* Is the page fully inside i_size? */ 2858 /* Is the page fully inside i_size? */
2843 if (page->index < end_index) 2859 if (page->index < end_index)
2844 return __block_write_full_page(inode, page, get_block, wbc); 2860 return __block_write_full_page(inode, page, get_block, wbc,
2861 handler);
2845 2862
2846 /* Is the page fully outside i_size? (truncate in progress) */ 2863 /* Is the page fully outside i_size? (truncate in progress) */
2847 offset = i_size & (PAGE_CACHE_SIZE-1); 2864 offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2864,9 +2881,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2864 * writes to that region are not written out to the file." 2881 * writes to that region are not written out to the file."
2865 */ 2882 */
2866 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2883 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2867 return __block_write_full_page(inode, page, get_block, wbc); 2884 return __block_write_full_page(inode, page, get_block, wbc, handler);
2868} 2885}
2869 2886
2887/*
2888 * The generic ->writepage function for buffer-backed address_spaces
2889 */
2890int block_write_full_page(struct page *page, get_block_t *get_block,
2891 struct writeback_control *wbc)
2892{
2893 return block_write_full_page_endio(page, get_block, wbc,
2894 end_buffer_async_write);
2895}
2896
2897
2870sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2898sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2871 get_block_t *get_block) 2899 get_block_t *get_block)
2872{ 2900{
@@ -3335,9 +3363,11 @@ EXPORT_SYMBOL(block_read_full_page);
3335EXPORT_SYMBOL(block_sync_page); 3363EXPORT_SYMBOL(block_sync_page);
3336EXPORT_SYMBOL(block_truncate_page); 3364EXPORT_SYMBOL(block_truncate_page);
3337EXPORT_SYMBOL(block_write_full_page); 3365EXPORT_SYMBOL(block_write_full_page);
3366EXPORT_SYMBOL(block_write_full_page_endio);
3338EXPORT_SYMBOL(cont_write_begin); 3367EXPORT_SYMBOL(cont_write_begin);
3339EXPORT_SYMBOL(end_buffer_read_sync); 3368EXPORT_SYMBOL(end_buffer_read_sync);
3340EXPORT_SYMBOL(end_buffer_write_sync); 3369EXPORT_SYMBOL(end_buffer_write_sync);
3370EXPORT_SYMBOL(end_buffer_async_write);
3341EXPORT_SYMBOL(file_fsync); 3371EXPORT_SYMBOL(file_fsync);
3342EXPORT_SYMBOL(generic_block_bmap); 3372EXPORT_SYMBOL(generic_block_bmap);
3343EXPORT_SYMBOL(generic_cont_expand_simple); 3373EXPORT_SYMBOL(generic_cont_expand_simple);
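
Note: two patterns from the buffer.c hunk are worth calling out. block_write_full_page() becomes a thin wrapper around block_write_full_page_endio(), which lets callers supply their own completion handler. And emergency_thaw_all() no longer relies on pdflush_operation(): it allocates a work_struct with GFP_ATOMIC, queues it with schedule_work(), and the handler kfree()s it when done. A minimal sketch of that fire-and-forget workqueue pattern with placeholder names:

#include <linux/workqueue.h>
#include <linux/slab.h>

static void foo_work_fn(struct work_struct *work)
{
        /* ...do the deferred work... */
        kfree(work);            /* the handler owns and frees the item */
}

static void foo_kick_async(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, foo_work_fn);
                schedule_work(work);
        }
}
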
diff --git a/fs/direct-io.c b/fs/direct-io.c
index da258e7249cc..05763bbc2050 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -307,8 +307,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
307 struct bio *bio; 307 struct bio *bio;
308 308
309 bio = bio_alloc(GFP_KERNEL, nr_vecs); 309 bio = bio_alloc(GFP_KERNEL, nr_vecs);
310 if (bio == NULL)
311 return -ENOMEM;
312 310
313 bio->bi_bdev = bdev; 311 bio->bi_bdev = bdev;
314 bio->bi_sector = first_sector; 312 bio->bi_sector = first_sector;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 6132353dcf62..2a1cb0979768 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2416,8 +2416,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2416 len = ee_len; 2416 len = ee_len;
2417 2417
2418 bio = bio_alloc(GFP_NOIO, len); 2418 bio = bio_alloc(GFP_NOIO, len);
2419 if (!bio)
2420 return -ENOMEM;
2421 bio->bi_sector = ee_pblock; 2419 bio->bi_sector = ee_pblock;
2422 bio->bi_bdev = inode->i_sb->s_bdev; 2420 bio->bi_bdev = inode->i_sb->s_bdev;
2423 2421
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 2b25133524a3..06f30e965676 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -938,9 +938,9 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
938} 938}
939 939
940static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf, 940static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
941 unsigned *nbytesp, int write) 941 size_t *nbytesp, int write)
942{ 942{
943 unsigned nbytes = *nbytesp; 943 size_t nbytes = *nbytesp;
944 unsigned long user_addr = (unsigned long) buf; 944 unsigned long user_addr = (unsigned long) buf;
945 unsigned offset = user_addr & ~PAGE_MASK; 945 unsigned offset = user_addr & ~PAGE_MASK;
946 int npages; 946 int npages;
@@ -955,7 +955,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
955 return 0; 955 return 0;
956 } 956 }
957 957
958 nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); 958 nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
959 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 959 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
960 npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ); 960 npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
961 down_read(&current->mm->mmap_sem); 961 down_read(&current->mm->mmap_sem);
@@ -1298,6 +1298,8 @@ static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
1298 if (vma->vm_flags & VM_MAYSHARE) 1298 if (vma->vm_flags & VM_MAYSHARE)
1299 return -ENODEV; 1299 return -ENODEV;
1300 1300
1301 invalidate_inode_pages2(file->f_mapping);
1302
1301 return generic_file_mmap(file, vma); 1303 return generic_file_mmap(file, vma);
1302} 1304}
1303 1305
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 3984e47d1d33..1afd9f26bcb1 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -597,7 +597,6 @@ __acquires(&gl->gl_spin)
597 597
598 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 598 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
599 599
600 down_read(&gfs2_umount_flush_sem);
601 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 600 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
602 gl->gl_demote_state != gl->gl_state) { 601 gl->gl_demote_state != gl->gl_state) {
603 if (find_first_holder(gl)) 602 if (find_first_holder(gl))
@@ -614,15 +613,14 @@ __acquires(&gl->gl_spin)
614 if (ret == 0) 613 if (ret == 0)
615 goto out_unlock; 614 goto out_unlock;
616 if (ret == 2) 615 if (ret == 2)
617 goto out_sem; 616 goto out;
618 gh = find_first_waiter(gl); 617 gh = find_first_waiter(gl);
619 gl->gl_target = gh->gh_state; 618 gl->gl_target = gh->gh_state;
620 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 619 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
621 do_error(gl, 0); /* Fail queued try locks */ 620 do_error(gl, 0); /* Fail queued try locks */
622 } 621 }
623 do_xmote(gl, gh, gl->gl_target); 622 do_xmote(gl, gh, gl->gl_target);
624out_sem: 623out:
625 up_read(&gfs2_umount_flush_sem);
626 return; 624 return;
627 625
628out_sched: 626out_sched:
@@ -631,7 +629,7 @@ out_sched:
631 gfs2_glock_put(gl); 629 gfs2_glock_put(gl);
632out_unlock: 630out_unlock:
633 clear_bit(GLF_LOCK, &gl->gl_flags); 631 clear_bit(GLF_LOCK, &gl->gl_flags);
634 goto out_sem; 632 goto out;
635} 633}
636 634
637static void glock_work_func(struct work_struct *work) 635static void glock_work_func(struct work_struct *work)
@@ -641,6 +639,7 @@ static void glock_work_func(struct work_struct *work)
641 639
642 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 640 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
643 finish_xmote(gl, gl->gl_reply); 641 finish_xmote(gl, gl->gl_reply);
642 down_read(&gfs2_umount_flush_sem);
644 spin_lock(&gl->gl_spin); 643 spin_lock(&gl->gl_spin);
645 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 644 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
646 gl->gl_state != LM_ST_UNLOCKED && 645 gl->gl_state != LM_ST_UNLOCKED &&
@@ -653,6 +652,7 @@ static void glock_work_func(struct work_struct *work)
653 } 652 }
654 run_queue(gl, 0); 653 run_queue(gl, 0);
655 spin_unlock(&gl->gl_spin); 654 spin_unlock(&gl->gl_spin);
655 up_read(&gfs2_umount_flush_sem);
656 if (!delay || 656 if (!delay ||
657 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 657 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
658 gfs2_glock_put(gl); 658 gfs2_glock_put(gl);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 7b277d449155..5a31d426116f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -137,15 +137,15 @@ void gfs2_set_iop(struct inode *inode)
137 if (S_ISREG(mode)) { 137 if (S_ISREG(mode)) {
138 inode->i_op = &gfs2_file_iops; 138 inode->i_op = &gfs2_file_iops;
139 if (gfs2_localflocks(sdp)) 139 if (gfs2_localflocks(sdp))
140 inode->i_fop = gfs2_file_fops_nolock; 140 inode->i_fop = &gfs2_file_fops_nolock;
141 else 141 else
142 inode->i_fop = gfs2_file_fops; 142 inode->i_fop = &gfs2_file_fops;
143 } else if (S_ISDIR(mode)) { 143 } else if (S_ISDIR(mode)) {
144 inode->i_op = &gfs2_dir_iops; 144 inode->i_op = &gfs2_dir_iops;
145 if (gfs2_localflocks(sdp)) 145 if (gfs2_localflocks(sdp))
146 inode->i_fop = gfs2_dir_fops_nolock; 146 inode->i_fop = &gfs2_dir_fops_nolock;
147 else 147 else
148 inode->i_fop = gfs2_dir_fops; 148 inode->i_fop = &gfs2_dir_fops;
149 } else if (S_ISLNK(mode)) { 149 } else if (S_ISLNK(mode)) {
150 inode->i_op = &gfs2_symlink_iops; 150 inode->i_op = &gfs2_symlink_iops;
151 } else { 151 } else {
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index dca4fee3078b..c30be2b66580 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -101,21 +101,23 @@ void gfs2_dinode_print(const struct gfs2_inode *ip);
101extern const struct inode_operations gfs2_file_iops; 101extern const struct inode_operations gfs2_file_iops;
102extern const struct inode_operations gfs2_dir_iops; 102extern const struct inode_operations gfs2_dir_iops;
103extern const struct inode_operations gfs2_symlink_iops; 103extern const struct inode_operations gfs2_symlink_iops;
104extern const struct file_operations *gfs2_file_fops_nolock; 104extern const struct file_operations gfs2_file_fops_nolock;
105extern const struct file_operations *gfs2_dir_fops_nolock; 105extern const struct file_operations gfs2_dir_fops_nolock;
106 106
107extern void gfs2_set_inode_flags(struct inode *inode); 107extern void gfs2_set_inode_flags(struct inode *inode);
108 108
109#ifdef CONFIG_GFS2_FS_LOCKING_DLM 109#ifdef CONFIG_GFS2_FS_LOCKING_DLM
110extern const struct file_operations *gfs2_file_fops; 110extern const struct file_operations gfs2_file_fops;
111extern const struct file_operations *gfs2_dir_fops; 111extern const struct file_operations gfs2_dir_fops;
112
112static inline int gfs2_localflocks(const struct gfs2_sbd *sdp) 113static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
113{ 114{
114 return sdp->sd_args.ar_localflocks; 115 return sdp->sd_args.ar_localflocks;
115} 116}
116#else /* Single node only */ 117#else /* Single node only */
117#define gfs2_file_fops NULL 118#define gfs2_file_fops gfs2_file_fops_nolock
118#define gfs2_dir_fops NULL 119#define gfs2_dir_fops gfs2_dir_fops_nolock
120
119static inline int gfs2_localflocks(const struct gfs2_sbd *sdp) 121static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
120{ 122{
121 return 1; 123 return 1;
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 70b9b8548945..101caf3ee861 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -705,7 +705,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
705 } 705 }
706} 706}
707 707
708const struct file_operations *gfs2_file_fops = &(const struct file_operations){ 708const struct file_operations gfs2_file_fops = {
709 .llseek = gfs2_llseek, 709 .llseek = gfs2_llseek,
710 .read = do_sync_read, 710 .read = do_sync_read,
711 .aio_read = generic_file_aio_read, 711 .aio_read = generic_file_aio_read,
@@ -723,7 +723,7 @@ const struct file_operations *gfs2_file_fops = &(const struct file_operations){
723 .setlease = gfs2_setlease, 723 .setlease = gfs2_setlease,
724}; 724};
725 725
726const struct file_operations *gfs2_dir_fops = &(const struct file_operations){ 726const struct file_operations gfs2_dir_fops = {
727 .readdir = gfs2_readdir, 727 .readdir = gfs2_readdir,
728 .unlocked_ioctl = gfs2_ioctl, 728 .unlocked_ioctl = gfs2_ioctl,
729 .open = gfs2_open, 729 .open = gfs2_open,
@@ -735,7 +735,7 @@ const struct file_operations *gfs2_dir_fops = &(const struct file_operations){
735 735
736#endif /* CONFIG_GFS2_FS_LOCKING_DLM */ 736#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
737 737
738const struct file_operations *gfs2_file_fops_nolock = &(const struct file_operations){ 738const struct file_operations gfs2_file_fops_nolock = {
739 .llseek = gfs2_llseek, 739 .llseek = gfs2_llseek,
740 .read = do_sync_read, 740 .read = do_sync_read,
741 .aio_read = generic_file_aio_read, 741 .aio_read = generic_file_aio_read,
@@ -751,7 +751,7 @@ const struct file_operations *gfs2_file_fops_nolock = &(const struct file_operat
751 .setlease = generic_setlease, 751 .setlease = generic_setlease,
752}; 752};
753 753
754const struct file_operations *gfs2_dir_fops_nolock = &(const struct file_operations){ 754const struct file_operations gfs2_dir_fops_nolock = {
755 .readdir = gfs2_readdir, 755 .readdir = gfs2_readdir,
756 .unlocked_ioctl = gfs2_ioctl, 756 .unlocked_ioctl = gfs2_ioctl,
757 .open = gfs2_open, 757 .open = gfs2_open,
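
Note: the gfs2 hunks replace the exported pointer-to-compound-literal file_operations objects with ordinary const structs, and in the non-DLM build the locking variants now alias the nolock ones instead of being NULL. A minimal sketch of the declare-and-take-address pattern for a placeholder filesystem:

#include <linux/fs.h>

static int foo_open(struct inode *inode, struct file *file)
{
        return 0;
}

/* Plain const object; users take its address when setting i_fop. */
const struct file_operations foo_file_fops = {
        .open   = foo_open,
        .llseek = generic_file_llseek,
};

static void foo_set_iop(struct inode *inode)
{
        inode->i_fop = &foo_file_fops;
}
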
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 51883b3ad89c..650a730707b7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -272,11 +272,6 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
272 lock_page(page); 272 lock_page(page);
273 273
274 bio = bio_alloc(GFP_NOFS, 1); 274 bio = bio_alloc(GFP_NOFS, 1);
275 if (unlikely(!bio)) {
276 __free_page(page);
277 return -ENOBUFS;
278 }
279
280 bio->bi_sector = sector * (sb->s_blocksize >> 9); 275 bio->bi_sector = sector * (sb->s_blocksize >> 9);
281 bio->bi_bdev = sb->s_bdev; 276 bio->bi_bdev = sb->s_bdev;
282 bio_add_page(bio, page, PAGE_SIZE, 0); 277 bio_add_page(bio, page, PAGE_SIZE, 0);
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index abd5429ae285..1c70fa5168d6 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -371,6 +371,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
371 ip = ghs[1].gh_gl->gl_object; 371 ip = ghs[1].gh_gl->gl_object;
372 372
373 ip->i_disksize = size; 373 ip->i_disksize = size;
374 i_size_write(inode, size);
374 375
375 error = gfs2_meta_inode_buffer(ip, &dibh); 376 error = gfs2_meta_inode_buffer(ip, &dibh);
376 377
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 8d53f66b5bcc..152e6c4a0dca 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -81,7 +81,7 @@ struct gfs2_quota_change_host {
81 81
82static LIST_HEAD(qd_lru_list); 82static LIST_HEAD(qd_lru_list);
83static atomic_t qd_lru_count = ATOMIC_INIT(0); 83static atomic_t qd_lru_count = ATOMIC_INIT(0);
84static spinlock_t qd_lru_lock = SPIN_LOCK_UNLOCKED; 84static DEFINE_SPINLOCK(qd_lru_lock);
85 85
86int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask) 86int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
87{ 87{
@@ -1364,7 +1364,7 @@ int gfs2_quotad(void *data)
1364 refrigerator(); 1364 refrigerator();
1365 t = min(quotad_timeo, statfs_timeo); 1365 t = min(quotad_timeo, statfs_timeo);
1366 1366
1367 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE); 1367 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1368 spin_lock(&sdp->sd_trunc_lock); 1368 spin_lock(&sdp->sd_trunc_lock);
1369 empty = list_empty(&sdp->sd_trunc_list); 1369 empty = list_empty(&sdp->sd_trunc_list);
1370 spin_unlock(&sdp->sd_trunc_lock); 1370 spin_unlock(&sdp->sd_trunc_lock);
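
Note: SPIN_LOCK_UNLOCKED is a deprecated static initializer (every lock set up with it lands in the same lockdep class), so the quota.c hunk moves to DEFINE_SPINLOCK(). A minimal sketch of the two supported ways to initialize a spinlock:

#include <linux/spinlock.h>

/* File-scope lock: the macro also gives lockdep a unique key. */
static DEFINE_SPINLOCK(foo_lock);

struct foo {
        spinlock_t lock;
};

static void foo_init(struct foo *f)
{
        /* Lock embedded in a structure: initialize it at runtime. */
        spin_lock_init(&f->lock);
}
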
diff --git a/fs/inode.c b/fs/inode.c
index d06d6d268de9..6ad14a1cd8c9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1470,42 +1470,6 @@ static void __wait_on_freeing_inode(struct inode *inode)
1470 spin_lock(&inode_lock); 1470 spin_lock(&inode_lock);
1471} 1471}
1472 1472
1473/*
1474 * We rarely want to lock two inodes that do not have a parent/child
1475 * relationship (such as directory, child inode) simultaneously. The
1476 * vast majority of file systems should be able to get along fine
1477 * without this. Do not use these functions except as a last resort.
1478 */
1479void inode_double_lock(struct inode *inode1, struct inode *inode2)
1480{
1481 if (inode1 == NULL || inode2 == NULL || inode1 == inode2) {
1482 if (inode1)
1483 mutex_lock(&inode1->i_mutex);
1484 else if (inode2)
1485 mutex_lock(&inode2->i_mutex);
1486 return;
1487 }
1488
1489 if (inode1 < inode2) {
1490 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
1491 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
1492 } else {
1493 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
1494 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
1495 }
1496}
1497EXPORT_SYMBOL(inode_double_lock);
1498
1499void inode_double_unlock(struct inode *inode1, struct inode *inode2)
1500{
1501 if (inode1)
1502 mutex_unlock(&inode1->i_mutex);
1503
1504 if (inode2 && inode2 != inode1)
1505 mutex_unlock(&inode2->i_mutex);
1506}
1507EXPORT_SYMBOL(inode_double_unlock);
1508
1509static __initdata unsigned long ihash_entries; 1473static __initdata unsigned long ihash_entries;
1510static int __init set_ihash_entries(char *str) 1474static int __init set_ihash_entries(char *str)
1511{ 1475{
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 24638e059bf3..064279e33bbb 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -688,6 +688,8 @@ static const struct nilfs_bmap_ptr_operations nilfs_bmap_ptr_ops_gc = {
688 .bpop_translate = NULL, 688 .bpop_translate = NULL,
689}; 689};
690 690
691static struct lock_class_key nilfs_bmap_dat_lock_key;
692
691/** 693/**
692 * nilfs_bmap_read - read a bmap from an inode 694 * nilfs_bmap_read - read a bmap from an inode
693 * @bmap: bmap 695 * @bmap: bmap
@@ -715,6 +717,7 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
715 bmap->b_pops = &nilfs_bmap_ptr_ops_p; 717 bmap->b_pops = &nilfs_bmap_ptr_ops_p;
716 bmap->b_last_allocated_key = 0; /* XXX: use macro */ 718 bmap->b_last_allocated_key = 0; /* XXX: use macro */
717 bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT; 719 bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
720 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
718 break; 721 break;
719 case NILFS_CPFILE_INO: 722 case NILFS_CPFILE_INO:
720 case NILFS_SUFILE_INO: 723 case NILFS_SUFILE_INO:
@@ -772,6 +775,7 @@ void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
772{ 775{
773 memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union)); 776 memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union));
774 init_rwsem(&gcbmap->b_sem); 777 init_rwsem(&gcbmap->b_sem);
778 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
775 gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode; 779 gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode;
776} 780}
777 781
@@ -779,5 +783,6 @@ void nilfs_bmap_commit_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
779{ 783{
780 memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union)); 784 memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union));
781 init_rwsem(&bmap->b_sem); 785 init_rwsem(&bmap->b_sem);
786 lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
782 bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; 787 bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
783} 788}
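
Note: the nilfs2 bmap hunks hand the DAT bmap's b_sem a dedicated lockdep class through a static lock_class_key, so lockdep can tell it apart from the b_sem of other bmaps when the two nest. A minimal sketch of that annotation with placeholder names:

#include <linux/rwsem.h>
#include <linux/lockdep.h>

static struct lock_class_key foo_dat_lock_key;

struct foo_bmap {
        struct rw_semaphore b_sem;
};

static void foo_bmap_init_dat(struct foo_bmap *bmap)
{
        init_rwsem(&bmap->b_sem);
        /* Put this semaphore in its own lockdep class. */
        lockdep_set_class(&bmap->b_sem, &foo_dat_lock_key);
}
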
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 7558c977db02..3d0c18a16db1 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -35,11 +35,6 @@
35#include "bmap_union.h" 35#include "bmap_union.h"
36 36
37/* 37/*
38 * NILFS filesystem version
39 */
40#define NILFS_VERSION "2.0.5"
41
42/*
43 * nilfs inode data in memory 38 * nilfs inode data in memory
44 */ 39 */
45struct nilfs_inode_info { 40struct nilfs_inode_info {
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 6ade0963fc1d..4fc081e47d70 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -413,7 +413,6 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
413 struct nilfs_segment_entry *ent, *n; 413 struct nilfs_segment_entry *ent, *n;
414 struct inode *sufile = nilfs->ns_sufile; 414 struct inode *sufile = nilfs->ns_sufile;
415 __u64 segnum[4]; 415 __u64 segnum[4];
416 time_t mtime;
417 int err; 416 int err;
418 int i; 417 int i;
419 418
@@ -442,24 +441,13 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
442 * Collecting segments written after the latest super root. 441 * Collecting segments written after the latest super root.
443 * These are marked dirty to avoid being reallocated in the next write. 442 * These are marked dirty to avoid being reallocated in the next write.
444 */ 443 */
445 mtime = get_seconds();
446 list_for_each_entry_safe(ent, n, head, list) { 444 list_for_each_entry_safe(ent, n, head, list) {
447 if (ent->segnum == segnum[0]) { 445 if (ent->segnum != segnum[0]) {
448 list_del(&ent->list); 446 err = nilfs_sufile_scrap(sufile, ent->segnum);
449 nilfs_free_segment_entry(ent); 447 if (unlikely(err))
450 continue; 448 goto failed;
451 }
452 err = nilfs_open_segment_entry(ent, sufile);
453 if (unlikely(err))
454 goto failed;
455 if (!nilfs_segment_usage_dirty(ent->raw_su)) {
456 /* make the segment garbage */
457 ent->raw_su->su_nblocks = cpu_to_le32(0);
458 ent->raw_su->su_lastmod = cpu_to_le32(mtime);
459 nilfs_segment_usage_set_dirty(ent->raw_su);
460 } 449 }
461 list_del(&ent->list); 450 list_del(&ent->list);
462 nilfs_close_segment_entry(ent, sufile);
463 nilfs_free_segment_entry(ent); 451 nilfs_free_segment_entry(ent);
464 } 452 }
465 453
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index c774cf397e2f..98e68677f045 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -93,6 +93,52 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
93 create, NULL, bhp); 93 create, NULL, bhp);
94} 94}
95 95
96static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
97 u64 ncleanadd, u64 ndirtyadd)
98{
99 struct nilfs_sufile_header *header;
100 void *kaddr;
101
102 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
103 header = kaddr + bh_offset(header_bh);
104 le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
105 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
106 kunmap_atomic(kaddr, KM_USER0);
107
108 nilfs_mdt_mark_buffer_dirty(header_bh);
109}
110
111int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
112 void (*dofunc)(struct inode *, __u64,
113 struct buffer_head *,
114 struct buffer_head *))
115{
116 struct buffer_head *header_bh, *bh;
117 int ret;
118
119 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
120 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
121 __func__, (unsigned long long)segnum);
122 return -EINVAL;
123 }
124 down_write(&NILFS_MDT(sufile)->mi_sem);
125
126 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
127 if (ret < 0)
128 goto out_sem;
129
130 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
131 if (!ret) {
132 dofunc(sufile, segnum, header_bh, bh);
133 brelse(bh);
134 }
135 brelse(header_bh);
136
137 out_sem:
138 up_write(&NILFS_MDT(sufile)->mi_sem);
139 return ret;
140}
141
96/** 142/**
97 * nilfs_sufile_alloc - allocate a segment 143 * nilfs_sufile_alloc - allocate a segment
98 * @sufile: inode of segment usage file 144 * @sufile: inode of segment usage file
@@ -113,7 +159,6 @@ nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
113int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) 159int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
114{ 160{
115 struct buffer_head *header_bh, *su_bh; 161 struct buffer_head *header_bh, *su_bh;
116 struct the_nilfs *nilfs;
117 struct nilfs_sufile_header *header; 162 struct nilfs_sufile_header *header;
118 struct nilfs_segment_usage *su; 163 struct nilfs_segment_usage *su;
119 size_t susz = NILFS_MDT(sufile)->mi_entry_size; 164 size_t susz = NILFS_MDT(sufile)->mi_entry_size;
@@ -124,8 +169,6 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
124 169
125 down_write(&NILFS_MDT(sufile)->mi_sem); 170 down_write(&NILFS_MDT(sufile)->mi_sem);
126 171
127 nilfs = NILFS_MDT(sufile)->mi_nilfs;
128
129 ret = nilfs_sufile_get_header_block(sufile, &header_bh); 172 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
130 if (ret < 0) 173 if (ret < 0)
131 goto out_sem; 174 goto out_sem;
@@ -192,165 +235,84 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
192 return ret; 235 return ret;
193} 236}
194 237
195/** 238void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
196 * nilfs_sufile_cancel_free - 239 struct buffer_head *header_bh,
197 * @sufile: inode of segment usage file 240 struct buffer_head *su_bh)
198 * @segnum: segment number
199 *
200 * Description:
201 *
202 * Return Value: On success, 0 is returned. On error, one of the following
203 * negative error codes is returned.
204 *
205 * %-EIO - I/O error.
206 *
207 * %-ENOMEM - Insufficient amount of memory available.
208 */
209int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
210{ 241{
211 struct buffer_head *header_bh, *su_bh;
212 struct the_nilfs *nilfs;
213 struct nilfs_sufile_header *header;
214 struct nilfs_segment_usage *su; 242 struct nilfs_segment_usage *su;
215 void *kaddr; 243 void *kaddr;
216 int ret;
217
218 down_write(&NILFS_MDT(sufile)->mi_sem);
219
220 nilfs = NILFS_MDT(sufile)->mi_nilfs;
221
222 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
223 if (ret < 0)
224 goto out_sem;
225
226 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
227 if (ret < 0)
228 goto out_header;
229 244
230 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 245 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
231 su = nilfs_sufile_block_get_segment_usage( 246 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
232 sufile, segnum, su_bh, kaddr);
233 if (unlikely(!nilfs_segment_usage_clean(su))) { 247 if (unlikely(!nilfs_segment_usage_clean(su))) {
234 printk(KERN_WARNING "%s: segment %llu must be clean\n", 248 printk(KERN_WARNING "%s: segment %llu must be clean\n",
235 __func__, (unsigned long long)segnum); 249 __func__, (unsigned long long)segnum);
236 kunmap_atomic(kaddr, KM_USER0); 250 kunmap_atomic(kaddr, KM_USER0);
237 goto out_su_bh; 251 return;
238 } 252 }
239 nilfs_segment_usage_set_dirty(su); 253 nilfs_segment_usage_set_dirty(su);
240 kunmap_atomic(kaddr, KM_USER0); 254 kunmap_atomic(kaddr, KM_USER0);
241 255
242 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 256 nilfs_sufile_mod_counter(header_bh, -1, 1);
243 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
244 le64_add_cpu(&header->sh_ncleansegs, -1);
245 le64_add_cpu(&header->sh_ndirtysegs, 1);
246 kunmap_atomic(kaddr, KM_USER0);
247
248 nilfs_mdt_mark_buffer_dirty(header_bh);
249 nilfs_mdt_mark_buffer_dirty(su_bh); 257 nilfs_mdt_mark_buffer_dirty(su_bh);
250 nilfs_mdt_mark_dirty(sufile); 258 nilfs_mdt_mark_dirty(sufile);
251
252 out_su_bh:
253 brelse(su_bh);
254 out_header:
255 brelse(header_bh);
256 out_sem:
257 up_write(&NILFS_MDT(sufile)->mi_sem);
258 return ret;
259} 259}
260 260
261/** 261void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
262 * nilfs_sufile_freev - free segments 262 struct buffer_head *header_bh,
263 * @sufile: inode of segment usage file 263 struct buffer_head *su_bh)
264 * @segnum: array of segment numbers
265 * @nsegs: number of segments
266 *
267 * Description: nilfs_sufile_freev() frees segments specified by @segnum and
268 * @nsegs, which must have been returned by a previous call to
269 * nilfs_sufile_alloc().
270 *
271 * Return Value: On success, 0 is returned. On error, one of the following
272 * negative error codes is returned.
273 *
274 * %-EIO - I/O error.
275 *
276 * %-ENOMEM - Insufficient amount of memory available.
277 */
278#define NILFS_SUFILE_FREEV_PREALLOC 16
279int nilfs_sufile_freev(struct inode *sufile, __u64 *segnum, size_t nsegs)
280{ 264{
281 struct buffer_head *header_bh, **su_bh,
282 *su_bh_prealloc[NILFS_SUFILE_FREEV_PREALLOC];
283 struct the_nilfs *nilfs;
284 struct nilfs_sufile_header *header;
285 struct nilfs_segment_usage *su; 265 struct nilfs_segment_usage *su;
286 void *kaddr; 266 void *kaddr;
287 int ret, i; 267 int clean, dirty;
288 268
289 down_write(&NILFS_MDT(sufile)->mi_sem); 269 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
290 270 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
291 nilfs = NILFS_MDT(sufile)->mi_nilfs; 271 if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
292 272 su->su_nblocks == cpu_to_le32(0)) {
293 /* prepare resources */
294 if (nsegs <= NILFS_SUFILE_FREEV_PREALLOC)
295 su_bh = su_bh_prealloc;
296 else {
297 su_bh = kmalloc(sizeof(*su_bh) * nsegs, GFP_NOFS);
298 if (su_bh == NULL) {
299 ret = -ENOMEM;
300 goto out_sem;
301 }
302 }
303
304 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
305 if (ret < 0)
306 goto out_su_bh;
307 for (i = 0; i < nsegs; i++) {
308 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum[i],
309 0, &su_bh[i]);
310 if (ret < 0)
311 goto out_bh;
312 }
313
314 /* free segments */
315 for (i = 0; i < nsegs; i++) {
316 kaddr = kmap_atomic(su_bh[i]->b_page, KM_USER0);
317 su = nilfs_sufile_block_get_segment_usage(
318 sufile, segnum[i], su_bh[i], kaddr);
319 WARN_ON(nilfs_segment_usage_error(su));
320 nilfs_segment_usage_set_clean(su);
321 kunmap_atomic(kaddr, KM_USER0); 273 kunmap_atomic(kaddr, KM_USER0);
322 nilfs_mdt_mark_buffer_dirty(su_bh[i]); 274 return;
323 } 275 }
324 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 276 clean = nilfs_segment_usage_clean(su);
325 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr); 277 dirty = nilfs_segment_usage_dirty(su);
326 le64_add_cpu(&header->sh_ncleansegs, nsegs); 278
327 le64_add_cpu(&header->sh_ndirtysegs, -(u64)nsegs); 279 /* make the segment garbage */
280 su->su_lastmod = cpu_to_le64(0);
281 su->su_nblocks = cpu_to_le32(0);
282 su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
328 kunmap_atomic(kaddr, KM_USER0); 283 kunmap_atomic(kaddr, KM_USER0);
329 nilfs_mdt_mark_buffer_dirty(header_bh); 284
285 nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
286 nilfs_mdt_mark_buffer_dirty(su_bh);
330 nilfs_mdt_mark_dirty(sufile); 287 nilfs_mdt_mark_dirty(sufile);
288}
331 289
332 out_bh: 290void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
333 for (i--; i >= 0; i--) 291 struct buffer_head *header_bh,
334 brelse(su_bh[i]); 292 struct buffer_head *su_bh)
335 brelse(header_bh); 293{
294 struct nilfs_segment_usage *su;
295 void *kaddr;
296 int sudirty;
336 297
337 out_su_bh: 298 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
338 if (su_bh != su_bh_prealloc) 299 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
339 kfree(su_bh); 300 if (nilfs_segment_usage_clean(su)) {
301 printk(KERN_WARNING "%s: segment %llu is already clean\n",
302 __func__, (unsigned long long)segnum);
303 kunmap_atomic(kaddr, KM_USER0);
304 return;
305 }
306 WARN_ON(nilfs_segment_usage_error(su));
307 WARN_ON(!nilfs_segment_usage_dirty(su));
340 308
341 out_sem: 309 sudirty = nilfs_segment_usage_dirty(su);
342 up_write(&NILFS_MDT(sufile)->mi_sem); 310 nilfs_segment_usage_set_clean(su);
343 return ret; 311 kunmap_atomic(kaddr, KM_USER0);
344} 312 nilfs_mdt_mark_buffer_dirty(su_bh);
345 313
346/** 314 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
347 * nilfs_sufile_free - 315 nilfs_mdt_mark_dirty(sufile);
348 * @sufile:
349 * @segnum:
350 */
351int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
352{
353 return nilfs_sufile_freev(sufile, &segnum, 1);
354} 316}
355 317
356/** 318/**
@@ -500,72 +462,28 @@ int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
500 return ret; 462 return ret;
501} 463}
502 464
503/** 465void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
504 * nilfs_sufile_set_error - mark a segment as erroneous 466 struct buffer_head *header_bh,
505 * @sufile: inode of segment usage file 467 struct buffer_head *su_bh)
506 * @segnum: segment number
507 *
508 * Description: nilfs_sufile_set_error() marks the segment specified by
509 * @segnum as erroneous. The error segment will never be used again.
510 *
511 * Return Value: On success, 0 is returned. On error, one of the following
512 * negative error codes is returned.
513 *
514 * %-EIO - I/O error.
515 *
516 * %-ENOMEM - Insufficient amount of memory available.
517 *
518 * %-EINVAL - Invalid segment usage number.
519 */
520int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
521{ 468{
522 struct buffer_head *header_bh, *su_bh;
523 struct nilfs_segment_usage *su; 469 struct nilfs_segment_usage *su;
524 struct nilfs_sufile_header *header;
525 void *kaddr; 470 void *kaddr;
526 int ret; 471 int suclean;
527
528 if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
529 printk(KERN_WARNING "%s: invalid segment number: %llu\n",
530 __func__, (unsigned long long)segnum);
531 return -EINVAL;
532 }
533 down_write(&NILFS_MDT(sufile)->mi_sem);
534
535 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
536 if (ret < 0)
537 goto out_sem;
538 ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &su_bh);
539 if (ret < 0)
540 goto out_header;
541 472
542 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 473 kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
543 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 474 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
544 if (nilfs_segment_usage_error(su)) { 475 if (nilfs_segment_usage_error(su)) {
545 kunmap_atomic(kaddr, KM_USER0); 476 kunmap_atomic(kaddr, KM_USER0);
546 brelse(su_bh); 477 return;
547 goto out_header;
548 } 478 }
549 479 suclean = nilfs_segment_usage_clean(su);
550 nilfs_segment_usage_set_error(su); 480 nilfs_segment_usage_set_error(su);
551 kunmap_atomic(kaddr, KM_USER0); 481 kunmap_atomic(kaddr, KM_USER0);
552 brelse(su_bh);
553 482
554 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 483 if (suclean)
555 header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr); 484 nilfs_sufile_mod_counter(header_bh, -1, 0);
556 le64_add_cpu(&header->sh_ndirtysegs, -1);
557 kunmap_atomic(kaddr, KM_USER0);
558 nilfs_mdt_mark_buffer_dirty(header_bh);
559 nilfs_mdt_mark_buffer_dirty(su_bh); 485 nilfs_mdt_mark_buffer_dirty(su_bh);
560 nilfs_mdt_mark_dirty(sufile); 486 nilfs_mdt_mark_dirty(sufile);
561 brelse(su_bh);
562
563 out_header:
564 brelse(header_bh);
565
566 out_sem:
567 up_write(&NILFS_MDT(sufile)->mi_sem);
568 return ret;
569} 487}
570 488
571/** 489/**
@@ -625,7 +543,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum,
625 si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks); 543 si[i + j].sui_nblocks = le32_to_cpu(su->su_nblocks);
626 si[i + j].sui_flags = le32_to_cpu(su->su_flags) & 544 si[i + j].sui_flags = le32_to_cpu(su->su_flags) &
627 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE); 545 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
628 if (nilfs_segment_is_active(nilfs, segnum + i + j)) 546 if (nilfs_segment_is_active(nilfs, segnum + j))
629 si[i + j].sui_flags |= 547 si[i + j].sui_flags |=
630 (1UL << NILFS_SEGMENT_USAGE_ACTIVE); 548 (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
631 } 549 }
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index d595f33a768d..a2e2efd4ade1 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -36,9 +36,6 @@ static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
36} 36}
37 37
38int nilfs_sufile_alloc(struct inode *, __u64 *); 38int nilfs_sufile_alloc(struct inode *, __u64 *);
39int nilfs_sufile_cancel_free(struct inode *, __u64);
40int nilfs_sufile_freev(struct inode *, __u64 *, size_t);
41int nilfs_sufile_free(struct inode *, __u64);
42int nilfs_sufile_get_segment_usage(struct inode *, __u64, 39int nilfs_sufile_get_segment_usage(struct inode *, __u64,
43 struct nilfs_segment_usage **, 40 struct nilfs_segment_usage **,
44 struct buffer_head **); 41 struct buffer_head **);
@@ -46,9 +43,83 @@ void nilfs_sufile_put_segment_usage(struct inode *, __u64,
46 struct buffer_head *); 43 struct buffer_head *);
47int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); 44int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
48int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *); 45int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *);
49int nilfs_sufile_set_error(struct inode *, __u64);
50ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, struct nilfs_suinfo *, 46ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, struct nilfs_suinfo *,
51 size_t); 47 size_t);
52 48
49int nilfs_sufile_update(struct inode *, __u64, int,
50 void (*dofunc)(struct inode *, __u64,
51 struct buffer_head *,
52 struct buffer_head *));
53void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
54 struct buffer_head *);
55void nilfs_sufile_do_scrap(struct inode *, __u64, struct buffer_head *,
56 struct buffer_head *);
57void nilfs_sufile_do_free(struct inode *, __u64, struct buffer_head *,
58 struct buffer_head *);
59void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
60 struct buffer_head *);
61
62/**
 63 * nilfs_sufile_cancel_free - take a clean segment back into use
64 * @sufile: inode of segment usage file
65 * @segnum: segment number
66 *
 67 * Description: nilfs_sufile_cancel_free() marks the clean segment specified
 68 * by @segnum as dirty again, cancelling a previous free.
69 * Return Value: On success, 0 is returned. On error, one of the following
70 * negative error codes is returned.
71 *
72 * %-EIO - I/O error.
73 *
74 * %-ENOMEM - Insufficient amount of memory available.
75 */
76static inline int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
77{
78 return nilfs_sufile_update(sufile, segnum, 0,
79 nilfs_sufile_do_cancel_free);
80}
81
82/**
83 * nilfs_sufile_scrap - make a segment garbage
84 * @sufile: inode of segment usage file
85 * @segnum: segment number to be freed
86 */
87static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
88{
89 return nilfs_sufile_update(sufile, segnum, 1, nilfs_sufile_do_scrap);
90}
91
92/**
93 * nilfs_sufile_free - free segment
94 * @sufile: inode of segment usage file
95 * @segnum: segment number to be freed
96 */
97static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
98{
99 return nilfs_sufile_update(sufile, segnum, 0, nilfs_sufile_do_free);
100}
101
102/**
103 * nilfs_sufile_set_error - mark a segment as erroneous
104 * @sufile: inode of segment usage file
105 * @segnum: segment number
106 *
107 * Description: nilfs_sufile_set_error() marks the segment specified by
108 * @segnum as erroneous. The error segment will never be used again.
109 *
110 * Return Value: On success, 0 is returned. On error, one of the following
111 * negative error codes is returned.
112 *
113 * %-EIO - I/O error.
114 *
115 * %-ENOMEM - Insufficient amount of memory available.
116 *
117 * %-EINVAL - Invalid segment usage number.
118 */
119static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
120{
121 return nilfs_sufile_update(sufile, segnum, 0,
122 nilfs_sufile_do_set_error);
123}
53 124
54#endif /* _NILFS_SUFILE_H */ 125#endif /* _NILFS_SUFILE_H */
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index e117e1ea9bff..6989b03e97ab 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -63,7 +63,6 @@
63MODULE_AUTHOR("NTT Corp."); 63MODULE_AUTHOR("NTT Corp.");
64MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem " 64MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
65 "(NILFS)"); 65 "(NILFS)");
66MODULE_VERSION(NILFS_VERSION);
67MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
68 67
69static int nilfs_remount(struct super_block *sb, int *flags, char *data); 68static int nilfs_remount(struct super_block *sb, int *flags, char *data);
@@ -476,11 +475,12 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
476{ 475{
477 struct super_block *sb = dentry->d_sb; 476 struct super_block *sb = dentry->d_sb;
478 struct nilfs_sb_info *sbi = NILFS_SB(sb); 477 struct nilfs_sb_info *sbi = NILFS_SB(sb);
478 struct the_nilfs *nilfs = sbi->s_nilfs;
479 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
479 unsigned long long blocks; 480 unsigned long long blocks;
480 unsigned long overhead; 481 unsigned long overhead;
481 unsigned long nrsvblocks; 482 unsigned long nrsvblocks;
482 sector_t nfreeblocks; 483 sector_t nfreeblocks;
483 struct the_nilfs *nilfs = sbi->s_nilfs;
484 int err; 484 int err;
485 485
486 /* 486 /*
@@ -514,6 +514,9 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
514 buf->f_files = atomic_read(&sbi->s_inodes_count); 514 buf->f_files = atomic_read(&sbi->s_inodes_count);
515 buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */ 515 buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */
516 buf->f_namelen = NILFS_NAME_LEN; 516 buf->f_namelen = NILFS_NAME_LEN;
517 buf->f_fsid.val[0] = (u32)id;
518 buf->f_fsid.val[1] = (u32)(id >> 32);
519
517 return 0; 520 return 0;
518} 521}
519 522
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 33400cf0bbe2..7f65b3be4aa9 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -115,6 +115,7 @@ void put_nilfs(struct the_nilfs *nilfs)
115static int nilfs_load_super_root(struct the_nilfs *nilfs, 115static int nilfs_load_super_root(struct the_nilfs *nilfs,
116 struct nilfs_sb_info *sbi, sector_t sr_block) 116 struct nilfs_sb_info *sbi, sector_t sr_block)
117{ 117{
118 static struct lock_class_key dat_lock_key;
118 struct buffer_head *bh_sr; 119 struct buffer_head *bh_sr;
119 struct nilfs_super_root *raw_sr; 120 struct nilfs_super_root *raw_sr;
120 struct nilfs_super_block **sbp = nilfs->ns_sbp; 121 struct nilfs_super_block **sbp = nilfs->ns_sbp;
@@ -163,6 +164,9 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs,
163 if (unlikely(err)) 164 if (unlikely(err))
164 goto failed_sufile; 165 goto failed_sufile;
165 166
167 lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
168 lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);
169
166 nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat); 170 nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
167 nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size, 171 nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
168 sizeof(struct nilfs_cpfile_header)); 172 sizeof(struct nilfs_cpfile_header));
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8672b9536039..c2a87c885b73 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1912,6 +1912,22 @@ out_sems:
1912 return written ? written : ret; 1912 return written ? written : ret;
1913} 1913}
1914 1914
1915static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
1916 struct file *out,
1917 struct splice_desc *sd)
1918{
1919 int ret;
1920
1921 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos,
1922 sd->total_len, 0, NULL);
1923 if (ret < 0) {
1924 mlog_errno(ret);
1925 return ret;
1926 }
1927
1928 return splice_from_pipe_feed(pipe, sd, pipe_to_file);
1929}
1930
1915static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe, 1931static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1916 struct file *out, 1932 struct file *out,
1917 loff_t *ppos, 1933 loff_t *ppos,
@@ -1919,38 +1935,76 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1919 unsigned int flags) 1935 unsigned int flags)
1920{ 1936{
1921 int ret; 1937 int ret;
1922 struct inode *inode = out->f_path.dentry->d_inode; 1938 struct address_space *mapping = out->f_mapping;
1939 struct inode *inode = mapping->host;
1940 struct splice_desc sd = {
1941 .total_len = len,
1942 .flags = flags,
1943 .pos = *ppos,
1944 .u.file = out,
1945 };
1923 1946
1924 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe, 1947 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
1925 (unsigned int)len, 1948 (unsigned int)len,
1926 out->f_path.dentry->d_name.len, 1949 out->f_path.dentry->d_name.len,
1927 out->f_path.dentry->d_name.name); 1950 out->f_path.dentry->d_name.name);
1928 1951
1929 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); 1952 if (pipe->inode)
1953 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_PARENT);
1930 1954
1931 ret = ocfs2_rw_lock(inode, 1); 1955 splice_from_pipe_begin(&sd);
1932 if (ret < 0) { 1956 do {
1933 mlog_errno(ret); 1957 ret = splice_from_pipe_next(pipe, &sd);
1934 goto out; 1958 if (ret <= 0)
1935 } 1959 break;
1936 1960
1937 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0, 1961 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
1938 NULL); 1962 ret = ocfs2_rw_lock(inode, 1);
1939 if (ret < 0) { 1963 if (ret < 0)
1940 mlog_errno(ret); 1964 mlog_errno(ret);
1941 goto out_unlock; 1965 else {
1942 } 1966 ret = ocfs2_splice_to_file(pipe, out, &sd);
1967 ocfs2_rw_unlock(inode, 1);
1968 }
1969 mutex_unlock(&inode->i_mutex);
1970 } while (ret > 0);
1971 splice_from_pipe_end(pipe, &sd);
1943 1972
1944 if (pipe->inode) 1973 if (pipe->inode)
1945 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
1946 ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
1947 if (pipe->inode)
1948 mutex_unlock(&pipe->inode->i_mutex); 1974 mutex_unlock(&pipe->inode->i_mutex);
1949 1975
1950out_unlock: 1976 if (sd.num_spliced)
1951 ocfs2_rw_unlock(inode, 1); 1977 ret = sd.num_spliced;
1952out: 1978
1953 mutex_unlock(&inode->i_mutex); 1979 if (ret > 0) {
1980 unsigned long nr_pages;
1981
1982 *ppos += ret;
1983 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1984
1985 /*
1986 * If file or inode is SYNC and we actually wrote some data,
1987 * sync it.
1988 */
1989 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
1990 int err;
1991
1992 mutex_lock(&inode->i_mutex);
1993 err = ocfs2_rw_lock(inode, 1);
1994 if (err < 0) {
1995 mlog_errno(err);
1996 } else {
1997 err = generic_osync_inode(inode, mapping,
1998 OSYNC_METADATA|OSYNC_DATA);
1999 ocfs2_rw_unlock(inode, 1);
2000 }
2001 mutex_unlock(&inode->i_mutex);
2002
2003 if (err)
2004 ret = err;
2005 }
2006 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
2007 }
1954 2008
1955 mlog_exit(ret); 2009 mlog_exit(ret);
1956 return ret; 2010 return ret;
diff --git a/fs/pipe.c b/fs/pipe.c
index 4af7aa521813..13414ec45b8d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -37,6 +37,42 @@
37 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09 37 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
38 */ 38 */
39 39
40static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
41{
42 if (pipe->inode)
43 mutex_lock_nested(&pipe->inode->i_mutex, subclass);
44}
45
46void pipe_lock(struct pipe_inode_info *pipe)
47{
48 /*
49 * pipe_lock() nests non-pipe inode locks (for writing to a file)
50 */
51 pipe_lock_nested(pipe, I_MUTEX_PARENT);
52}
53EXPORT_SYMBOL(pipe_lock);
54
55void pipe_unlock(struct pipe_inode_info *pipe)
56{
57 if (pipe->inode)
58 mutex_unlock(&pipe->inode->i_mutex);
59}
60EXPORT_SYMBOL(pipe_unlock);
61
62void pipe_double_lock(struct pipe_inode_info *pipe1,
63 struct pipe_inode_info *pipe2)
64{
65 BUG_ON(pipe1 == pipe2);
66
67 if (pipe1 < pipe2) {
68 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
69 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
70 } else {
71 pipe_lock_nested(pipe2, I_MUTEX_CHILD);
72 pipe_lock_nested(pipe1, I_MUTEX_PARENT);
73 }
74}
75
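These helpers wrap the pipe->inode NULL check that callers previously open-coded around i_mutex. A rough sketch of the intended usage, assuming pipe, ipipe and opipe are already-obtained struct pipe_inode_info pointers (illustration only, not part of this patch):

	pipe_lock(pipe);
	while (!pipe->nrbufs) {		/* wait for data, as the prep helpers below do */
		if (!pipe->writers)
			break;
		pipe_wait(pipe);	/* drops and re-takes the pipe lock */
	}
	pipe_unlock(pipe);

	/* Holding two pipes at once (tee): pipe_double_lock() orders the
	 * locks by address to avoid ABBA deadlocks. */
	pipe_double_lock(ipipe, opipe);
	/* ... move buffers between ipipe and opipe ... */
	pipe_unlock(ipipe);
	pipe_unlock(opipe);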
40/* Drop the inode semaphore and wait for a pipe event, atomically */ 76/* Drop the inode semaphore and wait for a pipe event, atomically */
41void pipe_wait(struct pipe_inode_info *pipe) 77void pipe_wait(struct pipe_inode_info *pipe)
42{ 78{
@@ -47,12 +83,10 @@ void pipe_wait(struct pipe_inode_info *pipe)
47 * is considered a noninteractive wait: 83 * is considered a noninteractive wait:
48 */ 84 */
49 prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE); 85 prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
50 if (pipe->inode) 86 pipe_unlock(pipe);
51 mutex_unlock(&pipe->inode->i_mutex);
52 schedule(); 87 schedule();
53 finish_wait(&pipe->wait, &wait); 88 finish_wait(&pipe->wait, &wait);
54 if (pipe->inode) 89 pipe_lock(pipe);
55 mutex_lock(&pipe->inode->i_mutex);
56} 90}
57 91
58static int 92static int
diff --git a/fs/splice.c b/fs/splice.c
index c18aa7e03e2b..5384a90665d0 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -182,8 +182,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
182 do_wakeup = 0; 182 do_wakeup = 0;
183 page_nr = 0; 183 page_nr = 0;
184 184
185 if (pipe->inode) 185 pipe_lock(pipe);
186 mutex_lock(&pipe->inode->i_mutex);
187 186
188 for (;;) { 187 for (;;) {
189 if (!pipe->readers) { 188 if (!pipe->readers) {
@@ -245,15 +244,13 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
245 pipe->waiting_writers--; 244 pipe->waiting_writers--;
246 } 245 }
247 246
248 if (pipe->inode) { 247 pipe_unlock(pipe);
249 mutex_unlock(&pipe->inode->i_mutex);
250 248
251 if (do_wakeup) { 249 if (do_wakeup) {
252 smp_mb(); 250 smp_mb();
253 if (waitqueue_active(&pipe->wait)) 251 if (waitqueue_active(&pipe->wait))
254 wake_up_interruptible(&pipe->wait); 252 wake_up_interruptible(&pipe->wait);
255 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 253 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
256 }
257 } 254 }
258 255
259 while (page_nr < spd_pages) 256 while (page_nr < spd_pages)
@@ -555,8 +552,8 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
555 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create 552 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
556 * a new page in the output file page cache and fill/dirty that. 553 * a new page in the output file page cache and fill/dirty that.
557 */ 554 */
558static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, 555int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
559 struct splice_desc *sd) 556 struct splice_desc *sd)
560{ 557{
561 struct file *file = sd->u.file; 558 struct file *file = sd->u.file;
562 struct address_space *mapping = file->f_mapping; 559 struct address_space *mapping = file->f_mapping;
@@ -600,108 +597,178 @@ static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
600out: 597out:
601 return ret; 598 return ret;
602} 599}
600EXPORT_SYMBOL(pipe_to_file);
601
602static void wakeup_pipe_writers(struct pipe_inode_info *pipe)
603{
604 smp_mb();
605 if (waitqueue_active(&pipe->wait))
606 wake_up_interruptible(&pipe->wait);
607 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
608}
603 609
604/** 610/**
605 * __splice_from_pipe - splice data from a pipe to given actor 611 * splice_from_pipe_feed - feed available data from a pipe to a file
606 * @pipe: pipe to splice from 612 * @pipe: pipe to splice from
607 * @sd: information to @actor 613 * @sd: information to @actor
608 * @actor: handler that splices the data 614 * @actor: handler that splices the data
609 * 615 *
610 * Description: 616 * Description:
611 * This function does little more than loop over the pipe and call 617
612 * @actor to do the actual moving of a single struct pipe_buffer to 618 * This function loops over the pipe and calls @actor to do the
613 * the desired destination. See pipe_to_file, pipe_to_sendpage, or 619 * actual moving of a single struct pipe_buffer to the desired
614 * pipe_to_user. 620 * destination. It returns when there are no more buffers left in
621 * the pipe or if the requested number of bytes (@sd->total_len)
622 * have been copied. It returns a positive number (one) if the
623 * pipe needs to be filled with more data, zero if the required
624 * number of bytes have been copied and -errno on error.
615 * 625 *
626 * This, together with splice_from_pipe_{begin,end,next}, may be
627 * used to implement the functionality of __splice_from_pipe() when
628 * locking is required around copying the pipe buffers to the
629 * destination.
616 */ 630 */
617ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, 631int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
618 splice_actor *actor) 632 splice_actor *actor)
619{ 633{
620 int ret, do_wakeup, err; 634 int ret;
621
622 ret = 0;
623 do_wakeup = 0;
624
625 for (;;) {
626 if (pipe->nrbufs) {
627 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
628 const struct pipe_buf_operations *ops = buf->ops;
629 635
630 sd->len = buf->len; 636 while (pipe->nrbufs) {
631 if (sd->len > sd->total_len) 637 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
632 sd->len = sd->total_len; 638 const struct pipe_buf_operations *ops = buf->ops;
633 639
634 err = actor(pipe, buf, sd); 640 sd->len = buf->len;
635 if (err <= 0) { 641 if (sd->len > sd->total_len)
636 if (!ret && err != -ENODATA) 642 sd->len = sd->total_len;
637 ret = err;
638 643
639 break; 644 ret = actor(pipe, buf, sd);
640 } 645 if (ret <= 0) {
646 if (ret == -ENODATA)
647 ret = 0;
648 return ret;
649 }
650 buf->offset += ret;
651 buf->len -= ret;
641 652
642 ret += err; 653 sd->num_spliced += ret;
643 buf->offset += err; 654 sd->len -= ret;
644 buf->len -= err; 655 sd->pos += ret;
656 sd->total_len -= ret;
645 657
646 sd->len -= err; 658 if (!buf->len) {
647 sd->pos += err; 659 buf->ops = NULL;
648 sd->total_len -= err; 660 ops->release(pipe, buf);
649 if (sd->len) 661 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
650 continue; 662 pipe->nrbufs--;
663 if (pipe->inode)
664 sd->need_wakeup = true;
665 }
651 666
652 if (!buf->len) { 667 if (!sd->total_len)
653 buf->ops = NULL; 668 return 0;
654 ops->release(pipe, buf); 669 }
655 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
656 pipe->nrbufs--;
657 if (pipe->inode)
658 do_wakeup = 1;
659 }
660 670
661 if (!sd->total_len) 671 return 1;
662 break; 672}
663 } 673EXPORT_SYMBOL(splice_from_pipe_feed);
664 674
665 if (pipe->nrbufs) 675/**
666 continue; 676 * splice_from_pipe_next - wait for some data to splice from
677 * @pipe: pipe to splice from
678 * @sd: information about the splice operation
679 *
680 * Description:
681 * This function will wait for some data and return a positive
682 * value (one) if pipe buffers are available. It will return zero
683 * or -errno if no more data needs to be spliced.
684 */
685int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
686{
687 while (!pipe->nrbufs) {
667 if (!pipe->writers) 688 if (!pipe->writers)
668 break; 689 return 0;
669 if (!pipe->waiting_writers) {
670 if (ret)
671 break;
672 }
673 690
674 if (sd->flags & SPLICE_F_NONBLOCK) { 691 if (!pipe->waiting_writers && sd->num_spliced)
675 if (!ret) 692 return 0;
676 ret = -EAGAIN;
677 break;
678 }
679 693
680 if (signal_pending(current)) { 694 if (sd->flags & SPLICE_F_NONBLOCK)
681 if (!ret) 695 return -EAGAIN;
682 ret = -ERESTARTSYS;
683 break;
684 }
685 696
686 if (do_wakeup) { 697 if (signal_pending(current))
687 smp_mb(); 698 return -ERESTARTSYS;
688 if (waitqueue_active(&pipe->wait)) 699
689 wake_up_interruptible_sync(&pipe->wait); 700 if (sd->need_wakeup) {
690 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); 701 wakeup_pipe_writers(pipe);
691 do_wakeup = 0; 702 sd->need_wakeup = false;
692 } 703 }
693 704
694 pipe_wait(pipe); 705 pipe_wait(pipe);
695 } 706 }
696 707
697 if (do_wakeup) { 708 return 1;
698 smp_mb(); 709}
699 if (waitqueue_active(&pipe->wait)) 710EXPORT_SYMBOL(splice_from_pipe_next);
700 wake_up_interruptible(&pipe->wait);
701 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
702 }
703 711
704 return ret; 712/**
713 * splice_from_pipe_begin - start splicing from pipe
714 * @pipe: pipe to splice from
715 *
716 * Description:
717 * This function should be called before a loop containing
718 * splice_from_pipe_next() and splice_from_pipe_feed() to
719 * initialize the necessary fields of @sd.
720 */
721void splice_from_pipe_begin(struct splice_desc *sd)
722{
723 sd->num_spliced = 0;
724 sd->need_wakeup = false;
725}
726EXPORT_SYMBOL(splice_from_pipe_begin);
727
728/**
729 * splice_from_pipe_end - finish splicing from pipe
730 * @pipe: pipe to splice from
731 * @sd: information about the splice operation
732 *
733 * Description:
734 * This function will wake up pipe writers if necessary. It should
735 * be called after a loop containing splice_from_pipe_next() and
736 * splice_from_pipe_feed().
737 */
738void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd)
739{
740 if (sd->need_wakeup)
741 wakeup_pipe_writers(pipe);
742}
743EXPORT_SYMBOL(splice_from_pipe_end);
744
745/**
746 * __splice_from_pipe - splice data from a pipe to given actor
747 * @pipe: pipe to splice from
748 * @sd: information to @actor
749 * @actor: handler that splices the data
750 *
751 * Description:
752 * This function does little more than loop over the pipe and call
753 * @actor to do the actual moving of a single struct pipe_buffer to
754 * the desired destination. See pipe_to_file, pipe_to_sendpage, or
755 * pipe_to_user.
756 *
757 */
758ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
759 splice_actor *actor)
760{
761 int ret;
762
763 splice_from_pipe_begin(sd);
764 do {
765 ret = splice_from_pipe_next(pipe, sd);
766 if (ret > 0)
767 ret = splice_from_pipe_feed(pipe, sd, actor);
768 } while (ret > 0);
769 splice_from_pipe_end(pipe, sd);
770
771 return sd->num_spliced ? sd->num_spliced : ret;
705} 772}
706EXPORT_SYMBOL(__splice_from_pipe); 773EXPORT_SYMBOL(__splice_from_pipe);
707 774
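For callers that must take their own locks around the actual copy, the kernel-doc above describes composing the begin/next/feed/end helpers instead of calling __splice_from_pipe() directly. A sketch of that pattern (essentially what generic_file_splice_write() and the ocfs2 code in this patch do; the locking comments stand in for whatever the caller needs):

	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;

	pipe_lock(pipe);
	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);	/* wait for buffers */
		if (ret <= 0)
			break;
		/* take subsystem locks (i_mutex, cluster locks, ...) */
		ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
		/* drop subsystem locks */
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);
	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;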
@@ -715,7 +782,7 @@ EXPORT_SYMBOL(__splice_from_pipe);
715 * @actor: handler that splices the data 782 * @actor: handler that splices the data
716 * 783 *
717 * Description: 784 * Description:
718 * See __splice_from_pipe. This function locks the input and output inodes, 785 * See __splice_from_pipe. This function locks the pipe inode,
719 * otherwise it's identical to __splice_from_pipe(). 786 * otherwise it's identical to __splice_from_pipe().
720 * 787 *
721 */ 788 */
@@ -724,7 +791,6 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
724 splice_actor *actor) 791 splice_actor *actor)
725{ 792{
726 ssize_t ret; 793 ssize_t ret;
727 struct inode *inode = out->f_mapping->host;
728 struct splice_desc sd = { 794 struct splice_desc sd = {
729 .total_len = len, 795 .total_len = len,
730 .flags = flags, 796 .flags = flags,
@@ -732,30 +798,15 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
732 .u.file = out, 798 .u.file = out,
733 }; 799 };
734 800
735 /* 801 pipe_lock(pipe);
736 * The actor worker might be calling ->write_begin and
737 * ->write_end. Most of the time, these expect i_mutex to
738 * be held. Since this may result in an ABBA deadlock with
739 * pipe->inode, we have to order lock acquiry here.
740 *
741 * Outer lock must be inode->i_mutex, as pipe_wait() will
742 * release and reacquire pipe->inode->i_mutex, AND inode must
743 * never be a pipe.
744 */
745 WARN_ON(S_ISFIFO(inode->i_mode));
746 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
747 if (pipe->inode)
748 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
749 ret = __splice_from_pipe(pipe, &sd, actor); 802 ret = __splice_from_pipe(pipe, &sd, actor);
750 if (pipe->inode) 803 pipe_unlock(pipe);
751 mutex_unlock(&pipe->inode->i_mutex);
752 mutex_unlock(&inode->i_mutex);
753 804
754 return ret; 805 return ret;
755} 806}
756 807
757/** 808/**
758 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes 809 * generic_file_splice_write - splice data from a pipe to a file
759 * @pipe: pipe info 810 * @pipe: pipe info
760 * @out: file to write to 811 * @out: file to write to
761 * @ppos: position in @out 812 * @ppos: position in @out
@@ -764,13 +815,12 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
764 * 815 *
765 * Description: 816 * Description:
766 * Will either move or copy pages (determined by @flags options) from 817 * Will either move or copy pages (determined by @flags options) from
767 * the given pipe inode to the given file. The caller is responsible 818 * the given pipe inode to the given file.
768 * for acquiring i_mutex on both inodes.
769 * 819 *
770 */ 820 */
771ssize_t 821ssize_t
772generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out, 822generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
773 loff_t *ppos, size_t len, unsigned int flags) 823 loff_t *ppos, size_t len, unsigned int flags)
774{ 824{
775 struct address_space *mapping = out->f_mapping; 825 struct address_space *mapping = out->f_mapping;
776 struct inode *inode = mapping->host; 826 struct inode *inode = mapping->host;
@@ -781,76 +831,28 @@ generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
781 .u.file = out, 831 .u.file = out,
782 }; 832 };
783 ssize_t ret; 833 ssize_t ret;
784 int err;
785
786 err = file_remove_suid(out);
787 if (unlikely(err))
788 return err;
789
790 ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
791 if (ret > 0) {
792 unsigned long nr_pages;
793 834
794 *ppos += ret; 835 pipe_lock(pipe);
795 nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
796 836
797 /* 837 splice_from_pipe_begin(&sd);
798 * If file or inode is SYNC and we actually wrote some data, 838 do {
799 * sync it. 839 ret = splice_from_pipe_next(pipe, &sd);
800 */ 840 if (ret <= 0)
801 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) { 841 break;
802 err = generic_osync_inode(inode, mapping,
803 OSYNC_METADATA|OSYNC_DATA);
804
805 if (err)
806 ret = err;
807 }
808 balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
809 }
810 842
811 return ret; 843 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
812} 844 ret = file_remove_suid(out);
845 if (!ret)
846 ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
847 mutex_unlock(&inode->i_mutex);
848 } while (ret > 0);
849 splice_from_pipe_end(pipe, &sd);
813 850
814EXPORT_SYMBOL(generic_file_splice_write_nolock); 851 pipe_unlock(pipe);
815 852
816/** 853 if (sd.num_spliced)
817 * generic_file_splice_write - splice data from a pipe to a file 854 ret = sd.num_spliced;
818 * @pipe: pipe info
819 * @out: file to write to
820 * @ppos: position in @out
821 * @len: number of bytes to splice
822 * @flags: splice modifier flags
823 *
824 * Description:
825 * Will either move or copy pages (determined by @flags options) from
826 * the given pipe inode to the given file.
827 *
828 */
829ssize_t
830generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
831 loff_t *ppos, size_t len, unsigned int flags)
832{
833 struct address_space *mapping = out->f_mapping;
834 struct inode *inode = mapping->host;
835 struct splice_desc sd = {
836 .total_len = len,
837 .flags = flags,
838 .pos = *ppos,
839 .u.file = out,
840 };
841 ssize_t ret;
842 855
843 WARN_ON(S_ISFIFO(inode->i_mode));
844 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
845 ret = file_remove_suid(out);
846 if (likely(!ret)) {
847 if (pipe->inode)
848 mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
849 ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
850 if (pipe->inode)
851 mutex_unlock(&pipe->inode->i_mutex);
852 }
853 mutex_unlock(&inode->i_mutex);
854 if (ret > 0) { 856 if (ret > 0) {
855 unsigned long nr_pages; 857 unsigned long nr_pages;
856 858
@@ -1339,8 +1341,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1339 if (!pipe) 1341 if (!pipe)
1340 return -EBADF; 1342 return -EBADF;
1341 1343
1342 if (pipe->inode) 1344 pipe_lock(pipe);
1343 mutex_lock(&pipe->inode->i_mutex);
1344 1345
1345 error = ret = 0; 1346 error = ret = 0;
1346 while (nr_segs) { 1347 while (nr_segs) {
@@ -1395,8 +1396,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1395 iov++; 1396 iov++;
1396 } 1397 }
1397 1398
1398 if (pipe->inode) 1399 pipe_unlock(pipe);
1399 mutex_unlock(&pipe->inode->i_mutex);
1400 1400
1401 if (!ret) 1401 if (!ret)
1402 ret = error; 1402 ret = error;
@@ -1524,7 +1524,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1524 return 0; 1524 return 0;
1525 1525
1526 ret = 0; 1526 ret = 0;
1527 mutex_lock(&pipe->inode->i_mutex); 1527 pipe_lock(pipe);
1528 1528
1529 while (!pipe->nrbufs) { 1529 while (!pipe->nrbufs) {
1530 if (signal_pending(current)) { 1530 if (signal_pending(current)) {
@@ -1542,7 +1542,7 @@ static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1542 pipe_wait(pipe); 1542 pipe_wait(pipe);
1543 } 1543 }
1544 1544
1545 mutex_unlock(&pipe->inode->i_mutex); 1545 pipe_unlock(pipe);
1546 return ret; 1546 return ret;
1547} 1547}
1548 1548
@@ -1562,7 +1562,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1562 return 0; 1562 return 0;
1563 1563
1564 ret = 0; 1564 ret = 0;
1565 mutex_lock(&pipe->inode->i_mutex); 1565 pipe_lock(pipe);
1566 1566
1567 while (pipe->nrbufs >= PIPE_BUFFERS) { 1567 while (pipe->nrbufs >= PIPE_BUFFERS) {
1568 if (!pipe->readers) { 1568 if (!pipe->readers) {
@@ -1583,7 +1583,7 @@ static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1583 pipe->waiting_writers--; 1583 pipe->waiting_writers--;
1584 } 1584 }
1585 1585
1586 mutex_unlock(&pipe->inode->i_mutex); 1586 pipe_unlock(pipe);
1587 return ret; 1587 return ret;
1588} 1588}
1589 1589
@@ -1599,10 +1599,10 @@ static int link_pipe(struct pipe_inode_info *ipipe,
1599 1599
1600 /* 1600 /*
1601 * Potential ABBA deadlock, work around it by ordering lock 1601 * Potential ABBA deadlock, work around it by ordering lock
1602 * grabbing by inode address. Otherwise two different processes 1602 * grabbing by pipe info address. Otherwise two different processes
1603 * could deadlock (one doing tee from A -> B, the other from B -> A). 1603 * could deadlock (one doing tee from A -> B, the other from B -> A).
1604 */ 1604 */
1605 inode_double_lock(ipipe->inode, opipe->inode); 1605 pipe_double_lock(ipipe, opipe);
1606 1606
1607 do { 1607 do {
1608 if (!opipe->readers) { 1608 if (!opipe->readers) {
@@ -1653,7 +1653,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
1653 if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) 1653 if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
1654 ret = -EAGAIN; 1654 ret = -EAGAIN;
1655 1655
1656 inode_double_unlock(ipipe->inode, opipe->inode); 1656 pipe_unlock(ipipe);
1657 pipe_unlock(opipe);
1657 1658
1658 /* 1659 /*
1659 * If we put data in the output pipe, wakeup any potential readers. 1660 * If we put data in the output pipe, wakeup any potential readers.
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 37b82cb96c89..e727fe0d1451 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -88,7 +88,7 @@ extern void warn_slowpath(const char *file, const int line,
88 88
89#else /* !CONFIG_BUG */ 89#else /* !CONFIG_BUG */
90#ifndef HAVE_ARCH_BUG 90#ifndef HAVE_ARCH_BUG
91#define BUG() 91#define BUG() do {} while(0)
92#endif 92#endif
93 93
94#ifndef HAVE_ARCH_BUG_ON 94#ifndef HAVE_ARCH_BUG_ON
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 67e3353a56d6..95962fa8398a 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -594,6 +594,9 @@ struct drm_i915_gem_busy {
594#define I915_BIT_6_SWIZZLE_9_10_11 4 594#define I915_BIT_6_SWIZZLE_9_10_11 4
595/* Not seen by userland */ 595/* Not seen by userland */
596#define I915_BIT_6_SWIZZLE_UNKNOWN 5 596#define I915_BIT_6_SWIZZLE_UNKNOWN 5
597/* Seen by userland. */
598#define I915_BIT_6_SWIZZLE_9_17 6
599#define I915_BIT_6_SWIZZLE_9_10_17 7
597 600
598struct drm_i915_gem_set_tiling { 601struct drm_i915_gem_set_tiling {
599 /** Handle of the buffer to have its tiling state updated */ 602 /** Handle of the buffer to have its tiling state updated */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b900d2c67d29..b89cf2d82898 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -504,6 +504,115 @@ static inline int bio_has_data(struct bio *bio)
504 return bio && bio->bi_io_vec != NULL; 504 return bio && bio->bi_io_vec != NULL;
505} 505}
506 506
507/*
508 * BIO list management for use by remapping drivers (e.g. DM or MD).
509 *
510 * A bio_list anchors a singly-linked list of bios chained through the bi_next
511 * member of the bio. The bio_list also caches the last list member to allow
512 * fast access to the tail.
513 */
514struct bio_list {
515 struct bio *head;
516 struct bio *tail;
517};
518
519static inline int bio_list_empty(const struct bio_list *bl)
520{
521 return bl->head == NULL;
522}
523
524static inline void bio_list_init(struct bio_list *bl)
525{
526 bl->head = bl->tail = NULL;
527}
528
529#define bio_list_for_each(bio, bl) \
530 for (bio = (bl)->head; bio; bio = bio->bi_next)
531
532static inline unsigned bio_list_size(const struct bio_list *bl)
533{
534 unsigned sz = 0;
535 struct bio *bio;
536
537 bio_list_for_each(bio, bl)
538 sz++;
539
540 return sz;
541}
542
543static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
544{
545 bio->bi_next = NULL;
546
547 if (bl->tail)
548 bl->tail->bi_next = bio;
549 else
550 bl->head = bio;
551
552 bl->tail = bio;
553}
554
555static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
556{
557 bio->bi_next = bl->head;
558
559 bl->head = bio;
560
561 if (!bl->tail)
562 bl->tail = bio;
563}
564
565static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
566{
567 if (!bl2->head)
568 return;
569
570 if (bl->tail)
571 bl->tail->bi_next = bl2->head;
572 else
573 bl->head = bl2->head;
574
575 bl->tail = bl2->tail;
576}
577
578static inline void bio_list_merge_head(struct bio_list *bl,
579 struct bio_list *bl2)
580{
581 if (!bl2->head)
582 return;
583
584 if (bl->head)
585 bl2->tail->bi_next = bl->head;
586 else
587 bl->tail = bl2->tail;
588
589 bl->head = bl2->head;
590}
591
592static inline struct bio *bio_list_pop(struct bio_list *bl)
593{
594 struct bio *bio = bl->head;
595
596 if (bio) {
597 bl->head = bl->head->bi_next;
598 if (!bl->head)
599 bl->tail = NULL;
600
601 bio->bi_next = NULL;
602 }
603
604 return bio;
605}
606
607static inline struct bio *bio_list_get(struct bio_list *bl)
608{
609 struct bio *bio = bl->head;
610
611 bl->head = bl->tail = NULL;
612
613 return bio;
614}
615
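A short sketch of how a remapping driver might use the new list helpers to defer and later resubmit bios (hypothetical usage; generic_make_request() stands in for whatever submission path the driver uses):

	struct bio_list deferred;
	struct bio *bio;

	bio_list_init(&deferred);

	/* queue incoming bios instead of issuing them right away */
	bio_list_add(&deferred, bio);

	/* later: drain the list in FIFO order and resubmit each bio */
	while ((bio = bio_list_pop(&deferred)))
		generic_make_request(bio);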
507#if defined(CONFIG_BLK_DEV_INTEGRITY) 616#if defined(CONFIG_BLK_DEV_INTEGRITY)
508 617
509#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) 618#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 7b73bb8f1970..16ed0284d780 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -155,6 +155,7 @@ void create_empty_buffers(struct page *, unsigned long,
155 unsigned long b_state); 155 unsigned long b_state);
156void end_buffer_read_sync(struct buffer_head *bh, int uptodate); 156void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
157void end_buffer_write_sync(struct buffer_head *bh, int uptodate); 157void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
158void end_buffer_async_write(struct buffer_head *bh, int uptodate);
158 159
159/* Things to do with buffers at mapping->private_list */ 160/* Things to do with buffers at mapping->private_list */
160void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); 161void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -197,6 +198,8 @@ extern int buffer_heads_over_limit;
197void block_invalidatepage(struct page *page, unsigned long offset); 198void block_invalidatepage(struct page *page, unsigned long offset);
198int block_write_full_page(struct page *page, get_block_t *get_block, 199int block_write_full_page(struct page *page, get_block_t *get_block,
199 struct writeback_control *wbc); 200 struct writeback_control *wbc);
201int block_write_full_page_endio(struct page *page, get_block_t *get_block,
202 struct writeback_control *wbc, bh_end_io_t *handler);
200int block_read_full_page(struct page*, get_block_t*); 203int block_read_full_page(struct page*, get_block_t*);
201int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, 204int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
202 unsigned long from); 205 unsigned long from);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 562d2855cf30..e766be0d4329 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -87,6 +87,60 @@ struct inodes_stat_t {
87 */ 87 */
88#define FMODE_NOCMTIME ((__force fmode_t)2048) 88#define FMODE_NOCMTIME ((__force fmode_t)2048)
89 89
90/*
91 * The below are the various read and write types that we support. Some of
92 * them include behavioral modifiers that send information down to the
93 * block layer and IO scheduler. Terminology:
94 *
95 * The block layer uses device plugging to defer IO a little bit, in
96 * the hope that we will see more IO very shortly. This increases
97 * coalescing of adjacent IO and thus reduces the number of IOs we
98 * have to send to the device. It also allows for better queuing,
99 * if the IO isn't mergeable. If the caller is going to be waiting
100 * for the IO, then he must ensure that the device is unplugged so
101 * that the IO is dispatched to the driver.
102 *
103 * All IO is handled async in Linux. This is fine for background
104 * writes, but for reads or writes that someone waits for completion
105 * on, we want to notify the block layer and IO scheduler so that they
106 * know about it. That allows them to make better scheduling
107 * decisions. So when the below references 'sync' and 'async', it
108 * is referencing this priority hint.
109 *
110 * With that in mind, the available types are:
111 *
112 * READ A normal read operation. Device will be plugged.
113 * READ_SYNC A synchronous read. Device is not plugged, caller can
114 * immediately wait on this read without caring about
115 * unplugging.
116 * READA Used for read-ahead operations. Lower priority, and the
117 * block layer could (in theory) choose to ignore this
118 * request if it runs into resource problems.
119 * WRITE A normal async write. Device will be plugged.
120 * SWRITE Like WRITE, but a special case for ll_rw_block() that
121 * tells it to lock the buffer first. Normally a buffer
122 * must be locked before doing IO.
123 * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down
124 * the hint that someone will be waiting on this IO
125 * shortly. The device must still be unplugged explicitly,
126 * WRITE_SYNC_PLUG does not do this as we could be
127 * submitting more writes before we actually wait on any
128 * of them.
129 * WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
130 * immediately after submission. The write equivalent
131 * of READ_SYNC.
132 * WRITE_ODIRECT Special case write for O_DIRECT only.
133 * SWRITE_SYNC
134 * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
135 * See SWRITE.
136 * WRITE_BARRIER Like WRITE, but tells the block layer that all
137 * previously submitted writes must be safely on storage
138 * before this one is started. Also guarantees that when
139 * this write is complete, it itself is also safely on
140 * storage. Prevents reordering of writes on both sides
141 * of this IO.
142 *
143 */
90#define RW_MASK 1 144#define RW_MASK 1
91#define RWA_MASK 2 145#define RWA_MASK 2
92#define READ 0 146#define READ 0
@@ -102,6 +156,11 @@ struct inodes_stat_t {
102 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) 156 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
103#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) 157#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
104#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) 158#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
159
160/*
161 * These aren't really reads or writes, they pass down information about
162 * parts of device that are now unused by the file system.
163 */
105#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) 164#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
106#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) 165#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
107 166
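As a small illustration of the sync hint described above (a sketch only, assuming bh is a mapped buffer_head; the SWRITE variants let ll_rw_block() lock the buffer itself):

	/* background write: stays plugged, normal priority */
	ll_rw_block(SWRITE, 1, &bh);

	/* write we are about to wait on: sync hint plus immediate unplug */
	ll_rw_block(SWRITE_SYNC, 1, &bh);
	wait_on_buffer(bh);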
@@ -738,9 +797,6 @@ enum inode_i_mutex_lock_class
738 I_MUTEX_QUOTA 797 I_MUTEX_QUOTA
739}; 798};
740 799
741extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
742extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
743
744/* 800/*
745 * NOTE: in a 32bit arch with a preemptable kernel and 801 * NOTE: in a 32bit arch with a preemptable kernel and
746 * an UP compile the i_size_read/write must be atomic 802 * an UP compile the i_size_read/write must be atomic
@@ -2150,8 +2206,6 @@ extern ssize_t generic_file_splice_read(struct file *, loff_t *,
2150 struct pipe_inode_info *, size_t, unsigned int); 2206 struct pipe_inode_info *, size_t, unsigned int);
2151extern ssize_t generic_file_splice_write(struct pipe_inode_info *, 2207extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
2152 struct file *, loff_t *, size_t, unsigned int); 2208 struct file *, loff_t *, size_t, unsigned int);
2153extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *,
2154 struct file *, loff_t *, size_t, unsigned int);
2155extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2209extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2156 struct file *out, loff_t *, size_t len, unsigned int flags); 2210 struct file *out, loff_t *, size_t len, unsigned int flags);
2157extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, 2211extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index f2a78b5e8b55..43fc95d822d5 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -43,10 +43,6 @@
43 * 43 *
44 */ 44 */
45 45
46/* Flags related to I2C device features */
47#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
48#define FSL_I2C_DEV_CLOCK_5200 0x00000002
49
50enum fsl_usb2_operating_modes { 46enum fsl_usb2_operating_modes {
51 FSL_USB2_MPH_HOST, 47 FSL_USB2_MPH_HOST,
52 FSL_USB2_DR_HOST, 48 FSL_USB2_DR_HOST,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b450a2628855..3d501db36a26 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -209,6 +209,7 @@ enum {
209 209
210 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 210 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
211 211
212
212 /* struct ata_port pflags */ 213 /* struct ata_port pflags */
213 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ 214 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
214 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ 215 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
@@ -225,6 +226,9 @@ enum {
225 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ 226 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
226 ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ 227 ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
227 228
229 ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */
230 ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */
231
228 /* struct ata_queued_cmd flags */ 232 /* struct ata_queued_cmd flags */
229	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi layer */ 233	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi layer */
230 ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ 234 ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
@@ -689,7 +693,10 @@ struct ata_port {
689 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ 693 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
690 struct ata_port_operations *ops; 694 struct ata_port_operations *ops;
691 spinlock_t *lock; 695 spinlock_t *lock;
696 /* Flags owned by the EH context. Only EH should touch these once the
697 port is active */
692 unsigned long flags; /* ATA_FLAG_xxx */ 698 unsigned long flags; /* ATA_FLAG_xxx */
699 /* Flags that change dynamically, protected by ap->lock */
693 unsigned int pflags; /* ATA_PFLAG_xxx */ 700 unsigned int pflags; /* ATA_PFLAG_xxx */
694 unsigned int print_id; /* user visible unique port ID */ 701 unsigned int print_id; /* user visible unique port ID */
695 unsigned int port_no; /* 0 based port no. inside the host */ 702 unsigned int port_no; /* 0 based port no. inside the host */
@@ -1595,6 +1602,7 @@ extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc);
1595extern void ata_sff_error_handler(struct ata_port *ap); 1602extern void ata_sff_error_handler(struct ata_port *ap);
1596extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc); 1603extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc);
1597extern int ata_sff_port_start(struct ata_port *ap); 1604extern int ata_sff_port_start(struct ata_port *ap);
1605extern int ata_sff_port_start32(struct ata_port *ap);
1598extern void ata_sff_std_ports(struct ata_ioports *ioaddr); 1606extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
1599extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev, 1607extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev,
1600 unsigned long xfer_mask); 1608 unsigned long xfer_mask);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 32cf14a4b034..97e40cb6b588 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -388,6 +388,12 @@ struct phy_driver {
388 /* Enables or disables interrupts */ 388 /* Enables or disables interrupts */
389 int (*config_intr)(struct phy_device *phydev); 389 int (*config_intr)(struct phy_device *phydev);
390 390
391 /*
392 * Checks if the PHY generated an interrupt.
393 * For multi-PHY devices with shared PHY interrupt pin
394 */
395 int (*did_interrupt)(struct phy_device *phydev);
396
391 /* Clears up any memory if needed */ 397 /* Clears up any memory if needed */
392 void (*remove)(struct phy_device *phydev); 398 void (*remove)(struct phy_device *phydev);
393 399
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 8e4120285f72..c8f038554e80 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -134,6 +134,11 @@ struct pipe_buf_operations {
134 memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ 134 memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
135#define PIPE_SIZE PAGE_SIZE 135#define PIPE_SIZE PAGE_SIZE
136 136
137/* Pipe lock and unlock operations */
138void pipe_lock(struct pipe_inode_info *);
139void pipe_unlock(struct pipe_inode_info *);
140void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
141
137/* Drop the inode semaphore and wait for a pipe event, atomically */ 142/* Drop the inode semaphore and wait for a pipe event, atomically */
138void pipe_wait(struct pipe_inode_info *pipe); 143void pipe_wait(struct pipe_inode_info *pipe);
139 144
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 528dcb93c2f2..5f3faa9d15ae 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -36,6 +36,8 @@ struct splice_desc {
36 void *data; /* cookie */ 36 void *data; /* cookie */
37 } u; 37 } u;
38 loff_t pos; /* file position */ 38 loff_t pos; /* file position */
39 size_t num_spliced; /* number of bytes already spliced */
40 bool need_wakeup; /* need to wake up writer */
39}; 41};
40 42
41struct partial_page { 43struct partial_page {
@@ -66,6 +68,16 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
66 splice_actor *); 68 splice_actor *);
67extern ssize_t __splice_from_pipe(struct pipe_inode_info *, 69extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
68 struct splice_desc *, splice_actor *); 70 struct splice_desc *, splice_actor *);
71extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *,
72 splice_actor *);
73extern int splice_from_pipe_next(struct pipe_inode_info *,
74 struct splice_desc *);
75extern void splice_from_pipe_begin(struct splice_desc *);
76extern void splice_from_pipe_end(struct pipe_inode_info *,
77 struct splice_desc *);
78extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *,
79 struct splice_desc *);
80
69extern ssize_t splice_to_pipe(struct pipe_inode_info *, 81extern ssize_t splice_to_pipe(struct pipe_inode_info *,
70 struct splice_pipe_desc *); 82 struct splice_pipe_desc *);
71extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, 83extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
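A sketch, based only on the declarations above, of how the begin/next/feed/end helpers compose into a splice-write loop; the wrapping function and its signature are illustrative:

static ssize_t example_splice_write(struct pipe_inode_info *pipe, struct file *out,
                                    loff_t *ppos, size_t len, unsigned int flags,
                                    splice_actor *actor)
{
        struct splice_desc sd = {
                .total_len      = len,
                .flags          = flags,
                .pos            = *ppos,
                .u.file         = out,
        };
        ssize_t ret;

        pipe_lock(pipe);
        splice_from_pipe_begin(&sd);
        do {
                ret = splice_from_pipe_next(pipe, &sd);
                if (ret > 0)
                        ret = splice_from_pipe_feed(pipe, &sd, actor);
        } while (ret > 0);
        splice_from_pipe_end(pipe, &sd);
        pipe_unlock(pipe);

        return sd.num_spliced ? sd.num_spliced : ret;
}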
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index b95842542590..625e9e4639c6 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -29,7 +29,7 @@
29/** 29/**
30 * usb_serial_port: structure for the specific ports of a device. 30 * usb_serial_port: structure for the specific ports of a device.
31 * @serial: pointer back to the struct usb_serial owner of this port. 31 * @serial: pointer back to the struct usb_serial owner of this port.
32 * @tty: pointer to the corresponding tty for this port. 32 * @port: pointer to the corresponding tty_port for this port.
33 * @lock: spinlock to grab when updating portions of this structure. 33 * @lock: spinlock to grab when updating portions of this structure.
34 * @mutex: mutex used to synchronize serial_open() and serial_close() 34 * @mutex: mutex used to synchronize serial_open() and serial_close()
35 * access for this port. 35 * access for this port.
@@ -44,19 +44,22 @@
44 * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe 44 * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
45 * for this port. 45 * for this port.
46 * @bulk_in_buffer: pointer to the bulk in buffer for this port. 46 * @bulk_in_buffer: pointer to the bulk in buffer for this port.
47 * @bulk_in_size: the size of the bulk_in_buffer, in bytes.
47 * @read_urb: pointer to the bulk in struct urb for this port. 48 * @read_urb: pointer to the bulk in struct urb for this port.
48 * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this 49 * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
49 * port. 50 * port.
50 * @bulk_out_buffer: pointer to the bulk out buffer for this port. 51 * @bulk_out_buffer: pointer to the bulk out buffer for this port.
51 * @bulk_out_size: the size of the bulk_out_buffer, in bytes. 52 * @bulk_out_size: the size of the bulk_out_buffer, in bytes.
52 * @write_urb: pointer to the bulk out struct urb for this port. 53 * @write_urb: pointer to the bulk out struct urb for this port.
54 * @write_urb_busy: port's writing status
53 * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this 55 * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
54 * port. 56 * port.
55 * @write_wait: a wait_queue_head_t used by the port. 57 * @write_wait: a wait_queue_head_t used by the port.
56 * @work: work queue entry for the line discipline waking up. 58 * @work: work queue entry for the line discipline waking up.
57 * @open_count: number of times this port has been opened.
58 * @throttled: nonzero if the read urb is inactive to throttle the device 59 * @throttled: nonzero if the read urb is inactive to throttle the device
59 * @throttle_req: nonzero if the tty wants to throttle us 60 * @throttle_req: nonzero if the tty wants to throttle us
61 * @console: attached usb serial console
62 * @dev: pointer to the serial device
60 * 63 *
61 * This structure is used by the usb-serial core and drivers for the specific 64 * This structure is used by the usb-serial core and drivers for the specific
62 * ports of a device. 65 * ports of a device.
diff --git a/include/net/udp.h b/include/net/udp.h
index 93dbe294d459..90e6ce56be65 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -124,8 +124,6 @@ static inline void udp_lib_close(struct sock *sk, long timeout)
124 sk_common_release(sk); 124 sk_common_release(sk);
125} 125}
126 126
127extern int ipv4_rcv_saddr_equal(const struct sock *sk1,
128 const struct sock *sk2);
129extern int udp_lib_get_port(struct sock *sk, unsigned short snum, 127extern int udp_lib_get_port(struct sock *sk, unsigned short snum,
130 int (*)(const struct sock*,const struct sock*)); 128 int (*)(const struct sock*,const struct sock*));
131 129
diff --git a/include/sound/jack.h b/include/sound/jack.h
index 6b013c6f6a04..f236e426a706 100644
--- a/include/sound/jack.h
+++ b/include/sound/jack.h
@@ -50,6 +50,8 @@ struct snd_jack {
50 int type; 50 int type;
51 const char *id; 51 const char *id;
52 char name[100]; 52 char name[100];
53 void *private_data;
54 void (*private_free)(struct snd_jack *);
53}; 55};
54 56
55#ifdef CONFIG_SND_JACK 57#ifdef CONFIG_SND_JACK
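A short sketch of how a codec driver can use the two new fields (the Conexant and SigmaTel changes later in this series do exactly this); struct my_jack_state and the function names are illustrative:

struct my_jack_state {
        struct snd_jack *jack;
        /* driver-private per-jack data */
};

static void my_jack_free(struct snd_jack *jack)
{
        struct my_jack_state *st = jack->private_data;

        st->jack = NULL;                /* the jack object is going away */
}

static int my_add_jack(struct snd_card *card, struct my_jack_state *st)
{
        int err = snd_jack_new(card, "Headphone", SND_JACK_HEADPHONE, &st->jack);

        if (err < 0)
                return err;
        st->jack->private_data = st;
        st->jack->private_free = my_jack_free;  /* called from snd_jack_dev_free() */
        return 0;
}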
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 8904b1900d7f..c17296891617 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -268,7 +268,8 @@ struct snd_pcm_runtime {
268 int overrange; 268 int overrange;
269 snd_pcm_uframes_t avail_max; 269 snd_pcm_uframes_t avail_max;
270 snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */ 270 snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
271 snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time*/ 271 snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */
272 unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */
272 273
273 /* -- HW params -- */ 274 /* -- HW params -- */
274 snd_pcm_access_t access; /* access mode */ 275 snd_pcm_access_t access; /* access mode */
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319e489c..8ba052c86d48 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
64 struct bio *bio; 64 struct bio *bio;
65 65
66 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); 66 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
67 if (!bio)
68 return -ENOMEM;
69 bio->bi_sector = page_off * (PAGE_SIZE >> 9); 67 bio->bi_sector = page_off * (PAGE_SIZE >> 9);
70 bio->bi_bdev = resume_bdev; 68 bio->bi_bdev = resume_bdev;
71 bio->bi_end_io = end_swap_bio_read; 69 bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c7b8457d0d2..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
58static struct completion rcu_barrier_completion; 58static struct completion rcu_barrier_completion;
59int rcu_scheduler_active __read_mostly; 59int rcu_scheduler_active __read_mostly;
60 60
61static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
62static struct rcu_head rcu_migrate_head[3];
63static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
64
61/* 65/*
62 * Awaken the corresponding synchronize_rcu() instance now that a 66 * Awaken the corresponding synchronize_rcu() instance now that a
63 * grace period has elapsed. 67 * grace period has elapsed.
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type)
122 } 126 }
123} 127}
124 128
125static inline void wait_migrated_callbacks(void); 129static inline void wait_migrated_callbacks(void)
130{
131 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
132}
126 133
127/* 134/*
128 * Orchestrate the specified type of RCU barrier, waiting for all 135 * Orchestrate the specified type of RCU barrier, waiting for all
@@ -179,21 +186,12 @@ void rcu_barrier_sched(void)
179} 186}
180EXPORT_SYMBOL_GPL(rcu_barrier_sched); 187EXPORT_SYMBOL_GPL(rcu_barrier_sched);
181 188
182static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
183static struct rcu_head rcu_migrate_head[3];
184static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
185
186static void rcu_migrate_callback(struct rcu_head *notused) 189static void rcu_migrate_callback(struct rcu_head *notused)
187{ 190{
188 if (atomic_dec_and_test(&rcu_migrate_type_count)) 191 if (atomic_dec_and_test(&rcu_migrate_type_count))
189 wake_up(&rcu_migrate_wq); 192 wake_up(&rcu_migrate_wq);
190} 193}
191 194
192static inline void wait_migrated_callbacks(void)
193{
194 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
195}
196
197static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, 195static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
198 unsigned long action, void *hcpu) 196 unsigned long action, void *hcpu)
199{ 197{
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 97a777ad4f59..dafeecf5b143 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -258,7 +258,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
258 goto exit; 258 goto exit;
259 259
260 retval = call_usermodehelper(argv[0], argv, 260 retval = call_usermodehelper(argv[0], argv,
261 env->envp, UMH_NO_WAIT); 261 env->envp, UMH_WAIT_EXEC);
262 } 262 }
263 263
264exit: 264exit:
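A small sketch of the behavioural difference: with UMH_WAIT_EXEC, call_usermodehelper() returns only once the helper has been exec'd (or failed to), so the caller's argv/envp may be released immediately afterwards, which UMH_NO_WAIT did not guarantee. The helper path is illustrative:

static int example_run_helper(void)
{
        char *argv[] = { "/sbin/example-uevent-helper", NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

        /* safe to let argv/envp go out of scope once this returns */
        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
}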
diff --git a/mm/filemap.c b/mm/filemap.c
index 8bd498040f32..379ff0bcbf6e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -441,6 +441,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
441 } 441 }
442 return err; 442 return err;
443} 443}
444EXPORT_SYMBOL(filemap_write_and_wait_range);
444 445
445/** 446/**
446 * add_to_page_cache_locked - add a locked page to the pagecache 447 * add_to_page_cache_locked - add a locked page to the pagecache
diff --git a/mm/mmap.c b/mm/mmap.c
index 4a3841186c11..3303d1ba8e87 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1575,7 +1575,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
1575 * Overcommit.. This must be the final test, as it will 1575 * Overcommit.. This must be the final test, as it will
1576 * update security statistics. 1576 * update security statistics.
1577 */ 1577 */
1578 if (security_vm_enough_memory(grow)) 1578 if (security_vm_enough_memory_mm(mm, grow))
1579 return -ENOMEM; 1579 return -ENOMEM;
1580 1580
1581 /* Ok, everything looks good - let it rip */ 1581 /* Ok, everything looks good - let it rip */
diff --git a/net/802/tr.c b/net/802/tr.c
index e7eb13084d71..e874447ad144 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -561,6 +561,9 @@ static int rif_seq_show(struct seq_file *seq, void *v)
561 } 561 }
562 seq_putc(seq, '\n'); 562 seq_putc(seq, '\n');
563 } 563 }
564
565 if (dev)
566 dev_put(dev);
564 } 567 }
565 return 0; 568 return 0;
566} 569}
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 654e45f5719d..c67fe6f75653 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -121,8 +121,10 @@ int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
121 if (!skb) 121 if (!skb)
122 return NET_RX_DROP; 122 return NET_RX_DROP;
123 123
124 if (netpoll_rx_on(skb)) 124 if (netpoll_rx_on(skb)) {
125 skb->protocol = eth_type_trans(skb, skb->dev);
125 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci); 126 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
127 }
126 128
127 return napi_frags_finish(napi, skb, 129 return napi_frags_finish(napi, skb,
128 vlan_gro_common(napi, grp, vlan_tci, skb)); 130 vlan_gro_common(napi, grp, vlan_tci, skb));
diff --git a/net/core/dev.c b/net/core/dev.c
index 91d792d17e09..343883f65ea7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1430,7 +1430,7 @@ void netif_device_detach(struct net_device *dev)
1430{ 1430{
1431 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && 1431 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1432 netif_running(dev)) { 1432 netif_running(dev)) {
1433 netif_stop_queue(dev); 1433 netif_tx_stop_all_queues(dev);
1434 } 1434 }
1435} 1435}
1436EXPORT_SYMBOL(netif_device_detach); 1436EXPORT_SYMBOL(netif_device_detach);
@@ -1445,7 +1445,7 @@ void netif_device_attach(struct net_device *dev)
1445{ 1445{
1446 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && 1446 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1447 netif_running(dev)) { 1447 netif_running(dev)) {
1448 netif_wake_queue(dev); 1448 netif_tx_wake_all_queues(dev);
1449 __netdev_watchdog_up(dev); 1449 __netdev_watchdog_up(dev);
1450 } 1450 }
1451} 1451}
@@ -2328,8 +2328,10 @@ static int napi_gro_complete(struct sk_buff *skb)
2328 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK]; 2328 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2329 int err = -ENOENT; 2329 int err = -ENOENT;
2330 2330
2331 if (NAPI_GRO_CB(skb)->count == 1) 2331 if (NAPI_GRO_CB(skb)->count == 1) {
2332 skb_shinfo(skb)->gso_size = 0;
2332 goto out; 2333 goto out;
2334 }
2333 2335
2334 rcu_read_lock(); 2336 rcu_read_lock();
2335 list_for_each_entry_rcu(ptype, head, list) { 2337 list_for_each_entry_rcu(ptype, head, list) {
@@ -2348,7 +2350,6 @@ static int napi_gro_complete(struct sk_buff *skb)
2348 } 2350 }
2349 2351
2350out: 2352out:
2351 skb_shinfo(skb)->gso_size = 0;
2352 return netif_receive_skb(skb); 2353 return netif_receive_skb(skb);
2353} 2354}
2354 2355
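A sketch of the callers this helps: a multiqueue driver's suspend/resume path keeps using the generic detach/attach helpers and now has every TX queue stopped and re-woken, not just queue 0. The driver names are illustrative:

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);

        netif_device_detach(netdev);    /* now netif_tx_stop_all_queues() */
        /* ... stop DMA, save state, power down ... */
        return 0;
}

static int example_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);

        /* ... power up, restore state ... */
        netif_device_attach(netdev);    /* now netif_tx_wake_all_queues() */
        return 0;
}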
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2bc8e27a163d..c96a6bb25430 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -928,6 +928,8 @@ static void tcp_init_metrics(struct sock *sk)
928 tcp_set_rto(sk); 928 tcp_set_rto(sk);
929 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) 929 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
930 goto reset; 930 goto reset;
931
932cwnd:
931 tp->snd_cwnd = tcp_init_cwnd(tp, dst); 933 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
932 tp->snd_cwnd_stamp = tcp_time_stamp; 934 tp->snd_cwnd_stamp = tcp_time_stamp;
933 return; 935 return;
@@ -942,6 +944,7 @@ reset:
942 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; 944 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
943 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 945 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
944 } 946 }
947 goto cwnd;
945} 948}
946 949
947static void tcp_update_reordering(struct sock *sk, const int metric, 950static void tcp_update_reordering(struct sock *sk, const int metric,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bda08a09357d..7a1d1ce22e66 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -222,7 +222,7 @@ fail:
222 return error; 222 return error;
223} 223}
224 224
225int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) 225static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
226{ 226{
227 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); 227 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
228 228
@@ -1823,7 +1823,6 @@ EXPORT_SYMBOL(udp_lib_getsockopt);
1823EXPORT_SYMBOL(udp_lib_setsockopt); 1823EXPORT_SYMBOL(udp_lib_setsockopt);
1824EXPORT_SYMBOL(udp_poll); 1824EXPORT_SYMBOL(udp_poll);
1825EXPORT_SYMBOL(udp_lib_get_port); 1825EXPORT_SYMBOL(udp_lib_get_port);
1826EXPORT_SYMBOL(ipv4_rcv_saddr_equal);
1827 1826
1828#ifdef CONFIG_PROC_FS 1827#ifdef CONFIG_PROC_FS
1829EXPORT_SYMBOL(udp_proc_register); 1828EXPORT_SYMBOL(udp_proc_register);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d31df0f4bc9a..a7fdf9a27f15 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -380,10 +380,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
380 default: 380 default:
381 goto sticky_done; 381 goto sticky_done;
382 } 382 }
383
384 if ((rthdr->hdrlen & 1) ||
385 (rthdr->hdrlen >> 1) != rthdr->segments_left)
386 goto sticky_done;
387 } 383 }
388 384
389 retv = 0; 385 retv = 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6842dd2edd5b..8905712cfbb8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,6 +53,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
53{ 53{
54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; 54 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); 55 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
56 __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
57 __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
56 int sk_ipv6only = ipv6_only_sock(sk); 58 int sk_ipv6only = ipv6_only_sock(sk);
57 int sk2_ipv6only = inet_v6_ipv6only(sk2); 59 int sk2_ipv6only = inet_v6_ipv6only(sk2);
58 int addr_type = ipv6_addr_type(sk_rcv_saddr6); 60 int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -60,7 +62,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
60 62
61 /* if both are mapped, treat as IPv4 */ 63 /* if both are mapped, treat as IPv4 */
62 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) 64 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
63 return ipv4_rcv_saddr_equal(sk, sk2); 65 return (!sk2_ipv6only &&
66 (!sk_rcv_saddr || !sk2_rcv_saddr ||
67 sk_rcv_saddr == sk2_rcv_saddr));
64 68
65 if (addr_type2 == IPV6_ADDR_ANY && 69 if (addr_type2 == IPV6_ADDR_ANY &&
66 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) 70 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 74776de523ec..f546e81acc45 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1758,8 +1758,9 @@ static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
1758 1758
1759static inline char *alloc_one_pg_vec_page(unsigned long order) 1759static inline char *alloc_one_pg_vec_page(unsigned long order)
1760{ 1760{
1761 return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO, 1761 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
1762 order); 1762
1763 return (char *) __get_free_pages(gfp_flags, order);
1763} 1764}
1764 1765
1765static char **alloc_pg_vec(struct tpacket_req *req, int order) 1766static char **alloc_pg_vec(struct tpacket_req *req, int order)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 0f36e8d59b29..877a7f65f707 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1072,10 +1072,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1072 unsigned char *asmptr; 1072 unsigned char *asmptr;
1073 int n, size, qbit = 0; 1073 int n, size, qbit = 0;
1074 1074
1075 /* ROSE empty frame has no meaning : don't send */
1076 if (len == 0)
1077 return 0;
1078
1079 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) 1075 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1080 return -EINVAL; 1076 return -EINVAL;
1081 1077
@@ -1273,12 +1269,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1273 skb_reset_transport_header(skb); 1269 skb_reset_transport_header(skb);
1274 copied = skb->len; 1270 copied = skb->len;
1275 1271
1276 /* ROSE empty frame has no meaning : ignore it */
1277 if (copied == 0) {
1278 skb_free_datagram(sk, skb);
1279 return copied;
1280 }
1281
1282 if (copied > size) { 1272 if (copied > size) {
1283 copied = size; 1273 copied = size;
1284 msg->msg_flags |= MSG_TRUNC; 1274 msg->msg_flags |= MSG_TRUNC;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 72cf86e3c090..fad596bf32d7 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -176,8 +176,10 @@ META_COLLECTOR(var_dev)
176 176
177META_COLLECTOR(int_vlan_tag) 177META_COLLECTOR(int_vlan_tag)
178{ 178{
179 unsigned short uninitialized_var(tag); 179 unsigned short tag;
180 if (vlan_get_tag(skb, &tag) < 0) 180
181 tag = vlan_tx_tag_get(skb);
182 if (!tag && __vlan_get_tag(skb, &tag))
181 *err = -1; 183 *err = -1;
182 else 184 else
183 dst->value = tag; 185 dst->value = tag;
diff --git a/sound/core/control.c b/sound/core/control.c
index 4b20fa2b7e6d..17b8d47a5cd0 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -723,14 +723,11 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
723{ 723{
724 struct snd_ctl_elem_value *control; 724 struct snd_ctl_elem_value *control;
725 int result; 725 int result;
726 726
727 control = kmalloc(sizeof(*control), GFP_KERNEL); 727 control = memdup_user(_control, sizeof(*control));
728 if (control == NULL) 728 if (IS_ERR(control))
729 return -ENOMEM; 729 return PTR_ERR(control);
730 if (copy_from_user(control, _control, sizeof(*control))) { 730
731 kfree(control);
732 return -EFAULT;
733 }
734 snd_power_lock(card); 731 snd_power_lock(card);
735 result = snd_power_wait(card, SNDRV_CTL_POWER_D0); 732 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
736 if (result >= 0) 733 if (result >= 0)
@@ -784,13 +781,10 @@ static int snd_ctl_elem_write_user(struct snd_ctl_file *file,
784 struct snd_card *card; 781 struct snd_card *card;
785 int result; 782 int result;
786 783
787 control = kmalloc(sizeof(*control), GFP_KERNEL); 784 control = memdup_user(_control, sizeof(*control));
788 if (control == NULL) 785 if (IS_ERR(control))
789 return -ENOMEM; 786 return PTR_ERR(control);
790 if (copy_from_user(control, _control, sizeof(*control))) { 787
791 kfree(control);
792 return -EFAULT;
793 }
794 card = file->card; 788 card = file->card;
795 snd_power_lock(card); 789 snd_power_lock(card);
796 result = snd_power_wait(card, SNDRV_CTL_POWER_D0); 790 result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
@@ -916,13 +910,10 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
916 if (op_flag > 0) { 910 if (op_flag > 0) {
917 if (size > 1024 * 128) /* sane value */ 911 if (size > 1024 * 128) /* sane value */
918 return -EINVAL; 912 return -EINVAL;
919 new_data = kmalloc(size, GFP_KERNEL); 913
920 if (new_data == NULL) 914 new_data = memdup_user(tlv, size);
921 return -ENOMEM; 915 if (IS_ERR(new_data))
922 if (copy_from_user(new_data, tlv, size)) { 916 return PTR_ERR(new_data);
923 kfree(new_data);
924 return -EFAULT;
925 }
926 change = ue->tlv_data_size != size; 917 change = ue->tlv_data_size != size;
927 if (!change) 918 if (!change)
928 change = memcmp(ue->tlv_data, new_data, size); 919 change = memcmp(ue->tlv_data, new_data, size);
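The same conversion recurs throughout the sound/ changes below; in minimal form, memdup_user() folds kmalloc() + copy_from_user() + the error unwinding into one call that returns an ERR_PTR() on failure:

        void *buf;

        buf = memdup_user(user_ptr, size);      /* allocate and copy in one step */
        if (IS_ERR(buf))
                return PTR_ERR(buf);            /* -ENOMEM or -EFAULT */
        /* ... use the kernel copy ... */
        kfree(buf);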
diff --git a/sound/core/jack.c b/sound/core/jack.c
index c8254c667c62..d54d1a05fe65 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -35,6 +35,9 @@ static int snd_jack_dev_free(struct snd_device *device)
35{ 35{
36 struct snd_jack *jack = device->device_data; 36 struct snd_jack *jack = device->device_data;
37 37
38 if (jack->private_free)
39 jack->private_free(jack);
40
38 /* If the input device is registered with the input subsystem 41 /* If the input device is registered with the input subsystem
39 * then we need to use a different deallocator. */ 42 * then we need to use a different deallocator. */
40 if (jack->registered) 43 if (jack->registered)
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 36d7a5998234..08bfed594a83 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -232,14 +232,11 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
232 if (! (runtime = substream->runtime)) 232 if (! (runtime = substream->runtime))
233 return -ENOTTY; 233 return -ENOTTY;
234 234
235 data = kmalloc(sizeof(*data), GFP_KERNEL);
236 if (data == NULL)
237 return -ENOMEM;
238 /* only fifo_size is different, so just copy all */ 235 /* only fifo_size is different, so just copy all */
239 if (copy_from_user(data, data32, sizeof(*data32))) { 236 data = memdup_user(data32, sizeof(*data32));
240 err = -EFAULT; 237 if (IS_ERR(data))
241 goto error; 238 return PTR_ERR(data);
242 } 239
243 if (refine) 240 if (refine)
244 err = snd_pcm_hw_refine(substream, data); 241 err = snd_pcm_hw_refine(substream, data);
245 else 242 else
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index fbb2e391591e..63d088f2265f 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -209,9 +209,11 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
209{ 209{
210 struct snd_pcm_runtime *runtime = substream->runtime; 210 struct snd_pcm_runtime *runtime = substream->runtime;
211 snd_pcm_uframes_t pos; 211 snd_pcm_uframes_t pos;
212 snd_pcm_uframes_t new_hw_ptr, hw_ptr_interrupt, hw_base; 212 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_ptr_interrupt, hw_base;
213 snd_pcm_sframes_t delta; 213 snd_pcm_sframes_t hdelta, delta;
214 unsigned long jdelta;
214 215
216 old_hw_ptr = runtime->status->hw_ptr;
215 pos = snd_pcm_update_hw_ptr_pos(substream, runtime); 217 pos = snd_pcm_update_hw_ptr_pos(substream, runtime);
216 if (pos == SNDRV_PCM_POS_XRUN) { 218 if (pos == SNDRV_PCM_POS_XRUN) {
217 xrun(substream); 219 xrun(substream);
@@ -247,7 +249,30 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
247 new_hw_ptr = hw_base + pos; 249 new_hw_ptr = hw_base + pos;
248 } 250 }
249 } 251 }
250 if (delta > runtime->period_size) { 252 hdelta = new_hw_ptr - old_hw_ptr;
253 jdelta = jiffies - runtime->hw_ptr_jiffies;
254 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
255 delta = jdelta /
256 (((runtime->period_size * HZ) / runtime->rate)
257 + HZ/100);
258 hw_ptr_error(substream,
259 "hw_ptr skipping! [Q] "
260 "(pos=%ld, delta=%ld, period=%ld, "
261 "jdelta=%lu/%lu/%lu)\n",
262 (long)pos, (long)hdelta,
263 (long)runtime->period_size, jdelta,
264 ((hdelta * HZ) / runtime->rate), delta);
265 hw_ptr_interrupt = runtime->hw_ptr_interrupt +
266 runtime->period_size * delta;
267 if (hw_ptr_interrupt >= runtime->boundary)
268 hw_ptr_interrupt -= runtime->boundary;
269 /* rebase to interrupt position */
270 hw_base = new_hw_ptr = hw_ptr_interrupt;
271 /* align hw_base to buffer_size */
272 hw_base -= hw_base % runtime->buffer_size;
273 delta = 0;
274 }
275 if (delta > runtime->period_size + runtime->period_size / 2) {
251 hw_ptr_error(substream, 276 hw_ptr_error(substream,
252 "Lost interrupts? " 277 "Lost interrupts? "
253 "(stream=%i, delta=%ld, intr_ptr=%ld)\n", 278 "(stream=%i, delta=%ld, intr_ptr=%ld)\n",
@@ -263,6 +288,7 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
263 288
264 runtime->hw_ptr_base = hw_base; 289 runtime->hw_ptr_base = hw_base;
265 runtime->status->hw_ptr = new_hw_ptr; 290 runtime->status->hw_ptr = new_hw_ptr;
291 runtime->hw_ptr_jiffies = jiffies;
266 runtime->hw_ptr_interrupt = hw_ptr_interrupt; 292 runtime->hw_ptr_interrupt = hw_ptr_interrupt;
267 293
268 return snd_pcm_update_hw_ptr_post(substream, runtime); 294 return snd_pcm_update_hw_ptr_post(substream, runtime);
@@ -275,6 +301,7 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
275 snd_pcm_uframes_t pos; 301 snd_pcm_uframes_t pos;
276 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base; 302 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
277 snd_pcm_sframes_t delta; 303 snd_pcm_sframes_t delta;
304 unsigned long jdelta;
278 305
279 old_hw_ptr = runtime->status->hw_ptr; 306 old_hw_ptr = runtime->status->hw_ptr;
280 pos = snd_pcm_update_hw_ptr_pos(substream, runtime); 307 pos = snd_pcm_update_hw_ptr_pos(substream, runtime);
@@ -286,14 +313,15 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
286 new_hw_ptr = hw_base + pos; 313 new_hw_ptr = hw_base + pos;
287 314
288 delta = new_hw_ptr - old_hw_ptr; 315 delta = new_hw_ptr - old_hw_ptr;
316 jdelta = jiffies - runtime->hw_ptr_jiffies;
289 if (delta < 0) { 317 if (delta < 0) {
290 delta += runtime->buffer_size; 318 delta += runtime->buffer_size;
291 if (delta < 0) { 319 if (delta < 0) {
292 hw_ptr_error(substream, 320 hw_ptr_error(substream,
293 "Unexpected hw_pointer value [2] " 321 "Unexpected hw_pointer value [2] "
294 "(stream=%i, pos=%ld, old_ptr=%ld)\n", 322 "(stream=%i, pos=%ld, old_ptr=%ld, jdelta=%li)\n",
295 substream->stream, (long)pos, 323 substream->stream, (long)pos,
296 (long)old_hw_ptr); 324 (long)old_hw_ptr, jdelta);
297 return 0; 325 return 0;
298 } 326 }
299 hw_base += runtime->buffer_size; 327 hw_base += runtime->buffer_size;
@@ -301,12 +329,13 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
301 hw_base = 0; 329 hw_base = 0;
302 new_hw_ptr = hw_base + pos; 330 new_hw_ptr = hw_base + pos;
303 } 331 }
304 if (delta > runtime->period_size && runtime->periods > 1) { 332 if (((delta * HZ) / runtime->rate) > jdelta + HZ/100) {
305 hw_ptr_error(substream, 333 hw_ptr_error(substream,
306 "hw_ptr skipping! " 334 "hw_ptr skipping! "
307 "(pos=%ld, delta=%ld, period=%ld)\n", 335 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu)\n",
308 (long)pos, (long)delta, 336 (long)pos, (long)delta,
309 (long)runtime->period_size); 337 (long)runtime->period_size, jdelta,
338 ((delta * HZ) / runtime->rate));
310 return 0; 339 return 0;
311 } 340 }
312 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && 341 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -315,6 +344,7 @@ int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
315 344
316 runtime->hw_ptr_base = hw_base; 345 runtime->hw_ptr_base = hw_base;
317 runtime->status->hw_ptr = new_hw_ptr; 346 runtime->status->hw_ptr = new_hw_ptr;
347 runtime->hw_ptr_jiffies = jiffies;
318 348
319 return snd_pcm_update_hw_ptr_post(substream, runtime); 349 return snd_pcm_update_hw_ptr_post(substream, runtime);
320} 350}
@@ -1441,6 +1471,7 @@ static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1441 runtime->status->hw_ptr %= runtime->buffer_size; 1471 runtime->status->hw_ptr %= runtime->buffer_size;
1442 else 1472 else
1443 runtime->status->hw_ptr = 0; 1473 runtime->status->hw_ptr = 0;
1474 runtime->hw_ptr_jiffies = jiffies;
1444 snd_pcm_stream_unlock_irqrestore(substream, flags); 1475 snd_pcm_stream_unlock_irqrestore(substream, flags);
1445 return 0; 1476 return 0;
1446} 1477}
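A worked illustration of the new jiffies-based sanity check (assumed numbers: HZ = 1000, rate = 48000, period_size = 1024; not taken from the patch itself):

/*
 * hdelta = 4800 frames reported since the last update, but only
 * jdelta = 20 jiffies actually elapsed:
 *
 *   (hdelta * HZ) / rate  =  (4800 * 1000) / 48000  =  100 jiffies expected
 *   100 > 20 + HZ/100 (= 30)                        ->  hw_ptr is skipping
 *   delta = 20 / ((1024 * 1000) / 48000 + 10) = 20 / 31 = 0 periods
 *
 * so hw_base/new_hw_ptr are rebased onto hw_ptr_interrupt instead of
 * trusting the bogus hardware position.
 */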
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index a151fb01ba82..fc6f98e257df 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -327,21 +327,16 @@ static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
327 struct snd_pcm_hw_params *params; 327 struct snd_pcm_hw_params *params;
328 int err; 328 int err;
329 329
330 params = kmalloc(sizeof(*params), GFP_KERNEL); 330 params = memdup_user(_params, sizeof(*params));
331 if (!params) { 331 if (IS_ERR(params))
332 err = -ENOMEM; 332 return PTR_ERR(params);
333 goto out; 333
334 }
335 if (copy_from_user(params, _params, sizeof(*params))) {
336 err = -EFAULT;
337 goto out;
338 }
339 err = snd_pcm_hw_refine(substream, params); 334 err = snd_pcm_hw_refine(substream, params);
340 if (copy_to_user(_params, params, sizeof(*params))) { 335 if (copy_to_user(_params, params, sizeof(*params))) {
341 if (!err) 336 if (!err)
342 err = -EFAULT; 337 err = -EFAULT;
343 } 338 }
344out: 339
345 kfree(params); 340 kfree(params);
346 return err; 341 return err;
347} 342}
@@ -465,21 +460,16 @@ static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
465 struct snd_pcm_hw_params *params; 460 struct snd_pcm_hw_params *params;
466 int err; 461 int err;
467 462
468 params = kmalloc(sizeof(*params), GFP_KERNEL); 463 params = memdup_user(_params, sizeof(*params));
469 if (!params) { 464 if (IS_ERR(params))
470 err = -ENOMEM; 465 return PTR_ERR(params);
471 goto out; 466
472 }
473 if (copy_from_user(params, _params, sizeof(*params))) {
474 err = -EFAULT;
475 goto out;
476 }
477 err = snd_pcm_hw_params(substream, params); 467 err = snd_pcm_hw_params(substream, params);
478 if (copy_to_user(_params, params, sizeof(*params))) { 468 if (copy_to_user(_params, params, sizeof(*params))) {
479 if (!err) 469 if (!err)
480 err = -EFAULT; 470 err = -EFAULT;
481 } 471 }
482out: 472
483 kfree(params); 473 kfree(params);
484 return err; 474 return err;
485} 475}
@@ -2593,13 +2583,11 @@ static int snd_pcm_playback_ioctl1(struct file *file,
2593 return -EFAULT; 2583 return -EFAULT;
2594 if (copy_from_user(&xfern, _xfern, sizeof(xfern))) 2584 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2595 return -EFAULT; 2585 return -EFAULT;
2596 bufs = kmalloc(sizeof(void *) * runtime->channels, GFP_KERNEL); 2586
2597 if (bufs == NULL) 2587 bufs = memdup_user(xfern.bufs,
2598 return -ENOMEM; 2588 sizeof(void *) * runtime->channels);
2599 if (copy_from_user(bufs, xfern.bufs, sizeof(void *) * runtime->channels)) { 2589 if (IS_ERR(bufs))
2600 kfree(bufs); 2590 return PTR_ERR(bufs);
2601 return -EFAULT;
2602 }
2603 result = snd_pcm_lib_writev(substream, bufs, xfern.frames); 2591 result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
2604 kfree(bufs); 2592 kfree(bufs);
2605 __put_user(result, &_xfern->result); 2593 __put_user(result, &_xfern->result);
@@ -2675,13 +2663,11 @@ static int snd_pcm_capture_ioctl1(struct file *file,
2675 return -EFAULT; 2663 return -EFAULT;
2676 if (copy_from_user(&xfern, _xfern, sizeof(xfern))) 2664 if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
2677 return -EFAULT; 2665 return -EFAULT;
2678 bufs = kmalloc(sizeof(void *) * runtime->channels, GFP_KERNEL); 2666
2679 if (bufs == NULL) 2667 bufs = memdup_user(xfern.bufs,
2680 return -ENOMEM; 2668 sizeof(void *) * runtime->channels);
2681 if (copy_from_user(bufs, xfern.bufs, sizeof(void *) * runtime->channels)) { 2669 if (IS_ERR(bufs))
2682 kfree(bufs); 2670 return PTR_ERR(bufs);
2683 return -EFAULT;
2684 }
2685 result = snd_pcm_lib_readv(substream, bufs, xfern.frames); 2671 result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
2686 kfree(bufs); 2672 kfree(bufs);
2687 __put_user(result, &_xfern->result); 2673 __put_user(result, &_xfern->result);
@@ -3312,18 +3298,12 @@ static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3312 int err; 3298 int err;
3313 3299
3314 params = kmalloc(sizeof(*params), GFP_KERNEL); 3300 params = kmalloc(sizeof(*params), GFP_KERNEL);
3315 if (!params) { 3301 if (!params)
3316 err = -ENOMEM; 3302 return -ENOMEM;
3317 goto out;
3318 }
3319 oparams = kmalloc(sizeof(*oparams), GFP_KERNEL);
3320 if (!oparams) {
3321 err = -ENOMEM;
3322 goto out;
3323 }
3324 3303
3325 if (copy_from_user(oparams, _oparams, sizeof(*oparams))) { 3304 oparams = memdup_user(_oparams, sizeof(*oparams));
3326 err = -EFAULT; 3305 if (IS_ERR(oparams)) {
3306 err = PTR_ERR(oparams);
3327 goto out; 3307 goto out;
3328 } 3308 }
3329 snd_pcm_hw_convert_from_old_params(params, oparams); 3309 snd_pcm_hw_convert_from_old_params(params, oparams);
@@ -3333,9 +3313,10 @@ static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
3333 if (!err) 3313 if (!err)
3334 err = -EFAULT; 3314 err = -EFAULT;
3335 } 3315 }
3316
3317 kfree(oparams);
3336out: 3318out:
3337 kfree(params); 3319 kfree(params);
3338 kfree(oparams);
3339 return err; 3320 return err;
3340} 3321}
3341 3322
@@ -3347,17 +3328,12 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3347 int err; 3328 int err;
3348 3329
3349 params = kmalloc(sizeof(*params), GFP_KERNEL); 3330 params = kmalloc(sizeof(*params), GFP_KERNEL);
3350 if (!params) { 3331 if (!params)
3351 err = -ENOMEM; 3332 return -ENOMEM;
3352 goto out; 3333
3353 } 3334 oparams = memdup_user(_oparams, sizeof(*oparams));
3354 oparams = kmalloc(sizeof(*oparams), GFP_KERNEL); 3335 if (IS_ERR(oparams)) {
3355 if (!oparams) { 3336 err = PTR_ERR(oparams);
3356 err = -ENOMEM;
3357 goto out;
3358 }
3359 if (copy_from_user(oparams, _oparams, sizeof(*oparams))) {
3360 err = -EFAULT;
3361 goto out; 3337 goto out;
3362 } 3338 }
3363 snd_pcm_hw_convert_from_old_params(params, oparams); 3339 snd_pcm_hw_convert_from_old_params(params, oparams);
@@ -3367,9 +3343,10 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
3367 if (!err) 3343 if (!err)
3368 err = -EFAULT; 3344 err = -EFAULT;
3369 } 3345 }
3346
3347 kfree(oparams);
3370out: 3348out:
3371 kfree(params); 3349 kfree(params);
3372 kfree(oparams);
3373 return err; 3350 return err;
3374} 3351}
3375#endif /* CONFIG_SND_SUPPORT_OLD_API */ 3352#endif /* CONFIG_SND_SUPPORT_OLD_API */
diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
index 38693f47c262..c956fe462569 100644
--- a/sound/core/seq/seq_compat.c
+++ b/sound/core/seq/seq_compat.c
@@ -48,12 +48,11 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
48 struct snd_seq_port_info *data; 48 struct snd_seq_port_info *data;
49 mm_segment_t fs; 49 mm_segment_t fs;
50 50
51 data = kmalloc(sizeof(*data), GFP_KERNEL); 51 data = memdup_user(data32, sizeof(*data32));
52 if (! data) 52 if (IS_ERR(data))
53 return -ENOMEM; 53 return PTR_ERR(data);
54 54
55 if (copy_from_user(data, data32, sizeof(*data32)) || 55 if (get_user(data->flags, &data32->flags) ||
56 get_user(data->flags, &data32->flags) ||
57 get_user(data->time_queue, &data32->time_queue)) 56 get_user(data->time_queue, &data32->time_queue))
58 goto error; 57 goto error;
59 data->kernel = NULL; 58 data->kernel = NULL;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 3f0050d0b71e..8f8b17ac074d 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1395,13 +1395,10 @@ static int snd_timer_user_ginfo(struct file *file,
1395 struct list_head *p; 1395 struct list_head *p;
1396 int err = 0; 1396 int err = 0;
1397 1397
1398 ginfo = kmalloc(sizeof(*ginfo), GFP_KERNEL); 1398 ginfo = memdup_user(_ginfo, sizeof(*ginfo));
1399 if (! ginfo) 1399 if (IS_ERR(ginfo))
1400 return -ENOMEM; 1400 return PTR_ERR(ginfo);
1401 if (copy_from_user(ginfo, _ginfo, sizeof(*ginfo))) { 1401
1402 kfree(ginfo);
1403 return -EFAULT;
1404 }
1405 tid = ginfo->tid; 1402 tid = ginfo->tid;
1406 memset(ginfo, 0, sizeof(*ginfo)); 1403 memset(ginfo, 0, sizeof(*ginfo));
1407 ginfo->tid = tid; 1404 ginfo->tid = tid;
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 49037d074c71..bdc8dde4e4a2 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -684,15 +684,16 @@ static int snd_sb_csp_load(struct snd_sb_csp * p, const unsigned char *buf, int
684 684
685static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags) 685static int snd_sb_csp_load_user(struct snd_sb_csp * p, const unsigned char __user *buf, int size, int load_flags)
686{ 686{
687 int err = -ENOMEM; 687 int err;
688 unsigned char *kbuf = kmalloc(size, GFP_KERNEL); 688 unsigned char *kbuf;
689 if (kbuf) { 689
690 if (copy_from_user(kbuf, buf, size)) 690 kbuf = memdup_user(buf, size);
691 err = -EFAULT; 691 if (IS_ERR(kbuf))
692 else 692 return PTR_ERR(kbuf);
693 err = snd_sb_csp_load(p, kbuf, size, load_flags); 693
694 kfree(kbuf); 694 err = snd_sb_csp_load(p, kbuf, size, load_flags);
695 } 695
696 kfree(kbuf);
696 return err; 697 return err;
697} 698}
698 699
diff --git a/sound/isa/wavefront/wavefront_fx.c b/sound/isa/wavefront/wavefront_fx.c
index a4345fc07561..2bb1cee09255 100644
--- a/sound/isa/wavefront/wavefront_fx.c
+++ b/sound/isa/wavefront/wavefront_fx.c
@@ -202,15 +202,11 @@ snd_wavefront_fx_ioctl (struct snd_hwdep *sdev, struct file *file,
202 "> 512 bytes to FX\n"); 202 "> 512 bytes to FX\n");
203 return -EIO; 203 return -EIO;
204 } 204 }
205 page_data = kmalloc(r.data[2] * sizeof(short), GFP_KERNEL); 205 page_data = memdup_user((unsigned char __user *)
206 if (!page_data) 206 r.data[3],
207 return -ENOMEM; 207 r.data[2] * sizeof(short));
208 if (copy_from_user (page_data, 208 if (IS_ERR(page_data))
209 (unsigned char __user *) r.data[3], 209 return PTR_ERR(page_data);
210 r.data[2] * sizeof(short))) {
211 kfree(page_data);
212 return -EFAULT;
213 }
214 pd = page_data; 210 pd = page_data;
215 } 211 }
216 212
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index beb312cca75b..5d4ff48c4345 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -1664,12 +1664,11 @@ snd_wavefront_synth_ioctl (struct snd_hwdep *hw, struct file *file,
1664 break; 1664 break;
1665 1665
1666 case WFCTL_WFCMD: 1666 case WFCTL_WFCMD:
1667 wc = kmalloc(sizeof(*wc), GFP_KERNEL); 1667 wc = memdup_user(argp, sizeof(*wc));
1668 if (! wc) 1668 if (IS_ERR(wc))
1669 return -ENOMEM; 1669 return PTR_ERR(wc);
1670 if (copy_from_user (wc, argp, sizeof (*wc))) 1670
1671 err = -EFAULT; 1671 if (wavefront_synth_control (acard, wc) < 0)
1672 else if (wavefront_synth_control (acard, wc) < 0)
1673 err = -EIO; 1672 err = -EIO;
1674 else if (copy_to_user (argp, wc, sizeof (*wc))) 1673 else if (copy_to_user (argp, wc, sizeof (*wc)))
1675 err = -EFAULT; 1674 err = -EFAULT;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 191e1cd9997d..4b302d86f5f2 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2493,24 +2493,17 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
2493 case SNDRV_EMU10K1_IOCTL_CODE_POKE: 2493 case SNDRV_EMU10K1_IOCTL_CODE_POKE:
2494 if (!capable(CAP_SYS_ADMIN)) 2494 if (!capable(CAP_SYS_ADMIN))
2495 return -EPERM; 2495 return -EPERM;
2496 icode = kmalloc(sizeof(*icode), GFP_KERNEL); 2496
2497 if (icode == NULL) 2497 icode = memdup_user(argp, sizeof(*icode));
2498 return -ENOMEM; 2498 if (IS_ERR(icode))
2499 if (copy_from_user(icode, argp, sizeof(*icode))) { 2499 return PTR_ERR(icode);
2500 kfree(icode);
2501 return -EFAULT;
2502 }
2503 res = snd_emu10k1_icode_poke(emu, icode); 2500 res = snd_emu10k1_icode_poke(emu, icode);
2504 kfree(icode); 2501 kfree(icode);
2505 return res; 2502 return res;
2506 case SNDRV_EMU10K1_IOCTL_CODE_PEEK: 2503 case SNDRV_EMU10K1_IOCTL_CODE_PEEK:
2507 icode = kmalloc(sizeof(*icode), GFP_KERNEL); 2504 icode = memdup_user(argp, sizeof(*icode));
2508 if (icode == NULL) 2505 if (IS_ERR(icode))
2509 return -ENOMEM; 2506 return PTR_ERR(icode);
2510 if (copy_from_user(icode, argp, sizeof(*icode))) {
2511 kfree(icode);
2512 return -EFAULT;
2513 }
2514 res = snd_emu10k1_icode_peek(emu, icode); 2507 res = snd_emu10k1_icode_peek(emu, icode);
2515 if (res == 0 && copy_to_user(argp, icode, sizeof(*icode))) { 2508 if (res == 0 && copy_to_user(argp, icode, sizeof(*icode))) {
2516 kfree(icode); 2509 kfree(icode);
@@ -2519,24 +2512,16 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
2519 kfree(icode); 2512 kfree(icode);
2520 return res; 2513 return res;
2521 case SNDRV_EMU10K1_IOCTL_PCM_POKE: 2514 case SNDRV_EMU10K1_IOCTL_PCM_POKE:
2522 ipcm = kmalloc(sizeof(*ipcm), GFP_KERNEL); 2515 ipcm = memdup_user(argp, sizeof(*ipcm));
2523 if (ipcm == NULL) 2516 if (IS_ERR(ipcm))
2524 return -ENOMEM; 2517 return PTR_ERR(ipcm);
2525 if (copy_from_user(ipcm, argp, sizeof(*ipcm))) {
2526 kfree(ipcm);
2527 return -EFAULT;
2528 }
2529 res = snd_emu10k1_ipcm_poke(emu, ipcm); 2518 res = snd_emu10k1_ipcm_poke(emu, ipcm);
2530 kfree(ipcm); 2519 kfree(ipcm);
2531 return res; 2520 return res;
2532 case SNDRV_EMU10K1_IOCTL_PCM_PEEK: 2521 case SNDRV_EMU10K1_IOCTL_PCM_PEEK:
2533 ipcm = kzalloc(sizeof(*ipcm), GFP_KERNEL); 2522 ipcm = memdup_user(argp, sizeof(*ipcm));
2534 if (ipcm == NULL) 2523 if (IS_ERR(ipcm))
2535 return -ENOMEM; 2524 return PTR_ERR(ipcm);
2536 if (copy_from_user(ipcm, argp, sizeof(*ipcm))) {
2537 kfree(ipcm);
2538 return -EFAULT;
2539 }
2540 res = snd_emu10k1_ipcm_peek(emu, ipcm); 2525 res = snd_emu10k1_ipcm_peek(emu, ipcm);
2541 if (res == 0 && copy_to_user(argp, ipcm, sizeof(*ipcm))) { 2526 if (res == 0 && copy_to_user(argp, ipcm, sizeof(*ipcm))) {
2542 kfree(ipcm); 2527 kfree(ipcm);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a4e5e5952115..fd6e6f337d10 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2250,7 +2250,11 @@ int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
2250 err = bus->ops.command(bus, res); 2250 err = bus->ops.command(bus, res);
2251 if (!err) { 2251 if (!err) {
2252 struct hda_cache_head *c; 2252 struct hda_cache_head *c;
2253 u32 key = build_cmd_cache_key(nid, verb); 2253 u32 key;
2254 /* parm may contain the verb stuff for get/set amp */
2255 verb = verb | (parm >> 8);
2256 parm &= 0xff;
2257 key = build_cmd_cache_key(nid, verb);
2254 c = get_alloc_hash(&codec->cmd_cache, key); 2258 c = get_alloc_hash(&codec->cmd_cache, key);
2255 if (c) 2259 if (c)
2256 c->val = parm; 2260 c->val = parm;
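An illustration of why the key is rebuilt above, assuming the standard HD-audio amplifier verb layout (the values are examples only):

/*
 *   AC_VERB_SET_AMP_GAIN_MUTE = 0x300, parm = 0xb040
 *     parm >> 8 = 0xb0  -> output amp, left + right channels, index 0
 *     verb |= 0xb0      -> the cache key now encodes which amp is addressed
 *     parm &= 0xff      -> 0x40, the gain/mute byte that actually gets cached
 * Without the split, writes to different amps of one widget would share a
 * single cache entry and overwrite each other.
 */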
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7ba8db5d4c42..bc882f8f163c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -312,6 +312,9 @@ struct azx_dev {
312 unsigned int period_bytes; /* size of the period in bytes */ 312 unsigned int period_bytes; /* size of the period in bytes */
313 unsigned int frags; /* number for period in the play buffer */ 313 unsigned int frags; /* number for period in the play buffer */
314 unsigned int fifo_size; /* FIFO size */ 314 unsigned int fifo_size; /* FIFO size */
315 unsigned int start_flag: 1; /* stream full start flag */
316 unsigned long start_jiffies; /* start + minimum jiffies */
317 unsigned long min_jiffies; /* minimum jiffies before position is valid */
315 318
316 void __iomem *sd_addr; /* stream descriptor pointer */ 319 void __iomem *sd_addr; /* stream descriptor pointer */
317 320
@@ -330,7 +333,6 @@ struct azx_dev {
330 unsigned int opened :1; 333 unsigned int opened :1;
331 unsigned int running :1; 334 unsigned int running :1;
332 unsigned int irq_pending :1; 335 unsigned int irq_pending :1;
333 unsigned int irq_ignore :1;
334 /* 336 /*
335 * For VIA: 337 * For VIA:
336 * A flag to ensure DMA position is 0 338 * A flag to ensure DMA position is 0
@@ -975,7 +977,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
975 struct azx *chip = dev_id; 977 struct azx *chip = dev_id;
976 struct azx_dev *azx_dev; 978 struct azx_dev *azx_dev;
977 u32 status; 979 u32 status;
978 int i; 980 int i, ok;
979 981
980 spin_lock(&chip->reg_lock); 982 spin_lock(&chip->reg_lock);
981 983
@@ -991,18 +993,14 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
991 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK); 993 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
992 if (!azx_dev->substream || !azx_dev->running) 994 if (!azx_dev->substream || !azx_dev->running)
993 continue; 995 continue;
994 /* ignore the first dummy IRQ (due to pos_adj) */
995 if (azx_dev->irq_ignore) {
996 azx_dev->irq_ignore = 0;
997 continue;
998 }
999 /* check whether this IRQ is really acceptable */ 996 /* check whether this IRQ is really acceptable */
1000 if (azx_position_ok(chip, azx_dev)) { 997 ok = azx_position_ok(chip, azx_dev);
998 if (ok == 1) {
1001 azx_dev->irq_pending = 0; 999 azx_dev->irq_pending = 0;
1002 spin_unlock(&chip->reg_lock); 1000 spin_unlock(&chip->reg_lock);
1003 snd_pcm_period_elapsed(azx_dev->substream); 1001 snd_pcm_period_elapsed(azx_dev->substream);
1004 spin_lock(&chip->reg_lock); 1002 spin_lock(&chip->reg_lock);
1005 } else if (chip->bus && chip->bus->workq) { 1003 } else if (ok == 0 && chip->bus && chip->bus->workq) {
1006 /* bogus IRQ, process it later */ 1004 /* bogus IRQ, process it later */
1007 azx_dev->irq_pending = 1; 1005 azx_dev->irq_pending = 1;
1008 queue_work(chip->bus->workq, 1006 queue_work(chip->bus->workq,
@@ -1088,7 +1086,6 @@ static int azx_setup_periods(struct azx *chip,
1088 bdl = (u32 *)azx_dev->bdl.area; 1086 bdl = (u32 *)azx_dev->bdl.area;
1089 ofs = 0; 1087 ofs = 0;
1090 azx_dev->frags = 0; 1088 azx_dev->frags = 0;
1091 azx_dev->irq_ignore = 0;
1092 pos_adj = bdl_pos_adj[chip->dev_index]; 1089 pos_adj = bdl_pos_adj[chip->dev_index];
1093 if (pos_adj > 0) { 1090 if (pos_adj > 0) {
1094 struct snd_pcm_runtime *runtime = substream->runtime; 1091 struct snd_pcm_runtime *runtime = substream->runtime;
@@ -1109,7 +1106,6 @@ static int azx_setup_periods(struct azx *chip,
1109 &bdl, ofs, pos_adj, 1); 1106 &bdl, ofs, pos_adj, 1);
1110 if (ofs < 0) 1107 if (ofs < 0)
1111 goto error; 1108 goto error;
1112 azx_dev->irq_ignore = 1;
1113 } 1109 }
1114 } else 1110 } else
1115 pos_adj = 0; 1111 pos_adj = 0;
@@ -1155,6 +1151,9 @@ static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
1155 while (((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) && 1151 while (((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) &&
1156 --timeout) 1152 --timeout)
1157 ; 1153 ;
1154
1155 /* reset first position - may not be synced with hw at this time */
1156 *azx_dev->posbuf = 0;
1158} 1157}
1159 1158
1160/* 1159/*
@@ -1409,7 +1408,6 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
1409 snd_pcm_set_sync(substream); 1408 snd_pcm_set_sync(substream);
1410 mutex_unlock(&chip->open_mutex); 1409 mutex_unlock(&chip->open_mutex);
1411 1410
1412 azx_stream_reset(chip, azx_dev);
1413 return 0; 1411 return 0;
1414} 1412}
1415 1413
@@ -1474,6 +1472,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
1474 unsigned int bufsize, period_bytes, format_val; 1472 unsigned int bufsize, period_bytes, format_val;
1475 int err; 1473 int err;
1476 1474
1475 azx_stream_reset(chip, azx_dev);
1477 format_val = snd_hda_calc_stream_format(runtime->rate, 1476 format_val = snd_hda_calc_stream_format(runtime->rate,
1478 runtime->channels, 1477 runtime->channels,
1479 runtime->format, 1478 runtime->format,
@@ -1502,6 +1501,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
1502 return err; 1501 return err;
1503 } 1502 }
1504 1503
1504 azx_dev->min_jiffies = (runtime->period_size * HZ) /
1505 (runtime->rate * 2);
1505 azx_setup_controller(chip, azx_dev); 1506 azx_setup_controller(chip, azx_dev);
1506 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 1507 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
1507 azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1; 1508 azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1;
@@ -1518,13 +1519,14 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
1518 struct azx *chip = apcm->chip; 1519 struct azx *chip = apcm->chip;
1519 struct azx_dev *azx_dev; 1520 struct azx_dev *azx_dev;
1520 struct snd_pcm_substream *s; 1521 struct snd_pcm_substream *s;
1521 int start, nsync = 0, sbits = 0; 1522 int rstart = 0, start, nsync = 0, sbits = 0;
1522 int nwait, timeout; 1523 int nwait, timeout;
1523 1524
1524 switch (cmd) { 1525 switch (cmd) {
1526 case SNDRV_PCM_TRIGGER_START:
1527 rstart = 1;
1525 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 1528 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1526 case SNDRV_PCM_TRIGGER_RESUME: 1529 case SNDRV_PCM_TRIGGER_RESUME:
1527 case SNDRV_PCM_TRIGGER_START:
1528 start = 1; 1530 start = 1;
1529 break; 1531 break;
1530 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 1532 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
@@ -1554,6 +1556,10 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
1554 if (s->pcm->card != substream->pcm->card) 1556 if (s->pcm->card != substream->pcm->card)
1555 continue; 1557 continue;
1556 azx_dev = get_azx_dev(s); 1558 azx_dev = get_azx_dev(s);
1559 if (rstart) {
1560 azx_dev->start_flag = 1;
1561 azx_dev->start_jiffies = jiffies + azx_dev->min_jiffies;
1562 }
1557 if (start) 1563 if (start)
1558 azx_stream_start(chip, azx_dev); 1564 azx_stream_start(chip, azx_dev);
1559 else 1565 else
@@ -1703,6 +1709,11 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
1703{ 1709{
1704 unsigned int pos; 1710 unsigned int pos;
1705 1711
1712 if (azx_dev->start_flag &&
1713 time_before_eq(jiffies, azx_dev->start_jiffies))
1714 return -1; /* bogus (too early) interrupt */
1715 azx_dev->start_flag = 0;
1716
1706 pos = azx_get_position(chip, azx_dev); 1717 pos = azx_get_position(chip, azx_dev);
1707 if (chip->position_fix == POS_FIX_AUTO) { 1718 if (chip->position_fix == POS_FIX_AUTO) {
1708 if (!pos) { 1719 if (!pos) {
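A worked example of the new early-interrupt guard (HZ = 1000 assumed):

/*
 * period_size = 1024 frames, rate = 48000 Hz:
 *   min_jiffies = (1024 * 1000) / (48000 * 2) = 10 jiffies (~half a period)
 * On SNDRV_PCM_TRIGGER_START each stream in the group gets
 *   start_flag = 1;  start_jiffies = jiffies + min_jiffies;
 * and azx_position_ok() returns -1 for any interrupt arriving before
 * start_jiffies, which azx_interrupt() drops -- replacing the old
 * one-shot irq_ignore flag.
 */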
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 1f2ad76ca94b..56ce19e68cb5 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -350,12 +350,20 @@ static int conexant_mux_enum_put(struct snd_kcontrol *kcontrol,
350} 350}
351 351
352#ifdef CONFIG_SND_JACK 352#ifdef CONFIG_SND_JACK
353static void conexant_free_jack_priv(struct snd_jack *jack)
354{
355 struct conexant_jack *jacks = jack->private_data;
356 jacks->nid = 0;
357 jacks->jack = NULL;
358}
359
353static int conexant_add_jack(struct hda_codec *codec, 360static int conexant_add_jack(struct hda_codec *codec,
354 hda_nid_t nid, int type) 361 hda_nid_t nid, int type)
355{ 362{
356 struct conexant_spec *spec; 363 struct conexant_spec *spec;
357 struct conexant_jack *jack; 364 struct conexant_jack *jack;
358 const char *name; 365 const char *name;
366 int err;
359 367
360 spec = codec->spec; 368 spec = codec->spec;
361 snd_array_init(&spec->jacks, sizeof(*jack), 32); 369 snd_array_init(&spec->jacks, sizeof(*jack), 32);
@@ -368,7 +376,12 @@ static int conexant_add_jack(struct hda_codec *codec,
368 jack->nid = nid; 376 jack->nid = nid;
369 jack->type = type; 377 jack->type = type;
370 378
371 return snd_jack_new(codec->bus->card, name, type, &jack->jack); 379 err = snd_jack_new(codec->bus->card, name, type, &jack->jack);
380 if (err < 0)
381 return err;
382 jack->jack->private_data = jack;
383 jack->jack->private_free = conexant_free_jack_priv;
384 return 0;
372} 385}
373 386
374static void conexant_report_jack(struct hda_codec *codec, hda_nid_t nid) 387static void conexant_report_jack(struct hda_codec *codec, hda_nid_t nid)
@@ -455,8 +468,10 @@ static void conexant_free(struct hda_codec *codec)
455 if (spec->jacks.list) { 468 if (spec->jacks.list) {
456 struct conexant_jack *jacks = spec->jacks.list; 469 struct conexant_jack *jacks = spec->jacks.list;
457 int i; 470 int i;
458 for (i = 0; i < spec->jacks.used; i++) 471 for (i = 0; i < spec->jacks.used; i++, jacks++) {
459 snd_device_free(codec->bus->card, &jacks[i].jack); 472 if (jacks->jack)
473 snd_device_free(codec->bus->card, jacks->jack);
474 }
460 snd_array_free(&spec->jacks); 475 snd_array_free(&spec->jacks);
461 } 476 }
462#endif 477#endif
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f35e58a2d921..6ed787eedd06 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -8742,10 +8742,9 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
8742 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD), 8742 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD),
8743 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch), 8743 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch),
8744 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION), 8744 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION),
8745 SND_PCI_QUIRK(0x1734, 0x1107, "FSC AMILO Xi2550", 8745 SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1100, "FSC AMILO Xi/Pi25xx",
8746 ALC883_FUJITSU_PI2515), 8746 ALC883_FUJITSU_PI2515),
8747 SND_PCI_QUIRK(0x1734, 0x1108, "Fujitsu AMILO Pi2515", ALC883_FUJITSU_PI2515), 8747 SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1130, "Fujitsu AMILO Xa35xx",
8748 SND_PCI_QUIRK(0x1734, 0x113d, "Fujitsu AMILO Xa3530",
8749 ALC888_FUJITSU_XA3530), 8748 ALC888_FUJITSU_XA3530),
8750 SND_PCI_QUIRK(0x17aa, 0x101e, "Lenovo 101e", ALC883_LENOVO_101E_2ch), 8749 SND_PCI_QUIRK(0x17aa, 0x101e, "Lenovo 101e", ALC883_LENOVO_101E_2ch),
8751 SND_PCI_QUIRK(0x17aa, 0x2085, "Lenovo NB0763", ALC883_LENOVO_NB0763), 8750 SND_PCI_QUIRK(0x17aa, 0x2085, "Lenovo NB0763", ALC883_LENOVO_NB0763),
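For illustration of the consolidation above:

/*
 * SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1100, ...) matches any subdevice ID
 * with (id & 0xfff0) == 0x1100, i.e. 0x1100-0x110f; that range covers both
 * removed entries, 0x1107 (AMILO Xi2550) and 0x1108 (AMILO Pi2515), and maps
 * them all to ALC883_FUJITSU_PI2515 in a single line.
 */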
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 61996a2f45df..ce30b459aee6 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -3851,6 +3851,15 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
3851 AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */ 3851 AC_VERB_SET_GPIO_DATA, gpiostate); /* sync */
3852} 3852}
3853 3853
3854#ifdef CONFIG_SND_JACK
3855static void stac92xx_free_jack_priv(struct snd_jack *jack)
3856{
3857 struct sigmatel_jack *jacks = jack->private_data;
3858 jacks->nid = 0;
3859 jacks->jack = NULL;
3860}
3861#endif
3862
3854static int stac92xx_add_jack(struct hda_codec *codec, 3863static int stac92xx_add_jack(struct hda_codec *codec,
3855 hda_nid_t nid, int type) 3864 hda_nid_t nid, int type)
3856{ 3865{
@@ -3860,6 +3869,7 @@ static int stac92xx_add_jack(struct hda_codec *codec,
3860 int def_conf = snd_hda_codec_get_pincfg(codec, nid); 3869 int def_conf = snd_hda_codec_get_pincfg(codec, nid);
3861 int connectivity = get_defcfg_connect(def_conf); 3870 int connectivity = get_defcfg_connect(def_conf);
3862 char name[32]; 3871 char name[32];
3872 int err;
3863 3873
3864 if (connectivity && connectivity != AC_JACK_PORT_FIXED) 3874 if (connectivity && connectivity != AC_JACK_PORT_FIXED)
3865 return 0; 3875 return 0;
@@ -3876,10 +3886,15 @@ static int stac92xx_add_jack(struct hda_codec *codec,
3876 snd_hda_get_jack_connectivity(def_conf), 3886 snd_hda_get_jack_connectivity(def_conf),
3877 snd_hda_get_jack_location(def_conf)); 3887 snd_hda_get_jack_location(def_conf));
3878 3888
3879 return snd_jack_new(codec->bus->card, name, type, &jack->jack); 3889 err = snd_jack_new(codec->bus->card, name, type, &jack->jack);
3880#else 3890 if (err < 0) {
3881 return 0; 3891 jack->nid = 0;
3892 return err;
3893 }
3894 jack->jack->private_data = jack;
3895 jack->jack->private_free = stac92xx_free_jack_priv;
3882#endif 3896#endif
3897 return 0;
3883} 3898}
3884 3899
3885static int stac_add_event(struct sigmatel_spec *spec, hda_nid_t nid, 3900static int stac_add_event(struct sigmatel_spec *spec, hda_nid_t nid,
@@ -4138,8 +4153,10 @@ static void stac92xx_free_jacks(struct hda_codec *codec)
4138 if (!codec->bus->shutdown && spec->jacks.list) { 4153 if (!codec->bus->shutdown && spec->jacks.list) {
4139 struct sigmatel_jack *jacks = spec->jacks.list; 4154 struct sigmatel_jack *jacks = spec->jacks.list;
4140 int i; 4155 int i;
4141 for (i = 0; i < spec->jacks.used; i++) 4156 for (i = 0; i < spec->jacks.used; i++, jacks++) {
4142 snd_device_free(codec->bus->card, &jacks[i].jack); 4157 if (jacks->jack)
4158 snd_device_free(codec->bus->card, jacks->jack);
4159 }
4143 } 4160 }
4144 snd_array_free(&spec->jacks); 4161 snd_array_free(&spec->jacks);
4145#endif 4162#endif
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 57648810eaf1..5dced5b79387 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -355,6 +355,9 @@ struct ichdev {
355 unsigned int fragsize1; 355 unsigned int fragsize1;
356 unsigned int position; 356 unsigned int position;
357 unsigned int pos_shift; 357 unsigned int pos_shift;
358 unsigned int last_pos;
359 unsigned long last_pos_jiffies;
360 unsigned int jiffy_to_bytes;
358 int frags; 361 int frags;
359 int lvi; 362 int lvi;
360 int lvi_frag; 363 int lvi_frag;
@@ -838,7 +841,10 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
838 ichdev->suspended = 0; 841 ichdev->suspended = 0;
839 /* fallthru */ 842 /* fallthru */
840 case SNDRV_PCM_TRIGGER_START: 843 case SNDRV_PCM_TRIGGER_START:
844 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
841 val = ICH_IOCE | ICH_STARTBM; 845 val = ICH_IOCE | ICH_STARTBM;
846 ichdev->last_pos = ichdev->position;
847 ichdev->last_pos_jiffies = jiffies;
842 break; 848 break;
843 case SNDRV_PCM_TRIGGER_SUSPEND: 849 case SNDRV_PCM_TRIGGER_SUSPEND:
844 ichdev->suspended = 1; 850 ichdev->suspended = 1;
@@ -849,9 +855,6 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
849 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 855 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
850 val = ICH_IOCE; 856 val = ICH_IOCE;
851 break; 857 break;
852 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
853 val = ICH_IOCE | ICH_STARTBM;
854 break;
855 default: 858 default:
856 return -EINVAL; 859 return -EINVAL;
857 } 860 }
@@ -1045,6 +1048,7 @@ static int snd_intel8x0_pcm_prepare(struct snd_pcm_substream *substream)
1045 ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1; 1048 ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
1046 } 1049 }
1047 snd_intel8x0_setup_periods(chip, ichdev); 1050 snd_intel8x0_setup_periods(chip, ichdev);
1051 ichdev->jiffy_to_bytes = (runtime->rate * 4 * ichdev->pos_shift) / HZ;
1048 return 0; 1052 return 0;
1049} 1053}
1050 1054
@@ -1053,7 +1057,7 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
1053 struct intel8x0 *chip = snd_pcm_substream_chip(substream); 1057 struct intel8x0 *chip = snd_pcm_substream_chip(substream);
1054 struct ichdev *ichdev = get_ichdev(substream); 1058 struct ichdev *ichdev = get_ichdev(substream);
1055 size_t ptr1, ptr; 1059 size_t ptr1, ptr;
1056 int civ, timeout = 100; 1060 int civ, timeout = 10;
1057 unsigned int position; 1061 unsigned int position;
1058 1062
1059 spin_lock(&chip->reg_lock); 1063 spin_lock(&chip->reg_lock);
@@ -1069,9 +1073,19 @@ static snd_pcm_uframes_t snd_intel8x0_pcm_pointer(struct snd_pcm_substream *subs
1069 ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb)) 1073 ptr1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
1070 break; 1074 break;
1071 } while (timeout--); 1075 } while (timeout--);
1072 ptr1 <<= ichdev->pos_shift; 1076 if (ptr1 != 0) {
1073 ptr = ichdev->fragsize1 - ptr1; 1077 ptr1 <<= ichdev->pos_shift;
1074 ptr += position; 1078 ptr = ichdev->fragsize1 - ptr1;
1079 ptr += position;
1080 ichdev->last_pos = ptr;
1081 ichdev->last_pos_jiffies = jiffies;
1082 } else {
1083 ptr1 = jiffies - ichdev->last_pos_jiffies;
1084 if (ptr1)
1085 ptr1 -= 1;
1086 ptr = ichdev->last_pos + ptr1 * ichdev->jiffy_to_bytes;
1087 ptr %= ichdev->size;
1088 }
1075 spin_unlock(&chip->reg_lock); 1089 spin_unlock(&chip->reg_lock);
1076 if (ptr >= ichdev->size) 1090 if (ptr >= ichdev->size)
1077 return 0; 1091 return 0;
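
The pointer hunk above trusts the hardware position only when the PICB readback is stable and non-zero; otherwise it extrapolates from the last trusted position using jiffy_to_bytes, which snd_intel8x0_pcm_prepare() precomputes as (rate * 4 * pos_shift) / HZ. A minimal sketch of that fallback, with a stand-in struct for the fields the hunk adds to struct ichdev:

	#include <linux/jiffies.h>

	/* Stand-in for the fields added to struct ichdev in the hunk above. */
	struct pos_state {
		unsigned int last_pos;		/* last trusted DMA position, bytes */
		unsigned long last_pos_jiffies;	/* jiffies when it was recorded */
		unsigned int jiffy_to_bytes;	/* bytes consumed per jiffy at this rate */
		unsigned int size;		/* ring buffer size, bytes */
	};

	static unsigned int estimate_dma_pos(const struct pos_state *s)
	{
		unsigned long ticks = jiffies - s->last_pos_jiffies;

		if (ticks)		/* the current tick may be partial, so drop one */
			ticks -= 1;
		return (s->last_pos + ticks * s->jiffy_to_bytes) % s->size;
	}
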
@@ -2661,12 +2675,14 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2661 struct snd_pcm_substream *subs; 2675 struct snd_pcm_substream *subs;
2662 struct ichdev *ichdev; 2676 struct ichdev *ichdev;
2663 unsigned long port; 2677 unsigned long port;
2664 unsigned long pos, t; 2678 unsigned long pos, pos1, t;
2665 struct timeval start_time, stop_time; 2679 int civ, timeout = 1000, attempt = 1;
2680 struct timespec start_time, stop_time;
2666 2681
2667 if (chip->ac97_bus->clock != 48000) 2682 if (chip->ac97_bus->clock != 48000)
2668 return; /* specified in module option */ 2683 return; /* specified in module option */
2669 2684
2685 __again:
2670 subs = chip->pcm[0]->streams[0].substream; 2686 subs = chip->pcm[0]->streams[0].substream;
2671 if (! subs || subs->dma_buffer.bytes < INTEL8X0_TESTBUF_SIZE) { 2687 if (! subs || subs->dma_buffer.bytes < INTEL8X0_TESTBUF_SIZE) {
2672 snd_printk(KERN_WARNING "no playback buffer allocated - aborting measure ac97 clock\n"); 2688 snd_printk(KERN_WARNING "no playback buffer allocated - aborting measure ac97 clock\n");
@@ -2674,7 +2690,7 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2674 } 2690 }
2675 ichdev = &chip->ichd[ICHD_PCMOUT]; 2691 ichdev = &chip->ichd[ICHD_PCMOUT];
2676 ichdev->physbuf = subs->dma_buffer.addr; 2692 ichdev->physbuf = subs->dma_buffer.addr;
2677 ichdev->size = chip->ichd[ICHD_PCMOUT].fragsize = INTEL8X0_TESTBUF_SIZE; 2693 ichdev->size = ichdev->fragsize = INTEL8X0_TESTBUF_SIZE;
2678 ichdev->substream = NULL; /* don't process interrupts */ 2694 ichdev->substream = NULL; /* don't process interrupts */
2679 2695
2680 /* set rate */ 2696 /* set rate */
@@ -2693,16 +2709,31 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2693 iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE); 2709 iputbyte(chip, port + ICH_REG_OFF_CR, ICH_IOCE);
2694 iputdword(chip, ICHREG(ALI_DMACR), 1 << ichdev->ali_slot); 2710 iputdword(chip, ICHREG(ALI_DMACR), 1 << ichdev->ali_slot);
2695 } 2711 }
2696 do_gettimeofday(&start_time); 2712 do_posix_clock_monotonic_gettime(&start_time);
2697 spin_unlock_irq(&chip->reg_lock); 2713 spin_unlock_irq(&chip->reg_lock);
2698 msleep(50); 2714 msleep(50);
2699 spin_lock_irq(&chip->reg_lock); 2715 spin_lock_irq(&chip->reg_lock);
2700 /* check the position */ 2716 /* check the position */
2701 pos = ichdev->fragsize1; 2717 do {
2702 pos -= igetword(chip, ichdev->reg_offset + ichdev->roff_picb) << ichdev->pos_shift; 2718 civ = igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV);
2703 pos += ichdev->position; 2719 pos1 = igetword(chip, ichdev->reg_offset + ichdev->roff_picb);
2720 if (pos1 == 0) {
2721 udelay(10);
2722 continue;
2723 }
2724 if (civ == igetbyte(chip, ichdev->reg_offset + ICH_REG_OFF_CIV) &&
2725 pos1 == igetword(chip, ichdev->reg_offset + ichdev->roff_picb))
2726 break;
2727 } while (timeout--);
2728 if (pos1 == 0) { /* oops, this value is not reliable */
2729 pos = 0;
2730 } else {
2731 pos = ichdev->fragsize1;
2732 pos -= pos1 << ichdev->pos_shift;
2733 pos += ichdev->position;
2734 }
2704 chip->in_measurement = 0; 2735 chip->in_measurement = 0;
2705 do_gettimeofday(&stop_time); 2736 do_posix_clock_monotonic_gettime(&stop_time);
2706 /* stop */ 2737 /* stop */
2707 if (chip->device_type == DEVICE_ALI) { 2738 if (chip->device_type == DEVICE_ALI) {
2708 iputdword(chip, ICHREG(ALI_DMACR), 1 << (ichdev->ali_slot + 16)); 2739 iputdword(chip, ICHREG(ALI_DMACR), 1 << (ichdev->ali_slot + 16));
@@ -2717,19 +2748,37 @@ static void __devinit intel8x0_measure_ac97_clock(struct intel8x0 *chip)
2717 iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS); 2748 iputbyte(chip, port + ICH_REG_OFF_CR, ICH_RESETREGS);
2718 spin_unlock_irq(&chip->reg_lock); 2749 spin_unlock_irq(&chip->reg_lock);
2719 2750
2751 if (pos == 0) {
2752 snd_printk(KERN_ERR "intel8x0: measure - unreliable DMA position..\n");
2753 __retry:
2754 if (attempt < 2) {
2755 attempt++;
2756 goto __again;
2757 }
2758 return;
2759 }
2760
2761 pos /= 4;
2720 t = stop_time.tv_sec - start_time.tv_sec; 2762 t = stop_time.tv_sec - start_time.tv_sec;
2721 t *= 1000000; 2763 t *= 1000000;
2722 t += stop_time.tv_usec - start_time.tv_usec; 2764 t += (stop_time.tv_nsec - start_time.tv_nsec) / 1000;
2723 printk(KERN_INFO "%s: measured %lu usecs\n", __func__, t); 2765 printk(KERN_INFO "%s: measured %lu usecs (%lu samples)\n", __func__, t, pos);
2724 if (t == 0) { 2766 if (t == 0) {
2725 snd_printk(KERN_ERR "?? calculation error..\n"); 2767 snd_printk(KERN_ERR "intel8x0: ?? calculation error..\n");
2726 return; 2768 goto __retry;
2727 } 2769 }
2728 pos = (pos / 4) * 1000; 2770 pos *= 1000;
2729 pos = (pos / t) * 1000 + ((pos % t) * 1000) / t; 2771 pos = (pos / t) * 1000 + ((pos % t) * 1000) / t;
2730 if (pos < 40000 || pos >= 60000) 2772 if (pos < 40000 || pos >= 60000) {
2731 /* abnormal value. hw problem? */ 2773 /* abnormal value. hw problem? */
2732 printk(KERN_INFO "intel8x0: measured clock %ld rejected\n", pos); 2774 printk(KERN_INFO "intel8x0: measured clock %ld rejected\n", pos);
2775 goto __retry;
2776 } else if (pos > 40500 && pos < 41500)
2777 /* first exception - 41000Hz reference clock */
2778 chip->ac97_bus->clock = 41000;
2779 else if (pos > 43600 && pos < 44600)
2780 /* second exception - 44100HZ reference clock */
2781 chip->ac97_bus->clock = 44100;
2733 else if (pos < 47500 || pos > 48500) 2782 else if (pos < 47500 || pos > 48500)
2734 /* not 48000Hz, tuning the clock.. */ 2783 /* not 48000Hz, tuning the clock.. */
2735 chip->ac97_bus->clock = (chip->ac97_bus->clock * 48000) / pos; 2784 chip->ac97_bus->clock = (chip->ac97_bus->clock * 48000) / pos;
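
The clock-measurement rework above timestamps the run with the monotonic clock, re-reads CIV/PICB until they are stable, retries the whole measurement once if the DMA position or the elapsed time is unusable, and then classifies the estimated rate: values near 41000 or 44100 are accepted as known alternate reference clocks, anything else inside 40-60 kHz rescales ac97_bus->clock, and the rest is rejected. A minimal sketch of the rate estimate, assuming pos counts samples and t microseconds as in the hunk:

	/* Samples played in t_usecs, scaled to samples per second with the
	 * same two-step rounding as the hunk above. */
	static unsigned long estimate_rate(unsigned long samples, unsigned long t_usecs)
	{
		unsigned long pos = samples * 1000;

		if (!t_usecs)
			return 0;	/* caller retries the measurement */
		return (pos / t_usecs) * 1000 + ((pos % t_usecs) * 1000) / t_usecs;
	}
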
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index f7c4544f7859..0625c342a1c9 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -27,8 +27,6 @@
27#include <sound/soc.h> 27#include <sound/soc.h>
28#include <sound/soc-dapm.h> 28#include <sound/soc-dapm.h>
29 29
30#include <mach/pxa-regs.h>
31#include <mach/hardware.h>
32#include <mach/magician.h> 30#include <mach/magician.h>
33#include <asm/mach-types.h> 31#include <asm/mach-types.h>
34#include "../codecs/uda1380.h" 32#include "../codecs/uda1380.h"
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
index 2f3a21eee051..df494d1e346f 100644
--- a/sound/soc/s3c24xx/Kconfig
+++ b/sound/soc/s3c24xx/Kconfig
@@ -1,10 +1,10 @@
1config SND_S3C24XX_SOC 1config SND_S3C24XX_SOC
2 tristate "SoC Audio for the Samsung S3CXXXX chips" 2 tristate "SoC Audio for the Samsung S3CXXXX chips"
3 depends on ARCH_S3C2410 || ARCH_S3C64XX 3 depends on ARCH_S3C2410
4 help 4 help
5 Say Y or M if you want to add support for codecs attached to 5 Say Y or M if you want to add support for codecs attached to
6 the S3C24XX and S3C64XX AC97, I2S or SSP interface. You will 6 the S3C24XX AC97 or I2S interfaces. You will also need to
7 also need to select the audio interfaces to support below. 7 select the audio interfaces to support below.
8 8
9config SND_S3C24XX_SOC_I2S 9config SND_S3C24XX_SOC_I2S
10 tristate 10 tristate
diff --git a/sound/usb/caiaq/Makefile b/sound/usb/caiaq/Makefile
index 23dadd5a11cd..388999653aaa 100644
--- a/sound/usb/caiaq/Makefile
+++ b/sound/usb/caiaq/Makefile
@@ -1,4 +1,4 @@
1snd-usb-caiaq-y := caiaq-device.o caiaq-audio.o caiaq-midi.o caiaq-control.o 1snd-usb-caiaq-y := device.o audio.o midi.o control.o
2snd-usb-caiaq-$(CONFIG_SND_USB_CAIAQ_INPUT) += caiaq-input.o 2snd-usb-caiaq-$(CONFIG_SND_USB_CAIAQ_INPUT) += input.o
3 3
4obj-$(CONFIG_SND_USB_CAIAQ) += snd-usb-caiaq.o 4obj-$(CONFIG_SND_USB_CAIAQ) += snd-usb-caiaq.o
diff --git a/sound/usb/caiaq/caiaq-audio.c b/sound/usb/caiaq/audio.c
index 08d51e0c9fea..3f45c0fe61ab 100644
--- a/sound/usb/caiaq/caiaq-audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -16,20 +16,14 @@
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/ 17*/
18 18
19#include <linux/spinlock.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/interrupt.h>
23#include <linux/usb.h> 21#include <linux/usb.h>
24#include <linux/spinlock.h>
25#include <sound/core.h> 22#include <sound/core.h>
26#include <sound/initval.h>
27#include <sound/pcm.h> 23#include <sound/pcm.h>
28#include <sound/rawmidi.h>
29#include <linux/input.h>
30 24
31#include "caiaq-device.h" 25#include "device.h"
32#include "caiaq-audio.h" 26#include "audio.h"
33 27
34#define N_URBS 32 28#define N_URBS 32
35#define CLOCK_DRIFT_TOLERANCE 5 29#define CLOCK_DRIFT_TOLERANCE 5
diff --git a/sound/usb/caiaq/caiaq-audio.h b/sound/usb/caiaq/audio.h
index 8ab1f8d9529e..8ab1f8d9529e 100644
--- a/sound/usb/caiaq/caiaq-audio.h
+++ b/sound/usb/caiaq/audio.h
diff --git a/sound/usb/caiaq/caiaq-control.c b/sound/usb/caiaq/control.c
index e92c2bbf4fe9..537102ba6b9d 100644
--- a/sound/usb/caiaq/caiaq-control.c
+++ b/sound/usb/caiaq/control.c
@@ -18,17 +18,13 @@
18 */ 18 */
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/usb.h> 21#include <linux/usb.h>
22#include <sound/control.h>
23#include <sound/core.h> 23#include <sound/core.h>
24#include <sound/initval.h>
25#include <sound/pcm.h> 24#include <sound/pcm.h>
26#include <sound/rawmidi.h>
27#include <sound/control.h>
28#include <linux/input.h>
29 25
30#include "caiaq-device.h" 26#include "device.h"
31#include "caiaq-control.h" 27#include "control.h"
32 28
33#define CNT_INTVAL 0x10000 29#define CNT_INTVAL 0x10000
34 30
diff --git a/sound/usb/caiaq/caiaq-control.h b/sound/usb/caiaq/control.h
index 2e7ab1aa4fb3..2e7ab1aa4fb3 100644
--- a/sound/usb/caiaq/caiaq-control.h
+++ b/sound/usb/caiaq/control.h
diff --git a/sound/usb/caiaq/caiaq-device.c b/sound/usb/caiaq/device.c
index cf573a982fdc..6d517705da0e 100644
--- a/sound/usb/caiaq/caiaq-device.c
+++ b/sound/usb/caiaq/device.c
@@ -19,27 +19,20 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20*/ 20*/
21 21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
25#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/usb.h> 26#include <linux/usb.h>
27#include <linux/input.h>
28#include <linux/spinlock.h>
29#include <sound/core.h>
30#include <sound/initval.h> 27#include <sound/initval.h>
28#include <sound/core.h>
31#include <sound/pcm.h> 29#include <sound/pcm.h>
32#include <sound/rawmidi.h>
33#include <sound/control.h>
34
35#include "caiaq-device.h"
36#include "caiaq-audio.h"
37#include "caiaq-midi.h"
38#include "caiaq-control.h"
39 30
40#ifdef CONFIG_SND_USB_CAIAQ_INPUT 31#include "device.h"
41#include "caiaq-input.h" 32#include "audio.h"
42#endif 33#include "midi.h"
34#include "control.h"
35#include "input.h"
43 36
44MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); 37MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
45MODULE_DESCRIPTION("caiaq USB audio, version 1.3.13"); 38MODULE_DESCRIPTION("caiaq USB audio, version 1.3.13");
diff --git a/sound/usb/caiaq/caiaq-device.h b/sound/usb/caiaq/device.h
index 4cce1ad7493d..4cce1ad7493d 100644
--- a/sound/usb/caiaq/caiaq-device.h
+++ b/sound/usb/caiaq/device.h
diff --git a/sound/usb/caiaq/caiaq-input.c b/sound/usb/caiaq/input.c
index f743847a5e5a..a48d309bd94c 100644
--- a/sound/usb/caiaq/caiaq-input.c
+++ b/sound/usb/caiaq/input.c
@@ -17,17 +17,12 @@
17*/ 17*/
18 18
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/input.h>
23#include <linux/usb.h> 20#include <linux/usb.h>
24#include <linux/usb/input.h> 21#include <linux/usb/input.h>
25#include <linux/spinlock.h>
26#include <sound/core.h>
27#include <sound/rawmidi.h>
28#include <sound/pcm.h> 22#include <sound/pcm.h>
29#include "caiaq-device.h" 23
30#include "caiaq-input.h" 24#include "device.h"
25#include "input.h"
31 26
32static unsigned short keycode_ak1[] = { KEY_C, KEY_B, KEY_A }; 27static unsigned short keycode_ak1[] = { KEY_C, KEY_B, KEY_A };
33static unsigned short keycode_rk2[] = { KEY_1, KEY_2, KEY_3, KEY_4, 28static unsigned short keycode_rk2[] = { KEY_1, KEY_2, KEY_3, KEY_4,
diff --git a/sound/usb/caiaq/caiaq-input.h b/sound/usb/caiaq/input.h
index ced535577864..ced535577864 100644
--- a/sound/usb/caiaq/caiaq-input.h
+++ b/sound/usb/caiaq/input.h
diff --git a/sound/usb/caiaq/caiaq-midi.c b/sound/usb/caiaq/midi.c
index f19fd360c936..8fa8cd88d763 100644
--- a/sound/usb/caiaq/caiaq-midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -16,20 +16,13 @@
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/ 17*/
18 18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/interrupt.h>
23#include <linux/usb.h> 19#include <linux/usb.h>
24#include <linux/input.h>
25#include <linux/spinlock.h>
26#include <sound/core.h>
27#include <sound/rawmidi.h> 20#include <sound/rawmidi.h>
21#include <sound/core.h>
28#include <sound/pcm.h> 22#include <sound/pcm.h>
29 23
30#include "caiaq-device.h" 24#include "device.h"
31#include "caiaq-midi.h" 25#include "midi.h"
32
33 26
34static int snd_usb_caiaq_midi_input_open(struct snd_rawmidi_substream *substream) 27static int snd_usb_caiaq_midi_input_open(struct snd_rawmidi_substream *substream)
35{ 28{
diff --git a/sound/usb/caiaq/caiaq-midi.h b/sound/usb/caiaq/midi.h
index 9d16db027fc3..9d16db027fc3 100644
--- a/sound/usb/caiaq/caiaq-midi.h
+++ b/sound/usb/caiaq/midi.h
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 98276aafefe6..012ff1f6f8af 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -349,14 +349,10 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
349 if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS) 349 if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
350 return -ENOTTY; 350 return -ENOTTY;
351 351
352 cfg = kmalloc(sizeof(*cfg), GFP_KERNEL); 352 cfg = memdup_user((void *)arg, sizeof(*cfg));
353 if (!cfg) 353 if (IS_ERR(cfg))
354 return -ENOMEM; 354 return PTR_ERR(cfg);
355 355
356 if (copy_from_user(cfg, (void *)arg, sizeof(*cfg))) {
357 err = -EFAULT;
358 goto free;
359 }
360 if (cfg->version != USB_STREAM_INTERFACE_VERSION) { 356 if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
361 err = -ENXIO; 357 err = -ENXIO;
362 goto free; 358 goto free;
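
This hunk, like the hwdep DSP loader below, replaces the open-coded kmalloc() + copy_from_user() pair with memdup_user(), which allocates and copies in one step and returns an ERR_PTR on failure, so the separate -ENOMEM and -EFAULT paths disappear. A minimal sketch of the conversion on a hypothetical ioctl argument (struct my_params and my_handle_params() are stand-ins):

	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct my_params { int version; };

	static int my_handle_params(const struct my_params *p)
	{
		return p->version == 1 ? 0 : -ENXIO;
	}

	static int my_ioctl(void __user *arg)
	{
		struct my_params *cfg;
		int err;

		cfg = memdup_user(arg, sizeof(*cfg));
		if (IS_ERR(cfg))	/* -ENOMEM or -EFAULT, chosen by memdup_user */
			return PTR_ERR(cfg);

		err = my_handle_params(cfg);
		kfree(cfg);
		return err;
	}
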
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index 4af8740db717..f3d8f71265dd 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -203,13 +203,12 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
203 203
204 if (access_ok(VERIFY_READ, dsp->image, dsp->length)) { 204 if (access_ok(VERIFY_READ, dsp->image, dsp->length)) {
205 struct usb_device* dev = priv->chip.dev; 205 struct usb_device* dev = priv->chip.dev;
206 char *buf = kmalloc(dsp->length, GFP_KERNEL); 206 char *buf;
207 if (!buf) 207
208 return -ENOMEM; 208 buf = memdup_user(dsp->image, dsp->length);
209 if (copy_from_user(buf, dsp->image, dsp->length)) { 209 if (IS_ERR(buf))
210 kfree(buf); 210 return PTR_ERR(buf);
211 return -EFAULT; 211
212 }
213 err = usb_set_interface(dev, 0, 1); 212 err = usb_set_interface(dev, 0, 1);
214 if (err) 213 if (err)
215 snd_printk(KERN_ERR "usb_set_interface error \n"); 214 snd_printk(KERN_ERR "usb_set_interface error \n");