-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci-devices-cciss28
-rw-r--r--Documentation/SubmittingPatches2
-rw-r--r--Documentation/arm/tcm.txt10
-rw-r--r--Documentation/cgroups/cgroups.txt11
-rw-r--r--Documentation/connector/cn_test.c2
-rw-r--r--Documentation/connector/connector.txt8
-rw-r--r--Documentation/hwmon/ltc42157
-rw-r--r--Documentation/hwmon/ltc42457
-rw-r--r--Documentation/i2c/instantiating-devices2
-rw-r--r--Documentation/isdn/INTERFACE.CAPI83
-rw-r--r--Documentation/misc-devices/eeprom (renamed from Documentation/i2c/chips/eeprom)0
-rw-r--r--Documentation/misc-devices/max6875 (renamed from Documentation/i2c/chips/max6875)6
-rw-r--r--Documentation/networking/pktgen.txt8
-rw-r--r--Documentation/networking/timestamping/timestamping.c2
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--Documentation/vm/ksm.txt13
-rw-r--r--Documentation/vm/page-types.c304
-rw-r--r--Documentation/vm/pagemap.txt8
-rw-r--r--Documentation/w1/masters/ds24826
-rw-r--r--MAINTAINERS44
-rw-r--r--Makefile2
-rw-r--r--arch/arm/common/sa1111.c25
-rw-r--r--arch/arm/configs/h3600_defconfig1
-rw-r--r--arch/arm/configs/iop33x_defconfig554
-rw-r--r--arch/arm/include/asm/glue.h26
-rw-r--r--arch/arm/include/asm/hardware/iop3xx.h1
-rw-r--r--arch/arm/include/asm/smp_plat.h16
-rw-r--r--arch/arm/include/asm/unistd.h1
-rw-r--r--arch/arm/kernel/entry-armv.S18
-rw-r--r--arch/arm/kernel/entry-common.S11
-rw-r--r--arch/arm/kernel/head-common.S4
-rw-r--r--arch/arm/kernel/smp.c13
-rw-r--r--arch/arm/kernel/smp_twd.c4
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/mach-bcmring/core.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c1
-rw-r--r--arch/arm/mach-integrator/pci_v3.c2
-rw-r--r--arch/arm/mach-iop32x/include/mach/iop32x.h2
-rw-r--r--arch/arm/mach-iop33x/include/mach/iop33x.h2
-rw-r--r--arch/arm/mach-ns9xxx/clock.c2
-rw-r--r--arch/arm/mach-omap2/clock34xx.c35
-rw-r--r--arch/arm/mach-omap2/pm-debug.c4
-rw-r--r--arch/arm/mach-omap2/pm34xx.c187
-rw-r--r--arch/arm/mach-omap2/powerdomain.c39
-rw-r--r--arch/arm/mach-sa1100/Kconfig5
-rw-r--r--arch/arm/mach-sa1100/time.c2
-rw-r--r--arch/arm/mach-u300/gpio.c10
-rw-r--r--arch/arm/mach-u300/include/mach/gpio.h1
-rw-r--r--arch/arm/mm/Kconfig57
-rw-r--r--arch/arm/mm/Makefile4
-rw-r--r--arch/arm/mm/fault.c53
-rw-r--r--arch/arm/mm/mmap.c2
-rw-r--r--arch/arm/mm/mmu.c39
-rw-r--r--arch/arm/mm/pabort-legacy.S19
-rw-r--r--arch/arm/mm/pabort-v6.S19
-rw-r--r--arch/arm/mm/pabort-v7.S20
-rw-r--r--arch/arm/mm/proc-arm1020.S2
-rw-r--r--arch/arm/mm/proc-arm1020e.S2
-rw-r--r--arch/arm/mm/proc-arm1022.S2
-rw-r--r--arch/arm/mm/proc-arm1026.S2
-rw-r--r--arch/arm/mm/proc-arm6_7.S4
-rw-r--r--arch/arm/mm/proc-arm720.S2
-rw-r--r--arch/arm/mm/proc-arm740.S2
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm922.S2
-rw-r--r--arch/arm/mm/proc-arm925.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-arm940.S2
-rw-r--r--arch/arm/mm/proc-arm946.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-fa526.S2
-rw-r--r--arch/arm/mm/proc-feroceon.S2
-rw-r--r--arch/arm/mm/proc-mohawk.S2
-rw-r--r--arch/arm/mm/proc-sa110.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/plat-iop/pci.c3
-rw-r--r--arch/arm/plat-iop/time.c2
-rw-r--r--arch/arm/plat-omap/include/mach/cpu.h37
-rw-r--r--arch/arm/plat-omap/include/mach/powerdomain.h2
-rw-r--r--arch/arm/plat-omap/iovmm.c9
-rw-r--r--arch/arm/plat-omap/sram.c3
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/mci.h3
-rw-r--r--arch/blackfin/mach-bf561/coreb.c2
-rw-r--r--arch/cris/arch-v10/drivers/sync_serial.c2
-rw-r--r--arch/cris/arch-v32/drivers/mach-fs/gpio.c2
-rw-r--r--arch/m32r/include/asm/io.h7
-rw-r--r--arch/m32r/kernel/m32r_ksyms.c6
-rw-r--r--arch/m32r/kernel/time.c9
-rw-r--r--arch/m32r/kernel/traps.c4
-rw-r--r--arch/m32r/lib/delay.c4
-rw-r--r--arch/m32r/mm/discontig.c5
-rw-r--r--arch/m32r/mm/mmu.S12
-rw-r--r--arch/m68k/include/asm/hardirq_mm.h12
-rw-r--r--arch/m68knommu/kernel/asm-offsets.c28
-rw-r--r--arch/m68knommu/kernel/entry.S6
-rw-r--r--arch/m68knommu/mm/init.c2
-rw-r--r--arch/m68knommu/platform/5206e/config.c1
-rw-r--r--arch/m68knommu/platform/68328/entry.S32
-rw-r--r--arch/m68knommu/platform/68360/entry.S16
-rw-r--r--arch/m68knommu/platform/coldfire/entry.S20
-rw-r--r--arch/microblaze/kernel/entry.S2
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S2
-rw-r--r--arch/microblaze/kernel/process.c2
-rw-r--r--arch/mn10300/include/asm/uaccess.h73
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/clock.h6
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/clock.h6
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/include/asm/fixmap.h4
-rw-r--r--arch/parisc/include/asm/hardirq.h20
-rw-r--r--arch/parisc/include/asm/ptrace.h5
-rw-r--r--arch/parisc/include/asm/syscall.h40
-rw-r--r--arch/parisc/include/asm/thread_info.h14
-rw-r--r--arch/parisc/kernel/asm-offsets.c4
-rw-r--r--arch/parisc/kernel/entry.S21
-rw-r--r--arch/parisc/kernel/irq.c5
-rw-r--r--arch/parisc/kernel/module.c2
-rw-r--r--arch/parisc/kernel/ptrace.c42
-rw-r--r--arch/parisc/kernel/signal.c5
-rw-r--r--arch/parisc/kernel/syscall.S22
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S10
-rw-r--r--arch/parisc/mm/init.c11
-rw-r--r--arch/powerpc/kvm/timing.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c2
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c2
-rw-r--r--arch/s390/kvm/kvm-s390.h2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/hardirq_32.h12
-rw-r--r--arch/sparc/include/asm/irq_32.h4
-rw-r--r--arch/sparc/include/asm/pgtable_64.h4
-rw-r--r--arch/sparc/kernel/ktlb.S8
-rw-r--r--arch/sparc/kernel/perf_event.c577
-rw-r--r--arch/sparc/oprofile/init.c1
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kernel/smp.c1
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c84
-rw-r--r--arch/x86/kvm/paging_tmpl.h18
-rw-r--r--arch/x86/kvm/svm.c25
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/xen/debugfs.c2
-rw-r--r--block/blk-barrier.c45
-rw-r--r--block/blk-core.c21
-rw-r--r--block/blk-merge.c2
-rw-r--r--block/blk-settings.c34
-rw-r--r--block/blk-sysfs.c11
-rw-r--r--block/cfq-iosched.c63
-rw-r--r--block/compat_ioctl.c13
-rw-r--r--block/genhd.c4
-rw-r--r--block/ioctl.c17
-rw-r--r--drivers/acpi/Kconfig12
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/acpi_pad.c514
-rw-r--r--drivers/acpi/dock.c16
-rw-r--r--drivers/acpi/ec.c56
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/processor_core.c7
-rw-r--r--drivers/acpi/scan.c7
-rw-r--r--drivers/acpi/video.c2
-rw-r--r--drivers/atm/ambassador.c8
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/block/DAC960.c156
-rw-r--r--drivers/block/cciss.c755
-rw-r--r--drivers/block/cciss.h12
-rw-r--r--drivers/block/cpqarray.c63
-rw-r--r--drivers/char/agp/parisc-agp.c2
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/char/bfin-otp.c2
-rw-r--r--drivers/char/cyclades.c2
-rw-r--r--drivers/char/dtlk.c1
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1
-rw-r--r--drivers/char/serial167.c7
-rw-r--r--drivers/char/tty_ldisc.c7
-rw-r--r--drivers/char/vt_ioctl.c6
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c2
-rw-r--r--drivers/connector/cn_proc.c3
-rw-r--r--drivers/connector/cn_queue.c12
-rw-r--r--drivers/connector/connector.c22
-rw-r--r--drivers/edac/amd64_edac.c104
-rw-r--r--drivers/edac/amd64_edac.h23
-rw-r--r--drivers/edac/amd64_edac_inj.c49
-rw-r--r--drivers/firewire/core-cdev.c1
-rw-r--r--drivers/firmware/iscsi_ibft.c2
-rw-r--r--drivers/firmware/iscsi_ibft_find.c4
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c138
-rw-r--r--drivers/gpu/drm/i915/intel_display.c11
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/r100.c432
-rw-r--r--drivers/gpu/drm/radeon/r100d.h145
-rw-r--r--drivers/gpu/drm/radeon/r200.c3
-rw-r--r--drivers/gpu/drm/radeon/r300.c308
-rw-r--r--drivers/gpu/drm/radeon/r300d.h205
-rw-r--r--drivers/gpu/drm/radeon/r420.c6
-rw-r--r--drivers/gpu/drm/radeon/r420d.h24
-rw-r--r--drivers/gpu/drm/radeon/r520.c3
-rw-r--r--drivers/gpu/drm/radeon/r600.c314
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c10
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600d.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h58
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h253
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c237
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c45
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c17
-rw-r--r--drivers/gpu/drm/radeon/rs100d.h40
-rw-r--r--drivers/gpu/drm/radeon/rs400.c275
-rw-r--r--drivers/gpu/drm/radeon/rs400d.h160
-rw-r--r--drivers/gpu/drm/radeon/rs600.c481
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h470
-rw-r--r--drivers/gpu/drm/radeon/rs690.c355
-rw-r--r--drivers/gpu/drm/radeon/rs690d.h307
-rw-r--r--drivers/gpu/drm/radeon/rs690r.h99
-rw-r--r--drivers/gpu/drm/radeon/rv200d.h36
-rw-r--r--drivers/gpu/drm/radeon/rv250d.h123
-rw-r--r--drivers/gpu/drm/radeon/rv350d.h52
-rw-r--r--drivers/gpu/drm/radeon/rv515.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770.c199
-rw-r--r--drivers/gpu/drm/ttm/ttm_global.c2
-rw-r--r--drivers/hid/hidraw.c1
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c3
-rw-r--r--drivers/hwmon/ltc4215.c47
-rw-r--r--drivers/hwmon/ltc4245.c131
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c4
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/ide/ide-proc.c8
-rw-r--r--drivers/ide/sis5513.c10
-rw-r--r--drivers/infiniband/core/ucm.c1
-rw-r--r--drivers/infiniband/core/user_mad.c1
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/input/evdev.c1
-rw-r--r--drivers/input/input.c3
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/mousedev.c1
-rw-r--r--drivers/isdn/capi/capi.c2
-rw-r--r--drivers/isdn/capi/capidrv.c27
-rw-r--r--drivers/isdn/divert/divert_procfs.c1
-rw-r--r--drivers/isdn/gigaset/asyncdata.c28
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c87
-rw-r--r--drivers/isdn/gigaset/common.c134
-rw-r--r--drivers/isdn/gigaset/ev-layer.c30
-rw-r--r--drivers/isdn/gigaset/i4l.c23
-rw-r--r--drivers/isdn/gigaset/interface.c9
-rw-r--r--drivers/isdn/gigaset/isocdata.c30
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/leds/leds-pca9532.c3
-rw-r--r--drivers/lguest/lguest_user.c2
-rw-r--r--drivers/macintosh/therm_adt746x.c4
-rw-r--r--drivers/macintosh/therm_pm72.c4
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/md/dm-log-userspace-transfer.c6
-rw-r--r--drivers/md/dm.c16
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c1
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/radio/radio-cadet.c1
-rw-r--r--drivers/media/video/cpia.c1
-rw-r--r--drivers/mfd/ab3100-core.c4
-rw-r--r--drivers/mfd/ucb1400_core.c1
-rw-r--r--drivers/misc/eeprom/max6875.c29
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/sgi-gru/grufile.c3
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/sdio_cis.c72
-rw-r--r--drivers/mmc/host/Kconfig41
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/s3cmci.c608
-rw-r--r--drivers/mmc/host/s3cmci.h14
-rw-r--r--drivers/mtd/mtd_blkdevs.c19
-rw-r--r--drivers/net/3c59x.c77
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/au1000_eth.c4
-rw-r--r--drivers/net/bcm63xx_enet.c2
-rw-r--r--drivers/net/benet/be.h1
-rw-r--r--drivers/net/benet/be_cmds.c4
-rw-r--r--drivers/net/benet/be_cmds.h5
-rw-r--r--drivers/net/benet/be_ethtool.c2
-rw-r--r--drivers/net/benet/be_main.c29
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/cnic.c3
-rw-r--r--drivers/net/cnic_if.h4
-rw-r--r--drivers/net/e1000e/82571.c4
-rw-r--r--drivers/net/e1000e/netdev.c13
-rw-r--r--drivers/net/ethoc.c81
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/igb/igb_main.c13
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c232
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c56
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h11
-rw-r--r--drivers/net/ks8851_mll.c1697
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/pasemi_mac_ethtool.c3
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c10
-rw-r--r--drivers/net/pppol2tp.c2
-rw-r--r--drivers/net/qlge/qlge.h26
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c44
-rw-r--r--drivers/net/qlge/qlge_mpi.c12
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/skge.c16
-rw-r--r--drivers/net/skge.h2
-rw-r--r--drivers/net/sky2.c7
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/tg3.c41
-rw-r--r--drivers/net/tg3.h2
-rw-r--r--drivers/net/usb/rndis_host.c1
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wireless/Kconfig13
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c6
-rw-r--r--drivers/net/wireless/b43/pio.c60
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/pcmcia/sa1100_assabet.c2
-rw-r--r--drivers/pcmcia/sa1100_neponset.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c127
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/scsi/sg.c43
-rw-r--r--drivers/serial/8250.c7
-rw-r--r--drivers/serial/Kconfig2
-rw-r--r--drivers/serial/icom.c54
-rw-r--r--drivers/serial/sa1100.c2
-rw-r--r--drivers/serial/serial_cs.c12
-rw-r--r--drivers/serial/serial_txx9.c39
-rw-r--r--drivers/sfi/sfi_core.c17
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi_imx.c (renamed from drivers/spi/mxc_spi.c)383
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/dst/dcore.c9
-rw-r--r--drivers/staging/iio/light/tsl2561.c4
-rw-r--r--drivers/staging/pohmelfs/config.c5
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/gadget/inode.c1
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/host/whci/debug.c6
-rw-r--r--drivers/usb/misc/rio500.c3
-rw-r--r--drivers/uwb/uwb-debug.c6
-rw-r--r--drivers/video/da8xx-fb.c1
-rw-r--r--drivers/video/msm/mddi.c2
-rw-r--r--drivers/video/omap/blizzard.c10
-rw-r--r--drivers/video/omap/omapfb_main.c22
-rw-r--r--drivers/video/uvesafb.c5
-rw-r--r--drivers/w1/masters/ds2482.c35
-rw-r--r--drivers/w1/w1_netlink.c2
-rw-r--r--drivers/xen/xenfs/xenbus.c1
-rw-r--r--firmware/Makefile7
-rw-r--r--firmware/WHENCE5
-rw-r--r--firmware/cis/COMpad2.cis.ihex11
-rw-r--r--firmware/cis/COMpad4.cis.ihex9
-rw-r--r--firmware/cis/DP83903.cis.ihex14
-rw-r--r--firmware/cis/NE2K.cis.ihex8
-rw-r--r--firmware/cis/tamarack.cis.ihex10
-rw-r--r--fs/afs/cache.h12
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/anon_inodes.c2
-rw-r--r--fs/bio.c49
-rw-r--r--fs/btrfs/acl.c6
-rw-r--r--fs/btrfs/btrfs_inode.h8
-rw-r--r--fs/btrfs/ctree.h27
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/extent-tree.c391
-rw-r--r--fs/btrfs/extent_io.c92
-rw-r--r--fs/btrfs/extent_io.h13
-rw-r--r--fs/btrfs/file.c35
-rw-r--r--fs/btrfs/inode.c239
-rw-r--r--fs/btrfs/ioctl.c62
-rw-r--r--fs/btrfs/ordered-data.c93
-rw-r--r--fs/btrfs/ordered-data.h4
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/transaction.c10
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/coda/psdev.c1
-rw-r--r--fs/ext4/Kconfig14
-rw-r--r--fs/ext4/inode.c28
-rw-r--r--fs/ext4/super.c31
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/nfsd/nfsctl.c2
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/nilfs2/file.c2
-rw-r--r--fs/nilfs2/mdt.c2
-rw-r--r--fs/nilfs2/nilfs.h4
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/ocfs2/cluster/netdebug.c4
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c8
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/omfs/dir.c2
-rw-r--r--fs/omfs/file.c2
-rw-r--r--fs/omfs/omfs.h4
-rw-r--r--fs/partitions/check.c12
-rw-r--r--fs/proc/kcore.c1
-rw-r--r--fs/proc/page.c5
-rw-r--r--fs/select.c1
-rw-r--r--include/asm-generic/gpio.h1
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_fb_helper.h7
-rw-r--r--include/drm/drm_pciids.h4
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/atmdev.h2
-rw-r--r--include/linux/blkdev.h48
-rw-r--r--include/linux/blktrace_api.h2
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/connector.h11
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/genhd.h21
-rw-r--r--include/linux/mroute.h4
-rw-r--r--include/linux/mroute6.h4
-rw-r--r--include/linux/net.h8
-rw-r--r--include/linux/netfilter.h4
-rw-r--r--include/linux/perf_event.h5
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/res_counter.h6
-rw-r--r--include/linux/socket.h21
-rw-r--r--include/net/compat.h4
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ipv6.h4
-rw-r--r--include/net/sctp/structs.h4
-rw-r--r--include/net/sock.h12
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/udp.h2
-rw-r--r--include/trace/events/block.h33
-rw-r--r--init/Kconfig18
-rw-r--r--kernel/cgroup.c15
-rw-r--r--kernel/hrtimer.c2
-rw-r--r--kernel/kprobes.c4
-rw-r--r--kernel/module.c7
-rw-r--r--kernel/perf_event.c282
-rw-r--r--kernel/rcutree_trace.c10
-rw-r--r--kernel/res_counter.c18
-rw-r--r--kernel/sched.c2
-rw-r--r--kernel/time/tick-sched.c9
-rw-r--r--kernel/time/timer_list.c2
-rw-r--r--kernel/time/timer_stats.c2
-rw-r--r--kernel/trace/blktrace.c39
-rw-r--r--kernel/trace/ftrace.c27
-rw-r--r--kernel/trace/kmemtrace.c2
-rw-r--r--kernel/trace/trace_branch.c8
-rw-r--r--kernel/trace/trace_event_profile.c15
-rw-r--r--kernel/trace/trace_hw_branches.c8
-rw-r--r--kernel/trace/trace_output.c18
-rw-r--r--kernel/trace/trace_syscalls.c4
-rw-r--r--lib/vsprintf.c2
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/ksm.c10
-rw-r--r--mm/memcontrol.c127
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/swapfile.c12
-rw-r--r--mm/vmalloc.c49
-rw-r--r--net/atm/common.c2
-rw-r--r--net/atm/common.h2
-rw-r--r--net/atm/pvc.c2
-rw-r--r--net/atm/svc.c2
-rw-r--r--net/ax25/af_ax25.c4
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/l2cap.c4
-rw-r--r--net/bluetooth/rfcomm/sock.c4
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/can/raw.c2
-rw-r--r--net/compat.c12
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/net-sysfs.c4
-rw-r--r--net/core/pktgen.c10
-rw-r--r--net/core/sock.c27
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/proto.c10
-rw-r--r--net/decnet/af_decnet.c6
-rw-r--r--net/ieee802154/dgram.c2
-rw-r--r--net/ieee802154/raw.c2
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/devinet.c16
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_output.c11
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv4/udp_impl.h4
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c6
-rw-r--r--net/ipv6/raw.c6
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/udp_impl.h4
-rw-r--r--net/ipx/af_ipx.c2
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/mac80211/tx.c5
-rw-r--r--net/netfilter/nf_sockopt.c4
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/phonet/pep.c2
-rw-r--r--net/phonet/socket.c1
-rw-r--r--net/rds/af_rds.c2
-rw-r--r--net/rfkill/core.c1
-rw-r--r--net/rose/af_rose.c2
-rw-r--r--net/rxrpc/af_rxrpc.c2
-rw-r--r--net/sctp/socket.c62
-rw-r--r--net/socket.c2
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--samples/tracepoints/tracepoint-sample.c2
-rw-r--r--security/integrity/ima/ima_fs.c10
-rw-r--r--sound/aoa/codecs/tas.c9
-rw-r--r--sound/drivers/opl3/opl3_midi.c28
-rw-r--r--sound/mips/hal2.c2
-rw-r--r--sound/mips/sgio2audio.c2
-rw-r--r--sound/pci/ctxfi/ctatc.c4
-rw-r--r--sound/pci/echoaudio/echoaudio.c30
-rw-r--r--sound/pci/echoaudio/mia.c1
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_analog.c139
-rw-r--r--sound/pci/hda/patch_conexant.c12
-rw-r--r--sound/pci/hda/patch_realtek.c333
-rw-r--r--sound/pci/hda/patch_sigmatel.c20
-rw-r--r--sound/pci/ice1712/ice1712.c2
-rw-r--r--sound/pci/ice1712/ice1724.c6
-rw-r--r--sound/pci/intel8x0.c12
-rw-r--r--sound/pci/via82xx.c27
-rw-r--r--sound/ppc/keywest.c14
-rw-r--r--sound/soc/blackfin/Kconfig98
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.c8
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.c8
-rw-r--r--sound/soc/codecs/wm8350.c4
-rw-r--r--sound/soc/codecs/wm8940.c2
-rw-r--r--sound/soc/davinci/davinci-i2s.c37
-rw-r--r--sound/soc/davinci/davinci-mcasp.c80
-rw-r--r--sound/soc/davinci/davinci-mcasp.h7
-rw-r--r--sound/soc/davinci/davinci-pcm.c13
-rw-r--r--sound/soc/davinci/davinci-pcm.h1
-rw-r--r--sound/soc/imx/mxc-ssi.c8
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/soc-dapm.c5
-rw-r--r--sound/usb/usbmixer.c23
-rw-r--r--tools/perf/Documentation/perf-timechart.txt3
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/builtin-record.c6
-rw-r--r--tools/perf/builtin-stat.c8
-rw-r--r--tools/perf/builtin-timechart.c10
-rw-r--r--tools/perf/builtin-top.c1
-rw-r--r--tools/perf/builtin-trace.c6
-rw-r--r--tools/perf/design.txt3
-rw-r--r--tools/perf/util/svghelper.c14
-rw-r--r--tools/perf/util/symbol.c3
-rw-r--r--tools/perf/util/trace-event-parse.c9
-rw-r--r--virt/kvm/kvm_main.c16
590 files changed, 12988 insertions, 5424 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
index 0a92a7c93a62..4f29e5f1ebfa 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
@@ -31,3 +31,31 @@ Date: March 2009
 Kernel Version: 2.6.30
 Contact: iss_storagedev@hp.com
 Description: A symbolic link to /sys/block/cciss!cXdY
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/rescan
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Kicks off a rescan of the controller to discover logical
+             drive topology changes.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/lunid
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the 8-byte LUN ID used to address logical
+             drive Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/raid_level
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the RAID level of logical drive Y of
+             controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/usage_count
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the usage count (number of opens) of logical drive Y
+             of controller X.
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index b7f9d3b4bbf6..72651f788f4e 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -232,7 +232,7 @@ your e-mail client so that it sends your patches untouched.
 When sending patches to Linus, always follow step #7.
 
 Large changes are not appropriate for mailing lists, and some
-maintainers. If your patch, uncompressed, exceeds 40 kB in size,
+maintainers. If your patch, uncompressed, exceeds 300 kB in size,
 it is preferred that you store your patch on an Internet-accessible
 server, and provide instead a URL (link) pointing to your patch.
 
diff --git a/Documentation/arm/tcm.txt b/Documentation/arm/tcm.txt
index 074f4be6667f..77fd9376e6d7 100644
--- a/Documentation/arm/tcm.txt
+++ b/Documentation/arm/tcm.txt
@@ -29,11 +29,13 @@ TCM location and size. Notice that this is not a MMU table: you
 actually move the physical location of the TCM around. At the
 place you put it, it will mask any underlying RAM from the
 CPU so it is usually wise not to overlap any physical RAM with
-the TCM. The TCM memory exists totally outside the MMU and will
-override any MMU mappings.
+the TCM.
 
-Code executing inside the ITCM does not "see" any MMU mappings
-and e.g. register accesses must be made to physical addresses.
+The TCM memory can then be remapped to another address again using
+the MMU, but notice that the TCM is often used in situations where
+the MMU is turned off. To avoid confusion the current Linux
+implementation will map the TCM 1 to 1 from physical to virtual
+memory in the location specified by the machine.
 
 TCM is used for a few things:
 
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 455d4e6d346d..0b33bfe7dde9 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -227,7 +227,14 @@ as the path relative to the root of the cgroup file system.
 Each cgroup is represented by a directory in the cgroup file system
 containing the following files describing that cgroup:
 
- - tasks: list of tasks (by pid) attached to that cgroup
+ - tasks: list of tasks (by pid) attached to that cgroup. This list
+   is not guaranteed to be sorted. Writing a thread id into this file
+   moves the thread into this cgroup.
+ - cgroup.procs: list of tgids in the cgroup. This list is not
+   guaranteed to be sorted or free of duplicate tgids, and userspace
+   should sort/uniquify the list if this property is required.
+   Writing a tgid into this file moves all threads with that tgid into
+   this cgroup.
  - notify_on_release flag: run the release agent on exit?
  - release_agent: the path to use for release notifications (this file
    exists in the top cgroup only)
@@ -374,7 +381,7 @@ Now you want to do something with this cgroup.
 
 In this directory you can find several files:
 # ls
-notify_on_release tasks
+cgroup.procs notify_on_release tasks
 (plus whatever files added by the attached subsystems)
 
 Now attach your shell to this cgroup:
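The cgroup.procs semantics described above can be exercised from user space by writing a tgid into that file. The sketch below is illustrative only and not part of this patch; the mount point /dev/cgroup and the group name "mygroup" are assumptions.

#include <stdio.h>
#include <unistd.h>

/* Move the calling process (i.e. every thread sharing its tgid) into an
 * assumed cgroup by writing the tgid to cgroup.procs. */
int main(void)
{
	FILE *f = fopen("/dev/cgroup/mygroup/cgroup.procs", "w");

	if (!f) {
		perror("cgroup.procs");
		return 1;
	}
	fprintf(f, "%d\n", getpid());	/* getpid() returns the tgid */
	fclose(f);
	return 0;
}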
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index 1711adc33373..b07add3467f1 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -34,7 +34,7 @@ static char cn_test_name[] = "cn_test";
 static struct sock *nls;
 static struct timer_list cn_test_timer;
 
-static void cn_test_callback(struct cn_msg *msg)
+static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 {
 	pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n",
 		__func__, jiffies, msg->id.idx, msg->id.val,
diff --git a/Documentation/connector/connector.txt b/Documentation/connector/connector.txt
index 81e6bf6ead57..78c9466a9aa8 100644
--- a/Documentation/connector/connector.txt
+++ b/Documentation/connector/connector.txt
@@ -23,7 +23,7 @@ handling, etc... The Connector driver allows any kernelspace agents to use
 netlink based networking for inter-process communication in a significantly
 easier way:
 
-int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *));
+int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 void cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask);
 
 struct cb_id
@@ -53,15 +53,15 @@ struct cn_msg
 Connector interfaces.
 /*****************************************/
 
-int cn_add_callback(struct cb_id *id, char *name, void (*callback) (void *));
+int cn_add_callback(struct cb_id *id, char *name, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
 
  Registers new callback with connector core.
 
  struct cb_id *id              - unique connector's user identifier.
                                  It must be registered in connector.h for legal in-kernel users.
  char *name                    - connector's callback symbolic name.
- void (*callback) (void *)     - connector's callback.
-                                 Argument must be dereferenced to struct cn_msg *.
+ void (*callback) (struct cn..) - connector's callback.
+                                 cn_msg and the sender's credentials
 
 
 void cn_del_callback(struct cb_id *id);
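To illustrate the new two-argument prototype documented above, here is a minimal, hypothetical module that registers and removes a connector callback; the cb_id values and the "cn_example" name are made up, and error handling is limited to the return value of cn_add_callback().

#include <linux/module.h>
#include <linux/connector.h>

static struct cb_id cn_example_id = { .idx = 0x123, .val = 0x345 };

static void cn_example_callback(struct cn_msg *msg,
				struct netlink_skb_parms *nsp)
{
	/* msg is the received connector message; nsp carries the sender's
	 * netlink credentials. */
	pr_info("cn_example: idx=%x val=%x seq=%u ack=%u len=%d\n",
		msg->id.idx, msg->id.val, msg->seq, msg->ack, msg->len);
}

static int __init cn_example_init(void)
{
	return cn_add_callback(&cn_example_id, "cn_example",
			       cn_example_callback);
}

static void __exit cn_example_exit(void)
{
	cn_del_callback(&cn_example_id);
}

module_init(cn_example_init);
module_exit(cn_example_exit);
MODULE_LICENSE("GPL");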
diff --git a/Documentation/hwmon/ltc4215 b/Documentation/hwmon/ltc4215
index 2e6a21eb656c..c196a1846259 100644
--- a/Documentation/hwmon/ltc4215
+++ b/Documentation/hwmon/ltc4215
@@ -22,12 +22,13 @@ Usage Notes
 -----------
 
 This driver does not probe for LTC4215 devices, due to the fact that some
-of the possible addresses are unfriendly to probing. You will need to use
-the "force" parameter to tell the driver where to find the device.
+of the possible addresses are unfriendly to probing. You will have to
+instantiate the devices explicitly.
 
 Example: the following will load the driver for an LTC4215 at address 0x44
 on I2C bus #0:
-$ modprobe ltc4215 force=0,0x44
+$ modprobe ltc4215
+$ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device
 
 
 Sysfs entries
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
index bae7a3adc5d8..02838a47d862 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245
@@ -23,12 +23,13 @@ Usage Notes
 -----------
 
 This driver does not probe for LTC4245 devices, due to the fact that some
-of the possible addresses are unfriendly to probing. You will need to use
-the "force" parameter to tell the driver where to find the device.
+of the possible addresses are unfriendly to probing. You will have to
+instantiate the devices explicitly.
 
 Example: the following will load the driver for an LTC4245 at address 0x23
 on I2C bus #1:
-$ modprobe ltc4245 force=1,0x23
+$ modprobe ltc4245
+$ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device
 
 
 Sysfs entries
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index c740b7b41088..e89490270aba 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -188,7 +188,7 @@ segment, the address is sufficient to uniquely identify the device to be
 deleted.
 
 Example:
-# echo eeprom 0x50 > /sys/class/i2c-adapter/i2c-3/new_device
+# echo eeprom 0x50 > /sys/bus/i2c/devices/i2c-3/new_device
 
 While this interface should only be used when in-kernel device declaration
 can't be done, there is a variety of cases where it can be helpful:
diff --git a/Documentation/isdn/INTERFACE.CAPI b/Documentation/isdn/INTERFACE.CAPI
index 686e107923ec..5fe8de5cc727 100644
--- a/Documentation/isdn/INTERFACE.CAPI
+++ b/Documentation/isdn/INTERFACE.CAPI
@@ -60,10 +60,9 @@ open() operation on regular files or character devices.
 
 After a successful return from register_appl(), CAPI messages from the
 application may be passed to the driver for the device via calls to the
-send_message() callback function. The CAPI message to send is stored in the
-data portion of an skb. Conversely, the driver may call Kernel CAPI's
-capi_ctr_handle_message() function to pass a received CAPI message to Kernel
-CAPI for forwarding to an application, specifying its ApplID.
+send_message() callback function. Conversely, the driver may call Kernel
+CAPI's capi_ctr_handle_message() function to pass a received CAPI message to
+Kernel CAPI for forwarding to an application, specifying its ApplID.
 
 Deregistration requests (CAPI operation CAPI_RELEASE) from applications are
 forwarded as calls to the release_appl() callback function, passing the same
@@ -142,6 +141,7 @@ u16 (*send_message)(struct capi_ctr *ctrlr, struct sk_buff *skb)
 	to accepting or queueing the message. Errors occurring during the
 	actual processing of the message should be signaled with an
 	appropriate reply message.
+	May be called in process or interrupt context.
 	Calls to this function are not serialized by Kernel CAPI, ie. it must
 	be prepared to be re-entered.
 
@@ -154,7 +154,8 @@ read_proc_t *ctr_read_proc
 	system entry, /proc/capi/controllers/<n>; will be called with a
 	pointer to the device's capi_ctr structure as the last (data) argument
 
-Note: Callback functions are never called in interrupt context.
+Note: Callback functions except send_message() are never called in interrupt
+context.
 
 - to be filled in before calling capi_ctr_ready():
 
@@ -171,14 +172,40 @@ u8 serial[CAPI_SERIAL_LEN]
 	value to return for CAPI_GET_SERIAL
 
 
-4.3 The _cmsg Structure
+4.3 SKBs
+
+CAPI messages are passed between Kernel CAPI and the driver via send_message()
+and capi_ctr_handle_message(), stored in the data portion of a socket buffer
+(skb). Each skb contains a single CAPI message coded according to the CAPI 2.0
+standard.
+
+For the data transfer messages, DATA_B3_REQ and DATA_B3_IND, the actual
+payload data immediately follows the CAPI message itself within the same skb.
+The Data and Data64 parameters are not used for processing. The Data64
+parameter may be omitted by setting the length field of the CAPI message to 22
+instead of 30.
+
+
+4.4 The _cmsg Structure
 
 (declared in <linux/isdn/capiutil.h>)
 
 The _cmsg structure stores the contents of a CAPI 2.0 message in an easily
-accessible form. It contains members for all possible CAPI 2.0 parameters, of
-which only those appearing in the message type currently being processed are
-actually used. Unused members should be set to zero.
+accessible form. It contains members for all possible CAPI 2.0 parameters,
+including subparameters of the Additional Info and B Protocol structured
+parameters, with the following exceptions:
+
+* second Calling party number (CONNECT_IND)
+
+* Data64 (DATA_B3_REQ and DATA_B3_IND)
+
+* Sending complete (subparameter of Additional Info, CONNECT_REQ and INFO_REQ)
+
+* Global Configuration (subparameter of B Protocol, CONNECT_REQ, CONNECT_RESP
+  and SELECT_B_PROTOCOL_REQ)
+
+Only those parameters appearing in the message type currently being processed
+are actually used. Unused members should be set to zero.
 
 Members are named after the CAPI 2.0 standard names of the parameters they
 represent. See <linux/isdn/capiutil.h> for the exact spelling. Member data
@@ -190,18 +217,19 @@ u16 for CAPI parameters of type 'word'
 
 u32         for CAPI parameters of type 'dword'
 
-_cstruct    for CAPI parameters of type 'struct' not containing any
-            variably-sized (struct) subparameters (eg. 'Called Party Number')
+_cstruct    for CAPI parameters of type 'struct'
             The member is a pointer to a buffer containing the parameter in
             CAPI encoding (length + content). It may also be NULL, which will
             be taken to represent an empty (zero length) parameter.
+            Subparameters are stored in encoded form within the content part.
 
-_cmstruct   for CAPI parameters of type 'struct' containing 'struct'
-            subparameters ('Additional Info' and 'B Protocol')
+_cmstruct   alternative representation for CAPI parameters of type 'struct'
+            (used only for the 'Additional Info' and 'B Protocol' parameters)
             The representation is a single byte containing one of the values:
-            CAPI_DEFAULT: the parameter is empty
-            CAPI_COMPOSE: the values of the subparameters are stored
-            individually in the corresponding _cmsg structure members
+            CAPI_DEFAULT: The parameter is empty/absent.
+            CAPI_COMPOSE: The parameter is present.
+            Subparameter values are stored individually in the corresponding
+            _cmsg structure members.
 
 Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert
 messages between their transport encoding described in the CAPI 2.0 standard
@@ -297,3 +325,26 @@ char *capi_cmd2str(u8 Command, u8 Subcommand)
 	be NULL if the command/subcommand is not one of those defined in the
 	CAPI 2.0 standard.
 
+
+7. Debugging
+
+The module kernelcapi has a module parameter showcapimsgs controlling some
+debugging output produced by the module. It can only be set when the module is
+loaded, via a parameter "showcapimsgs=<n>" to the modprobe command, either on
+the command line or in the configuration file.
+
+If the lowest bit of showcapimsgs is set, kernelcapi logs controller and
+application up and down events.
+
+In addition, every registered CAPI controller has an associated traceflag
+parameter controlling how CAPI messages sent from and to that controller are
+logged. The traceflag parameter is initialized with the value of the
+showcapimsgs parameter when the controller is registered, but can later be
+changed via the MANUFACTURER_REQ command KCAPI_CMD_TRACE.
+
+If the value of traceflag is non-zero, CAPI messages are logged.
+DATA_B3 messages are only logged if the value of traceflag is > 2.
+
+If the lowest bit of traceflag is set, only the command/subcommand and message
+length are logged. Otherwise, kernelcapi logs a readable representation of
+the entire message.
diff --git a/Documentation/i2c/chips/eeprom b/Documentation/misc-devices/eeprom
index f7e8104b5764..f7e8104b5764 100644
--- a/Documentation/i2c/chips/eeprom
+++ b/Documentation/misc-devices/eeprom
diff --git a/Documentation/i2c/chips/max6875 b/Documentation/misc-devices/max6875
index 10ca43cd1a72..1e89ee3ccc1b 100644
--- a/Documentation/i2c/chips/max6875
+++ b/Documentation/misc-devices/max6875
@@ -42,10 +42,12 @@ General Remarks
 
 Valid addresses for the MAX6875 are 0x50 and 0x52.
 Valid addresses for the MAX6874 are 0x50, 0x52, 0x54 and 0x56.
-The driver does not probe any address, so you must force the address.
+The driver does not probe any address, so you explicitly instantiate the
+devices.
 
 Example:
-$ modprobe max6875 force=0,0x50
+$ modprobe max6875
+$ echo max6875 0x50 > /sys/bus/i2c/devices/i2c-0/new_device
 
 The MAX6874/MAX6875 ignores address bit 0, so this driver attaches to multiple
 addresses. For example, for address 0x50, it also reserves 0x51.
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index c6cf4a3c16e0..61bb645d50e0 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -90,6 +90,11 @@ Examples:
  pgset "dstmac 00:00:00:00:00:00" sets MAC destination address
  pgset "srcmac 00:00:00:00:00:00" sets MAC source address
 
+ pgset "queue_map_min 0" Sets the min value of tx queue interval
+ pgset "queue_map_max 7" Sets the max value of tx queue interval, for multiqueue devices
+                         To select queue 1 of a given device,
+                         use queue_map_min=1 and queue_map_max=1
+
  pgset "src_mac_count 1" Sets the number of MACs we'll range through.
                          The 'minimum' MAC is what you set with srcmac.
 
@@ -101,6 +106,9 @@ Examples:
                          IPDST_RND, UDPSRC_RND,
                          UDPDST_RND, MACSRC_RND, MACDST_RND
                          MPLS_RND, VID_RND, SVID_RND
+                         QUEUE_MAP_RND # queue map random
+                         QUEUE_MAP_CPU # queue map mirrors smp_processor_id()
+
 
  pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then
                        cycle through the port range.
diff --git a/Documentation/networking/timestamping/timestamping.c b/Documentation/networking/timestamping/timestamping.c
index 43d143104210..a7936fe8444a 100644
--- a/Documentation/networking/timestamping/timestamping.c
+++ b/Documentation/networking/timestamping/timestamping.c
@@ -381,7 +381,7 @@ int main(int argc, char **argv)
 	memset(&hwtstamp, 0, sizeof(hwtstamp));
 	strncpy(hwtstamp.ifr_name, interface, sizeof(hwtstamp.ifr_name));
 	hwtstamp.ifr_data = (void *)&hwconfig;
-	memset(&hwconfig, 0, sizeof(&hwconfig));
+	memset(&hwconfig, 0, sizeof(hwconfig));
 	hwconfig.tx_type =
 		(so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ?
 		HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
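The one-character fix above matters because sizeof applied to &hwconfig yields the size of a pointer, not of the structure, so only the first few bytes were being cleared. A stand-alone illustration of the difference (struct config here is hypothetical, not the type from the example program):

#include <stdio.h>
#include <string.h>

struct config { int flags; int tx_type; int rx_filter; char pad[64]; };

int main(void)
{
	struct config cfg;

	memset(&cfg, 0x5a, sizeof(cfg));	/* dirty the whole struct */
	memset(&cfg, 0, sizeof(&cfg));		/* bug: clears only sizeof(void *) bytes */
	printf("pad[32] after buggy memset:   %d\n", cfg.pad[32]);
	memset(&cfg, 0, sizeof(cfg));		/* fix: clears the entire struct */
	printf("pad[32] after correct memset: %d\n", cfg.pad[32]);
	return 0;
}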
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index f1708b79f963..75fddb40f416 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -209,6 +209,7 @@ AD1884A / AD1883 / AD1984A / AD1984B
   laptop	laptop with HP jack sensing
   mobile	mobile devices with HP jack sensing
   thinkpad	Lenovo Thinkpad X300
+  touchsmart	HP Touchsmart
 
 AD1884
 ======
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index 72a22f65960e..262d8e6793a3 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -52,15 +52,15 @@ The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/,
 readable by all but writable only by root:
 
 max_kernel_pages - set to maximum number of kernel pages that KSM may use
-                   e.g. "echo 2000 > /sys/kernel/mm/ksm/max_kernel_pages"
+                   e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages"
                    Value 0 imposes no limit on the kernel pages KSM may use;
                    but note that any process using MADV_MERGEABLE can cause
                    KSM to allocate these pages, unswappable until it exits.
-                   Default: 2000 (chosen for demonstration purposes)
+                   Default: quarter of memory (chosen to not pin too much)
 
 pages_to_scan    - how many present pages to scan before ksmd goes to sleep
-                   e.g. "echo 200 > /sys/kernel/mm/ksm/pages_to_scan"
-                   Default: 200 (chosen for demonstration purposes)
+                   e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan"
+                   Default: 100 (chosen for demonstration purposes)
 
 sleep_millisecs  - how many milliseconds ksmd should sleep before next scan
                    e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs"
@@ -70,7 +70,8 @@ run - set 0 to stop ksmd from running but keep merged pages,
       set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run",
       set 2 to stop ksmd and unmerge all pages currently merged,
       but leave mergeable areas registered for next run
-      Default: 1 (for immediate use by apps which register)
+      Default: 0 (must be changed to 1 to activate KSM,
+                  except if CONFIG_SYSFS is disabled)
 
 The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
 
@@ -86,4 +87,4 @@ pages_volatile embraces several different kinds of activity, but a high
 proportion there would also indicate poor use of madvise MADV_MERGEABLE.
 
 Izik Eidus,
-Hugh Dickins, 30 July 2009
+Hugh Dickins, 24 Sept 2009
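The ksm.txt changes above note that KSM now defaults to run=0 and only considers areas an application has marked with madvise(MADV_MERGEABLE). A minimal user-space sketch of that marking step, illustrative only; the fallback definition of MADV_MERGEABLE as 12 matches the 2.6.32 kernel headers but is an assumption about the build environment:

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12	/* assumed value when libc headers lack it */
#endif

int main(void)
{
	size_t len = 16 * 4096;
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Ask KSM to consider this region; fails on kernels without KSM. */
	if (madvise(buf, len, MADV_MERGEABLE) != 0) {
		perror("madvise(MADV_MERGEABLE)");
		return 1;
	}
	/* ... fill buf; ksmd (run=1) may then merge identical pages ... */
	return 0;
}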
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index fa1a30d9e9d5..3ec4f2a22585 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -2,7 +2,10 @@
  * page-types: Tool for querying page flags
  *
  * Copyright (C) 2009 Intel corporation
- * Copyright (C) 2009 Wu Fengguang <fengguang.wu@intel.com>
+ *
+ * Authors: Wu Fengguang <fengguang.wu@intel.com>
+ *
+ * Released under the General Public License (GPL).
  */
 
 #define _LARGEFILE64_SOURCE
@@ -69,7 +72,9 @@
 #define KPF_COMPOUND_TAIL	16
 #define KPF_HUGE		17
 #define KPF_UNEVICTABLE		18
+#define KPF_HWPOISON		19
 #define KPF_NOPAGE		20
+#define KPF_KSM			21
 
 /* [32-] kernel hacking assistances */
 #define KPF_RESERVED		32
@@ -116,7 +121,9 @@ static char *page_flag_names[] = {
 	[KPF_COMPOUND_TAIL]	= "T:compound_tail",
 	[KPF_HUGE]		= "G:huge",
 	[KPF_UNEVICTABLE]	= "u:unevictable",
+	[KPF_HWPOISON]		= "X:hwpoison",
 	[KPF_NOPAGE]		= "n:nopage",
+	[KPF_KSM]		= "x:ksm",
 
 	[KPF_RESERVED]		= "r:reserved",
 	[KPF_MLOCKED]		= "m:mlocked",
@@ -152,9 +159,6 @@ static unsigned long opt_size[MAX_ADDR_RANGES];
 static int nr_vmas;
 static unsigned long pg_start[MAX_VMAS];
 static unsigned long pg_end[MAX_VMAS];
-static unsigned long voffset;
-
-static int pagemap_fd;
 
 #define MAX_BIT_FILTERS	64
 static int nr_bit_filters;
@@ -163,9 +167,16 @@ static uint64_t opt_bits[MAX_BIT_FILTERS];
 
 static int page_size;
 
-#define PAGES_BATCH	(64 << 10)	/* 64k pages */
+static int pagemap_fd;
 static int kpageflags_fd;
 
+static int opt_hwpoison;
+static int opt_unpoison;
+
+static char *hwpoison_debug_fs = "/debug/hwpoison";
+static int hwpoison_inject_fd;
+static int hwpoison_forget_fd;
+
 #define HASH_SHIFT	13
 #define HASH_SIZE	(1 << HASH_SHIFT)
 #define HASH_MASK	(HASH_SIZE - 1)
@@ -207,6 +218,74 @@ static void fatal(const char *x, ...)
 	exit(EXIT_FAILURE);
 }
 
+int checked_open(const char *pathname, int flags)
+{
+	int fd = open(pathname, flags);
+
+	if (fd < 0) {
+		perror(pathname);
+		exit(EXIT_FAILURE);
+	}
+
+	return fd;
+}
+
+/*
+ * pagemap/kpageflags routines
+ */
+
+static unsigned long do_u64_read(int fd, char *name,
+				 uint64_t *buf,
+				 unsigned long index,
+				 unsigned long count)
+{
+	long bytes;
+
+	if (index > ULONG_MAX / 8)
+		fatal("index overflow: %lu\n", index);
+
+	if (lseek(fd, index * 8, SEEK_SET) < 0) {
+		perror(name);
+		exit(EXIT_FAILURE);
+	}
+
+	bytes = read(fd, buf, count * 8);
+	if (bytes < 0) {
+		perror(name);
+		exit(EXIT_FAILURE);
+	}
+	if (bytes % 8)
+		fatal("partial read: %lu bytes\n", bytes);
+
+	return bytes / 8;
+}
+
+static unsigned long kpageflags_read(uint64_t *buf,
+				     unsigned long index,
+				     unsigned long pages)
+{
+	return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages);
+}
+
+static unsigned long pagemap_read(uint64_t *buf,
+				  unsigned long index,
+				  unsigned long pages)
+{
+	return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages);
+}
+
+static unsigned long pagemap_pfn(uint64_t val)
+{
+	unsigned long pfn;
+
+	if (val & PM_PRESENT)
+		pfn = PM_PFRAME(val);
+	else
+		pfn = 0;
+
+	return pfn;
+}
+
 
 /*
  * page flag names
@@ -255,7 +334,8 @@ static char *page_flag_longname(uint64_t flags)
  * page list and summary
  */
 
-static void show_page_range(unsigned long offset, uint64_t flags)
+static void show_page_range(unsigned long voffset,
+			    unsigned long offset, uint64_t flags)
 {
 	static uint64_t flags0;
 	static unsigned long voff;
@@ -281,7 +361,8 @@ static void show_page_range(unsigned long offset, uint64_t flags)
 	count = 1;
 }
 
-static void show_page(unsigned long offset, uint64_t flags)
+static void show_page(unsigned long voffset,
+		      unsigned long offset, uint64_t flags)
 {
 	if (opt_pid)
 		printf("%lx\t", voffset);
@@ -362,6 +443,62 @@ static uint64_t well_known_flags(uint64_t flags)
 	return flags;
 }
 
+static uint64_t kpageflags_flags(uint64_t flags)
+{
+	flags = expand_overloaded_flags(flags);
+
+	if (!opt_raw)
+		flags = well_known_flags(flags);
+
+	return flags;
+}
+
+/*
+ * page actions
+ */
+
+static void prepare_hwpoison_fd(void)
+{
+	char buf[100];
+
+	if (opt_hwpoison && !hwpoison_inject_fd) {
+		sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs);
+		hwpoison_inject_fd = checked_open(buf, O_WRONLY);
+	}
+
+	if (opt_unpoison && !hwpoison_forget_fd) {
+		sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs);
+		hwpoison_forget_fd = checked_open(buf, O_WRONLY);
+	}
+}
+
+static int hwpoison_page(unsigned long offset)
+{
+	char buf[100];
+	int len;
+
+	len = sprintf(buf, "0x%lx\n", offset);
+	len = write(hwpoison_inject_fd, buf, len);
+	if (len < 0) {
+		perror("hwpoison inject");
+		return len;
+	}
+	return 0;
+}
+
+static int unpoison_page(unsigned long offset)
+{
+	char buf[100];
+	int len;
+
+	len = sprintf(buf, "0x%lx\n", offset);
+	len = write(hwpoison_forget_fd, buf, len);
+	if (len < 0) {
+		perror("hwpoison forget");
+		return len;
+	}
+	return 0;
+}
 
 /*
  * page frame walker
@@ -394,104 +531,83 @@ static int hash_slot(uint64_t flags)
 	exit(EXIT_FAILURE);
 }
 
-static void add_page(unsigned long offset, uint64_t flags)
+static void add_page(unsigned long voffset,
+		     unsigned long offset, uint64_t flags)
 {
-	flags = expand_overloaded_flags(flags);
-
-	if (!opt_raw)
-		flags = well_known_flags(flags);
+	flags = kpageflags_flags(flags);
 
 	if (!bit_mask_ok(flags))
 		return;
 
+	if (opt_hwpoison)
+		hwpoison_page(offset);
+	if (opt_unpoison)
+		unpoison_page(offset);
+
 	if (opt_list == 1)
-		show_page_range(offset, flags);
+		show_page_range(voffset, offset, flags);
 	else if (opt_list == 2)
-		show_page(offset, flags);
+		show_page(voffset, offset, flags);
 
 	nr_pages[hash_slot(flags)]++;
 	total_pages++;
 }
 
-static void walk_pfn(unsigned long index, unsigned long count)
+#define KPAGEFLAGS_BATCH	(64 << 10)	/* 64k pages */
+static void walk_pfn(unsigned long voffset,
+		     unsigned long index,
+		     unsigned long count)
 {
+	uint64_t buf[KPAGEFLAGS_BATCH];
 	unsigned long batch;
-	unsigned long n;
+	unsigned long pages;
 	unsigned long i;
 
-	if (index > ULONG_MAX / KPF_BYTES)
-		fatal("index overflow: %lu\n", index);
-
-	lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET);
-
 	while (count) {
-		uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH];
-
-		batch = min_t(unsigned long, count, PAGES_BATCH);
-		n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES);
-		if (n == 0)
+		batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);
+		pages = kpageflags_read(buf, index, batch);
+		if (pages == 0)
 			break;
-		if (n < 0) {
-			perror(PROC_KPAGEFLAGS);
-			exit(EXIT_FAILURE);
-		}
 
-		if (n % KPF_BYTES != 0)
-			fatal("partial read: %lu bytes\n", n);
-		n = n / KPF_BYTES;
+		for (i = 0; i < pages; i++)
+			add_page(voffset + i, index + i, buf[i]);
 
-		for (i = 0; i < n; i++)
-			add_page(index + i, kpageflags_buf[i]);
-
-		index += batch;
-		count -= batch;
+		index += pages;
+		count -= pages;
 	}
 }
 
-
-#define PAGEMAP_BATCH	4096
-static unsigned long task_pfn(unsigned long pgoff)
+#define PAGEMAP_BATCH	(64 << 10)
+static void walk_vma(unsigned long index, unsigned long count)
 {
-	static uint64_t buf[PAGEMAP_BATCH];
-	static unsigned long start;
-	static long count;
-	uint64_t pfn;
+	uint64_t buf[PAGEMAP_BATCH];
+	unsigned long batch;
+	unsigned long pages;
+	unsigned long pfn;
+	unsigned long i;
 
-	if (pgoff < start || pgoff >= start + count) {
-		if (lseek64(pagemap_fd,
-			    (uint64_t)pgoff * PM_ENTRY_BYTES,
-			    SEEK_SET) < 0) {
-			perror("pagemap seek");
-			exit(EXIT_FAILURE);
-		}
-		count = read(pagemap_fd, buf, sizeof(buf));
-		if (count == 0)
-			return 0;
-		if (count < 0) {
-			perror("pagemap read");
-			exit(EXIT_FAILURE);
-		}
-		if (count % PM_ENTRY_BYTES) {
-			fatal("pagemap read not aligned.\n");
-			exit(EXIT_FAILURE);
-		}
-		count /= PM_ENTRY_BYTES;
-		start = pgoff;
-	}
+	while (count) {
+		batch = min_t(unsigned long, count, PAGEMAP_BATCH);
+		pages = pagemap_read(buf, index, batch);
+		if (pages == 0)
+			break;
 
-	pfn = buf[pgoff - start];
-	if (pfn & PM_PRESENT)
-		pfn = PM_PFRAME(pfn);
-	else
-		pfn = 0;
+		for (i = 0; i < pages; i++) {
+			pfn = pagemap_pfn(buf[i]);
+			if (pfn)
+				walk_pfn(index + i, pfn, 1);
+		}
 
-	return pfn;
+		index += pages;
+		count -= pages;
+	}
 }
 
 static void walk_task(unsigned long index, unsigned long count)
 {
-	int i = 0;
 	const unsigned long end = index + count;
+	unsigned long start;
+	int i = 0;
 
 	while (index < end) {
 
@@ -501,15 +617,11 @@ static void walk_task(unsigned long index, unsigned long count)
501 if (pg_start[i] >= end) 617 if (pg_start[i] >= end)
502 return; 618 return;
503 619
504 voffset = max_t(unsigned long, pg_start[i], index); 620 start = max_t(unsigned long, pg_start[i], index);
505 index = min_t(unsigned long, pg_end[i], end); 621 index = min_t(unsigned long, pg_end[i], end);
506 622
507 assert(voffset < index); 623 assert(start < index);
508 for (; voffset < index; voffset++) { 624 walk_vma(start, index - start);
509 unsigned long pfn = task_pfn(voffset);
510 if (pfn)
511 walk_pfn(pfn, 1);
512 }
513 } 625 }
514} 626}
515 627
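Aside: the reworked walkers above read /proc/kpageflags and /proc/pid/pagemap in large batches instead of one entry at a time. The kpageflags_read()/pagemap_read() helpers they call are presumably defined earlier in page-types.c (not visible in this excerpt); a rough sketch of what such a batched read can look like, using pread() with one 64-bit word per page frame, is:

#include <stdint.h>
#include <unistd.h>

#define KPF_BYTES 8     /* one uint64_t of flags per PFN */

/* Assumed shape of a batch reader; returns the number of whole entries read. */
static unsigned long read_flags_batch(int fd, uint64_t *buf,
                                      unsigned long index,
                                      unsigned long pages)
{
        ssize_t bytes = pread(fd, buf, pages * KPF_BYTES,
                              (off_t)index * KPF_BYTES);

        return bytes > 0 ? (unsigned long)(bytes / KPF_BYTES) : 0;
}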
@@ -527,18 +639,14 @@ static void walk_addr_ranges(void)
527{ 639{
528 int i; 640 int i;
529 641
530 kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY); 642 kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
531 if (kpageflags_fd < 0) {
532 perror(PROC_KPAGEFLAGS);
533 exit(EXIT_FAILURE);
534 }
535 643
536 if (!nr_addr_ranges) 644 if (!nr_addr_ranges)
537 add_addr_range(0, ULONG_MAX); 645 add_addr_range(0, ULONG_MAX);
538 646
539 for (i = 0; i < nr_addr_ranges; i++) 647 for (i = 0; i < nr_addr_ranges; i++)
540 if (!opt_pid) 648 if (!opt_pid)
541 walk_pfn(opt_offset[i], opt_size[i]); 649 walk_pfn(0, opt_offset[i], opt_size[i]);
542 else 650 else
543 walk_task(opt_offset[i], opt_size[i]); 651 walk_task(opt_offset[i], opt_size[i]);
544 652
@@ -575,6 +683,8 @@ static void usage(void)
575" -l|--list Show page details in ranges\n" 683" -l|--list Show page details in ranges\n"
576" -L|--list-each Show page details one by one\n" 684" -L|--list-each Show page details one by one\n"
577" -N|--no-summary Don't show summay info\n" 685" -N|--no-summary Don't show summay info\n"
686" -X|--hwpoison hwpoison pages\n"
687" -x|--unpoison unpoison pages\n"
578" -h|--help Show this usage message\n" 688" -h|--help Show this usage message\n"
579"addr-spec:\n" 689"addr-spec:\n"
580" N one page at offset N (unit: pages)\n" 690" N one page at offset N (unit: pages)\n"
@@ -624,11 +734,7 @@ static void parse_pid(const char *str)
624 opt_pid = parse_number(str); 734 opt_pid = parse_number(str);
625 735
626 sprintf(buf, "/proc/%d/pagemap", opt_pid); 736 sprintf(buf, "/proc/%d/pagemap", opt_pid);
627 pagemap_fd = open(buf, O_RDONLY); 737 pagemap_fd = checked_open(buf, O_RDONLY);
628 if (pagemap_fd < 0) {
629 perror(buf);
630 exit(EXIT_FAILURE);
631 }
632 738
633 sprintf(buf, "/proc/%d/maps", opt_pid); 739 sprintf(buf, "/proc/%d/maps", opt_pid);
634 file = fopen(buf, "r"); 740 file = fopen(buf, "r");
@@ -788,6 +894,8 @@ static struct option opts[] = {
788 { "list" , 0, NULL, 'l' }, 894 { "list" , 0, NULL, 'l' },
789 { "list-each" , 0, NULL, 'L' }, 895 { "list-each" , 0, NULL, 'L' },
790 { "no-summary", 0, NULL, 'N' }, 896 { "no-summary", 0, NULL, 'N' },
897 { "hwpoison" , 0, NULL, 'X' },
898 { "unpoison" , 0, NULL, 'x' },
791 { "help" , 0, NULL, 'h' }, 899 { "help" , 0, NULL, 'h' },
792 { NULL , 0, NULL, 0 } 900 { NULL , 0, NULL, 0 }
793}; 901};
@@ -799,7 +907,7 @@ int main(int argc, char *argv[])
799 page_size = getpagesize(); 907 page_size = getpagesize();
800 908
801 while ((c = getopt_long(argc, argv, 909 while ((c = getopt_long(argc, argv,
802 "rp:f:a:b:lLNh", opts, NULL)) != -1) { 910 "rp:f:a:b:lLNXxh", opts, NULL)) != -1) {
803 switch (c) { 911 switch (c) {
804 case 'r': 912 case 'r':
805 opt_raw = 1; 913 opt_raw = 1;
@@ -825,6 +933,14 @@ int main(int argc, char *argv[])
825 case 'N': 933 case 'N':
826 opt_no_summary = 1; 934 opt_no_summary = 1;
827 break; 935 break;
936 case 'X':
937 opt_hwpoison = 1;
938 prepare_hwpoison_fd();
939 break;
940 case 'x':
941 opt_unpoison = 1;
942 prepare_hwpoison_fd();
943 break;
828 case 'h': 944 case 'h':
829 usage(); 945 usage();
830 exit(0); 946 exit(0);
@@ -844,7 +960,7 @@ int main(int argc, char *argv[])
844 walk_addr_ranges(); 960 walk_addr_ranges();
845 961
846 if (opt_list == 1) 962 if (opt_list == 1)
847 show_page_range(0, 0); /* drain the buffer */ 963 show_page_range(0, 0, 0); /* drain the buffer */
848 964
849 if (opt_no_summary) 965 if (opt_no_summary)
850 return 0; 966 return 0;
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 600a304a828c..df09b9650a81 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -57,7 +57,9 @@ There are three components to pagemap:
57 16. COMPOUND_TAIL 57 16. COMPOUND_TAIL
58 16. HUGE 58 16. HUGE
59 18. UNEVICTABLE 59 18. UNEVICTABLE
60 19. HWPOISON
60 20. NOPAGE 61 20. NOPAGE
62 21. KSM
61 63
62Short descriptions to the page flags: 64Short descriptions to the page flags:
63 65
@@ -86,9 +88,15 @@ Short descriptions to the page flags:
8617. HUGE 8817. HUGE
87 this is an integral part of a HugeTLB page 89 this is an integral part of a HugeTLB page
88 90
9119. HWPOISON
92 hardware detected memory corruption on this page: don't touch the data!
93
8920. NOPAGE 9420. NOPAGE
90 no page frame exists at the requested address 95 no page frame exists at the requested address
91 96
9721. KSM
98 identical memory pages dynamically shared between one or more processes
99
92 [IO related page flags] 100 [IO related page flags]
93 1. ERROR IO error occurred 101 1. ERROR IO error occurred
94 3. UPTODATE page has up-to-date data 102 3. UPTODATE page has up-to-date data
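Aside: the flag numbers documented above map directly onto bit positions in /proc/kpageflags. A minimal reader that checks the two newly documented bits for a single PFN could look like this; the bit numbers (19 for HWPOISON, 21 for KSM) come from the list above, and the rest is an illustrative assumption rather than kernel code.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KPF_HWPOISON    19
#define KPF_KSM         21

int main(int argc, char **argv)
{
        uint64_t flags;
        unsigned long pfn;
        int fd;

        if (argc < 2)
                return 1;
        pfn = strtoul(argv[1], NULL, 0);

        fd = open("/proc/kpageflags", O_RDONLY);
        if (fd < 0)
                return 1;
        if (pread(fd, &flags, sizeof(flags),
                  (off_t)(pfn * sizeof(flags))) != sizeof(flags)) {
                close(fd);
                return 1;
        }

        printf("pfn %#lx: hwpoison=%d ksm=%d\n", pfn,
               !!(flags & (1ULL << KPF_HWPOISON)),
               !!(flags & (1ULL << KPF_KSM)));
        close(fd);
        return 0;
}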
diff --git a/Documentation/w1/masters/ds2482 b/Documentation/w1/masters/ds2482
index 9210d6fa5024..299b91c7609f 100644
--- a/Documentation/w1/masters/ds2482
+++ b/Documentation/w1/masters/ds2482
@@ -24,8 +24,8 @@ General Remarks
24 24
25Valid addresses are 0x18, 0x19, 0x1a, and 0x1b. 25Valid addresses are 0x18, 0x19, 0x1a, and 0x1b.
26However, the device cannot be detected without writing to the i2c bus, so no 26However, the device cannot be detected without writing to the i2c bus, so no
27detection is done. 27detection is done. You should instantiate the device explicitly.
28You should force the device address.
29 28
30$ modprobe ds2482 force=0,0x18 29$ modprobe ds2482
30$ echo ds2482 0x18 > /sys/bus/i2c/devices/i2c-0/new_device
31 31
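Aside: writing "ds2482 0x18" to new_device instantiates the client from user space. The kernel-side alternative (declaring the device in board code, as described in Documentation/i2c/instantiating-devices) would look roughly like the sketch below; the bus number and address are placeholders, not values taken from the ds2482 document.

#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info ds2482_board_info __initdata = {
        I2C_BOARD_INFO("ds2482", 0x18),
};

static int __init board_declare_ds2482(void)
{
        /* Must run before the i2c-0 adapter is registered. */
        return i2c_register_board_info(0, &ds2482_board_info, 1);
}
arch_initcall(board_declare_ds2482);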
diff --git a/MAINTAINERS b/MAINTAINERS
index c450f3abb8c9..e1da925b38c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -257,6 +257,13 @@ W: http://www.lesswatts.org/projects/acpi/
257S: Supported 257S: Supported
258F: drivers/acpi/fan.c 258F: drivers/acpi/fan.c
259 259
260ACPI PROCESSOR AGGREGATOR DRIVER
261M: Shaohua Li <shaohua.li@intel.com>
262L: linux-acpi@vger.kernel.org
263W: http://www.lesswatts.org/projects/acpi/
264S: Supported
265F: drivers/acpi/acpi_pad.c
266
260ACPI THERMAL DRIVER 267ACPI THERMAL DRIVER
261M: Zhang Rui <rui.zhang@intel.com> 268M: Zhang Rui <rui.zhang@intel.com>
262L: linux-acpi@vger.kernel.org 269L: linux-acpi@vger.kernel.org
@@ -646,24 +653,24 @@ ARM/INTEL IOP32X ARM ARCHITECTURE
646M: Lennert Buytenhek <kernel@wantstofly.org> 653M: Lennert Buytenhek <kernel@wantstofly.org>
647M: Dan Williams <dan.j.williams@intel.com> 654M: Dan Williams <dan.j.williams@intel.com>
648L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 655L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
649S: Supported 656S: Maintained
650 657
651ARM/INTEL IOP33X ARM ARCHITECTURE 658ARM/INTEL IOP33X ARM ARCHITECTURE
652M: Dan Williams <dan.j.williams@intel.com> 659M: Dan Williams <dan.j.williams@intel.com>
653L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 660L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
654S: Supported 661S: Maintained
655 662
656ARM/INTEL IOP13XX ARM ARCHITECTURE 663ARM/INTEL IOP13XX ARM ARCHITECTURE
657M: Lennert Buytenhek <kernel@wantstofly.org> 664M: Lennert Buytenhek <kernel@wantstofly.org>
658M: Dan Williams <dan.j.williams@intel.com> 665M: Dan Williams <dan.j.williams@intel.com>
659L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 666L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
660S: Supported 667S: Maintained
661 668
662ARM/INTEL IQ81342EX MACHINE SUPPORT 669ARM/INTEL IQ81342EX MACHINE SUPPORT
663M: Lennert Buytenhek <kernel@wantstofly.org> 670M: Lennert Buytenhek <kernel@wantstofly.org>
664M: Dan Williams <dan.j.williams@intel.com> 671M: Dan Williams <dan.j.williams@intel.com>
665L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 672L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
666S: Supported 673S: Maintained
667 674
668ARM/INTEL IXP2000 ARM ARCHITECTURE 675ARM/INTEL IXP2000 ARM ARCHITECTURE
669M: Lennert Buytenhek <kernel@wantstofly.org> 676M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -691,7 +698,7 @@ ARM/INTEL XSC3 (MANZANO) ARM CORE
691M: Lennert Buytenhek <kernel@wantstofly.org> 698M: Lennert Buytenhek <kernel@wantstofly.org>
692M: Dan Williams <dan.j.williams@intel.com> 699M: Dan Williams <dan.j.williams@intel.com>
693L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 700L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
694S: Supported 701S: Maintained
695 702
696ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT 703ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT
697M: Lennert Buytenhek <kernel@wantstofly.org> 704M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -741,23 +748,36 @@ M: Dirk Opfer <dirk@opfer-online.de>
741S: Maintained 748S: Maintained
742 749
743ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT 750ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
744P: Marek Vasut 751M: Marek Vasut <marek.vasut@gmail.com>
745M: marek.vasut@gmail.com
746L: linux-arm-kernel@lists.infradead.org 752L: linux-arm-kernel@lists.infradead.org
747W: http://hackndev.com 753W: http://hackndev.com
748S: Maintained 754S: Maintained
755F: arch/arm/mach-pxa/include/mach/palmtx.h
756F: arch/arm/mach-pxa/palmtx.c
757F: arch/arm/mach-pxa/include/mach/palmt5.h
758F: arch/arm/mach-pxa/palmt5.c
759F: arch/arm/mach-pxa/include/mach/palmld.h
760F: arch/arm/mach-pxa/palmld.c
761F: arch/arm/mach-pxa/include/mach/palmte2.h
762F: arch/arm/mach-pxa/palmte2.c
763F: arch/arm/mach-pxa/include/mach/palmtc.h
764F: arch/arm/mach-pxa/palmtc.c
749 765
750ARM/PALM TREO 680 SUPPORT 766ARM/PALM TREO 680 SUPPORT
751M: Tomas Cech <sleep_walker@suse.cz> 767M: Tomas Cech <sleep_walker@suse.cz>
752L: linux-arm-kernel@lists.infradead.org 768L: linux-arm-kernel@lists.infradead.org
753W: http://hackndev.com 769W: http://hackndev.com
754S: Maintained 770S: Maintained
771F: arch/arm/mach-pxa/include/mach/treo680.h
772F: arch/arm/mach-pxa/treo680.c
755 773
756ARM/PALMZ72 SUPPORT 774ARM/PALMZ72 SUPPORT
757M: Sergey Lapin <slapin@ossfans.org> 775M: Sergey Lapin <slapin@ossfans.org>
758L: linux-arm-kernel@lists.infradead.org 776L: linux-arm-kernel@lists.infradead.org
759W: http://hackndev.com 777W: http://hackndev.com
760S: Maintained 778S: Maintained
779F: arch/arm/mach-pxa/include/mach/palmz72.h
780F: arch/arm/mach-pxa/palmz72.c
761 781
762ARM/PLEB SUPPORT 782ARM/PLEB SUPPORT
763M: Peter Chubb <pleb@gelato.unsw.edu.au> 783M: Peter Chubb <pleb@gelato.unsw.edu.au>
@@ -2682,7 +2702,7 @@ F: include/linux/intel-iommu.h
2682 2702
2683INTEL IOP-ADMA DMA DRIVER 2703INTEL IOP-ADMA DMA DRIVER
2684M: Dan Williams <dan.j.williams@intel.com> 2704M: Dan Williams <dan.j.williams@intel.com>
2685S: Supported 2705S: Maintained
2686F: drivers/dma/iop-adma.c 2706F: drivers/dma/iop-adma.c
2687 2707
2688INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT 2708INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
@@ -3623,6 +3643,13 @@ F: Documentation/blockdev/nbd.txt
3623F: drivers/block/nbd.c 3643F: drivers/block/nbd.c
3624F: include/linux/nbd.h 3644F: include/linux/nbd.h
3625 3645
3646NETWORK DROP MONITOR
3647M: Neil Horman <nhorman@tuxdriver.com>
3648L: netdev@vger.kernel.org
3649S: Maintained
3650W: https://fedorahosted.org/dropwatch/
3651F: net/core/drop_monitor.c
3652
3626NETWORKING [GENERAL] 3653NETWORKING [GENERAL]
3627M: "David S. Miller" <davem@davemloft.net> 3654M: "David S. Miller" <davem@davemloft.net>
3628L: netdev@vger.kernel.org 3655L: netdev@vger.kernel.org
@@ -3953,6 +3980,7 @@ F: drivers/block/paride/
3953PARISC ARCHITECTURE 3980PARISC ARCHITECTURE
3954M: Kyle McMartin <kyle@mcmartin.ca> 3981M: Kyle McMartin <kyle@mcmartin.ca>
3955M: Helge Deller <deller@gmx.de> 3982M: Helge Deller <deller@gmx.de>
3983M: "James E.J. Bottomley" <jejb@parisc-linux.org>
3956L: linux-parisc@vger.kernel.org 3984L: linux-parisc@vger.kernel.org
3957W: http://www.parisc-linux.org/ 3985W: http://www.parisc-linux.org/
3958T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git 3986T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git
diff --git a/Makefile b/Makefile
index 00444a8e304f..e50569ab5fe8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 32 3SUBLEVEL = 32
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc3
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index ef12794c3c68..8ba7044c554d 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -1032,6 +1032,7 @@ unsigned int sa1111_pll_clock(struct sa1111_dev *sadev)
1032 1032
1033 return __sa1111_pll_clock(sachip); 1033 return __sa1111_pll_clock(sachip);
1034} 1034}
1035EXPORT_SYMBOL(sa1111_pll_clock);
1035 1036
1036/** 1037/**
1037 * sa1111_select_audio_mode - select I2S or AC link mode 1038 * sa1111_select_audio_mode - select I2S or AC link mode
@@ -1059,6 +1060,7 @@ void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode)
1059 1060
1060 spin_unlock_irqrestore(&sachip->lock, flags); 1061 spin_unlock_irqrestore(&sachip->lock, flags);
1061} 1062}
1063EXPORT_SYMBOL(sa1111_select_audio_mode);
1062 1064
1063/** 1065/**
1064 * sa1111_set_audio_rate - set the audio sample rate 1066 * sa1111_set_audio_rate - set the audio sample rate
@@ -1083,6 +1085,7 @@ int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate)
1083 1085
1084 return 0; 1086 return 0;
1085} 1087}
1088EXPORT_SYMBOL(sa1111_set_audio_rate);
1086 1089
1087/** 1090/**
1088 * sa1111_get_audio_rate - get the audio sample rate 1091 * sa1111_get_audio_rate - get the audio sample rate
@@ -1100,6 +1103,7 @@ int sa1111_get_audio_rate(struct sa1111_dev *sadev)
1100 1103
1101 return __sa1111_pll_clock(sachip) / (256 * div); 1104 return __sa1111_pll_clock(sachip) / (256 * div);
1102} 1105}
1106EXPORT_SYMBOL(sa1111_get_audio_rate);
1103 1107
1104void sa1111_set_io_dir(struct sa1111_dev *sadev, 1108void sa1111_set_io_dir(struct sa1111_dev *sadev,
1105 unsigned int bits, unsigned int dir, 1109 unsigned int bits, unsigned int dir,
@@ -1128,6 +1132,7 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev,
1128 MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16); 1132 MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16);
1129 spin_unlock_irqrestore(&sachip->lock, flags); 1133 spin_unlock_irqrestore(&sachip->lock, flags);
1130} 1134}
1135EXPORT_SYMBOL(sa1111_set_io_dir);
1131 1136
1132void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v) 1137void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
1133{ 1138{
@@ -1142,6 +1147,7 @@ void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
1142 MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16); 1147 MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16);
1143 spin_unlock_irqrestore(&sachip->lock, flags); 1148 spin_unlock_irqrestore(&sachip->lock, flags);
1144} 1149}
1150EXPORT_SYMBOL(sa1111_set_io);
1145 1151
1146void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v) 1152void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
1147{ 1153{
@@ -1156,6 +1162,7 @@ void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned i
1156 MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16); 1162 MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16);
1157 spin_unlock_irqrestore(&sachip->lock, flags); 1163 spin_unlock_irqrestore(&sachip->lock, flags);
1158} 1164}
1165EXPORT_SYMBOL(sa1111_set_sleep_io);
1159 1166
1160/* 1167/*
1161 * Individual device operations. 1168 * Individual device operations.
@@ -1176,6 +1183,7 @@ void sa1111_enable_device(struct sa1111_dev *sadev)
1176 sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR); 1183 sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
1177 spin_unlock_irqrestore(&sachip->lock, flags); 1184 spin_unlock_irqrestore(&sachip->lock, flags);
1178} 1185}
1186EXPORT_SYMBOL(sa1111_enable_device);
1179 1187
1180/** 1188/**
1181 * sa1111_disable_device - disable an on-chip SA1111 function block 1189 * sa1111_disable_device - disable an on-chip SA1111 function block
@@ -1192,6 +1200,7 @@ void sa1111_disable_device(struct sa1111_dev *sadev)
1192 sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR); 1200 sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
1193 spin_unlock_irqrestore(&sachip->lock, flags); 1201 spin_unlock_irqrestore(&sachip->lock, flags);
1194} 1202}
1203EXPORT_SYMBOL(sa1111_disable_device);
1195 1204
1196/* 1205/*
1197 * SA1111 "Register Access Bus." 1206 * SA1111 "Register Access Bus."
@@ -1259,17 +1268,20 @@ struct bus_type sa1111_bus_type = {
1259 .suspend = sa1111_bus_suspend, 1268 .suspend = sa1111_bus_suspend,
1260 .resume = sa1111_bus_resume, 1269 .resume = sa1111_bus_resume,
1261}; 1270};
1271EXPORT_SYMBOL(sa1111_bus_type);
1262 1272
1263int sa1111_driver_register(struct sa1111_driver *driver) 1273int sa1111_driver_register(struct sa1111_driver *driver)
1264{ 1274{
1265 driver->drv.bus = &sa1111_bus_type; 1275 driver->drv.bus = &sa1111_bus_type;
1266 return driver_register(&driver->drv); 1276 return driver_register(&driver->drv);
1267} 1277}
1278EXPORT_SYMBOL(sa1111_driver_register);
1268 1279
1269void sa1111_driver_unregister(struct sa1111_driver *driver) 1280void sa1111_driver_unregister(struct sa1111_driver *driver)
1270{ 1281{
1271 driver_unregister(&driver->drv); 1282 driver_unregister(&driver->drv);
1272} 1283}
1284EXPORT_SYMBOL(sa1111_driver_unregister);
1273 1285
1274static int __init sa1111_init(void) 1286static int __init sa1111_init(void)
1275{ 1287{
@@ -1290,16 +1302,3 @@ module_exit(sa1111_exit);
1290 1302
1291MODULE_DESCRIPTION("Intel Corporation SA1111 core driver"); 1303MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
1292MODULE_LICENSE("GPL"); 1304MODULE_LICENSE("GPL");
1293
1294EXPORT_SYMBOL(sa1111_select_audio_mode);
1295EXPORT_SYMBOL(sa1111_set_audio_rate);
1296EXPORT_SYMBOL(sa1111_get_audio_rate);
1297EXPORT_SYMBOL(sa1111_set_io_dir);
1298EXPORT_SYMBOL(sa1111_set_io);
1299EXPORT_SYMBOL(sa1111_set_sleep_io);
1300EXPORT_SYMBOL(sa1111_enable_device);
1301EXPORT_SYMBOL(sa1111_disable_device);
1302EXPORT_SYMBOL(sa1111_pll_clock);
1303EXPORT_SYMBOL(sa1111_bus_type);
1304EXPORT_SYMBOL(sa1111_driver_register);
1305EXPORT_SYMBOL(sa1111_driver_unregister);
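Aside: the sa1111.c hunks above move each EXPORT_SYMBOL() from the block formerly at the end of the file to sit directly after the function it exports, which is the usual kernel style. A tiny illustration with a hypothetical function (not from sa1111.c):

#include <linux/module.h>

int example_sa_helper(void)
{
        return 0;
}
EXPORT_SYMBOL(example_sa_helper);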
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index 1502957db2c3..f6aed7747d4d 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -90,7 +90,6 @@ CONFIG_ARCH_SA1100=y
90# CONFIG_SA1100_COLLIE is not set 90# CONFIG_SA1100_COLLIE is not set
91# CONFIG_SA1100_H3100 is not set 91# CONFIG_SA1100_H3100 is not set
92CONFIG_SA1100_H3600=y 92CONFIG_SA1100_H3600=y
93CONFIG_SA1100_H3XXX=y
94# CONFIG_SA1100_BADGE4 is not set 93# CONFIG_SA1100_BADGE4 is not set
95# CONFIG_SA1100_JORNADA720 is not set 94# CONFIG_SA1100_JORNADA720 is not set
96# CONFIG_SA1100_HACKKIT is not set 95# CONFIG_SA1100_HACKKIT is not set
diff --git a/arch/arm/configs/iop33x_defconfig b/arch/arm/configs/iop33x_defconfig
index eec488298267..ed2d59d01829 100644
--- a/arch/arm/configs/iop33x_defconfig
+++ b/arch/arm/configs/iop33x_defconfig
@@ -1,29 +1,26 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.24-rc5 3# Linux kernel version: 2.6.31-rc6
4# Wed Dec 12 16:11:27 2007 4# Tue Aug 18 13:41:41 2009
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_SYS_SUPPORTS_APM_EMULATION=y 7CONFIG_SYS_SUPPORTS_APM_EMULATION=y
8# CONFIG_GENERIC_GPIO is not set 8CONFIG_GENERIC_GPIO=y
9# CONFIG_GENERIC_TIME is not set
10# CONFIG_GENERIC_CLOCKEVENTS is not set
11CONFIG_MMU=y 9CONFIG_MMU=y
12# CONFIG_NO_IOPORT is not set
13CONFIG_GENERIC_HARDIRQS=y 10CONFIG_GENERIC_HARDIRQS=y
14CONFIG_STACKTRACE_SUPPORT=y 11CONFIG_STACKTRACE_SUPPORT=y
12CONFIG_HAVE_LATENCYTOP_SUPPORT=y
15CONFIG_LOCKDEP_SUPPORT=y 13CONFIG_LOCKDEP_SUPPORT=y
16CONFIG_TRACE_IRQFLAGS_SUPPORT=y 14CONFIG_TRACE_IRQFLAGS_SUPPORT=y
17CONFIG_HARDIRQS_SW_RESEND=y 15CONFIG_HARDIRQS_SW_RESEND=y
18CONFIG_GENERIC_IRQ_PROBE=y 16CONFIG_GENERIC_IRQ_PROBE=y
19CONFIG_RWSEM_GENERIC_SPINLOCK=y 17CONFIG_RWSEM_GENERIC_SPINLOCK=y
20# CONFIG_ARCH_HAS_ILOG2_U32 is not set
21# CONFIG_ARCH_HAS_ILOG2_U64 is not set
22CONFIG_GENERIC_HWEIGHT=y 18CONFIG_GENERIC_HWEIGHT=y
23CONFIG_GENERIC_CALIBRATE_DELAY=y 19CONFIG_GENERIC_CALIBRATE_DELAY=y
24CONFIG_ZONE_DMA=y 20CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
25CONFIG_VECTORS_BASE=0xffff0000 21CONFIG_VECTORS_BASE=0xffff0000
26CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 22CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
23CONFIG_CONSTRUCTORS=y
27 24
28# 25#
29# General setup 26# General setup
@@ -40,21 +37,39 @@ CONFIG_SYSVIPC_SYSCTL=y
40CONFIG_BSD_PROCESS_ACCT=y 37CONFIG_BSD_PROCESS_ACCT=y
41# CONFIG_BSD_PROCESS_ACCT_V3 is not set 38# CONFIG_BSD_PROCESS_ACCT_V3 is not set
42# CONFIG_TASKSTATS is not set 39# CONFIG_TASKSTATS is not set
43# CONFIG_USER_NS is not set
44# CONFIG_PID_NS is not set
45# CONFIG_AUDIT is not set 40# CONFIG_AUDIT is not set
41
42#
43# RCU Subsystem
44#
45# CONFIG_CLASSIC_RCU is not set
46CONFIG_TREE_RCU=y
47# CONFIG_PREEMPT_RCU is not set
48# CONFIG_RCU_TRACE is not set
49CONFIG_RCU_FANOUT=32
50# CONFIG_RCU_FANOUT_EXACT is not set
51# CONFIG_TREE_RCU_TRACE is not set
52# CONFIG_PREEMPT_RCU_TRACE is not set
46# CONFIG_IKCONFIG is not set 53# CONFIG_IKCONFIG is not set
47CONFIG_LOG_BUF_SHIFT=14 54CONFIG_LOG_BUF_SHIFT=14
55# CONFIG_GROUP_SCHED is not set
48# CONFIG_CGROUPS is not set 56# CONFIG_CGROUPS is not set
49CONFIG_FAIR_GROUP_SCHED=y 57# CONFIG_SYSFS_DEPRECATED_V2 is not set
50CONFIG_FAIR_USER_SCHED=y
51# CONFIG_FAIR_CGROUP_SCHED is not set
52CONFIG_SYSFS_DEPRECATED=y
53# CONFIG_RELAY is not set 58# CONFIG_RELAY is not set
59CONFIG_NAMESPACES=y
60# CONFIG_UTS_NS is not set
61# CONFIG_IPC_NS is not set
62# CONFIG_USER_NS is not set
63# CONFIG_PID_NS is not set
64# CONFIG_NET_NS is not set
54CONFIG_BLK_DEV_INITRD=y 65CONFIG_BLK_DEV_INITRD=y
55CONFIG_INITRAMFS_SOURCE="" 66CONFIG_INITRAMFS_SOURCE=""
67CONFIG_RD_GZIP=y
68CONFIG_RD_BZIP2=y
69CONFIG_RD_LZMA=y
56CONFIG_CC_OPTIMIZE_FOR_SIZE=y 70CONFIG_CC_OPTIMIZE_FOR_SIZE=y
57CONFIG_SYSCTL=y 71CONFIG_SYSCTL=y
72CONFIG_ANON_INODES=y
58# CONFIG_EMBEDDED is not set 73# CONFIG_EMBEDDED is not set
59CONFIG_UID16=y 74CONFIG_UID16=y
60CONFIG_SYSCTL_SYSCALL=y 75CONFIG_SYSCTL_SYSCALL=y
@@ -67,29 +82,48 @@ CONFIG_BUG=y
67CONFIG_ELF_CORE=y 82CONFIG_ELF_CORE=y
68CONFIG_BASE_FULL=y 83CONFIG_BASE_FULL=y
69CONFIG_FUTEX=y 84CONFIG_FUTEX=y
70CONFIG_ANON_INODES=y
71CONFIG_EPOLL=y 85CONFIG_EPOLL=y
72CONFIG_SIGNALFD=y 86CONFIG_SIGNALFD=y
87CONFIG_TIMERFD=y
73CONFIG_EVENTFD=y 88CONFIG_EVENTFD=y
74CONFIG_SHMEM=y 89CONFIG_SHMEM=y
90CONFIG_AIO=y
91
92#
93# Performance Counters
94#
75CONFIG_VM_EVENT_COUNTERS=y 95CONFIG_VM_EVENT_COUNTERS=y
96CONFIG_PCI_QUIRKS=y
97# CONFIG_STRIP_ASM_SYMS is not set
98CONFIG_COMPAT_BRK=y
76CONFIG_SLAB=y 99CONFIG_SLAB=y
77# CONFIG_SLUB is not set 100# CONFIG_SLUB is not set
78# CONFIG_SLOB is not set 101# CONFIG_SLOB is not set
102# CONFIG_PROFILING is not set
103# CONFIG_MARKERS is not set
104CONFIG_HAVE_OPROFILE=y
105# CONFIG_KPROBES is not set
106CONFIG_HAVE_KPROBES=y
107CONFIG_HAVE_KRETPROBES=y
108
109#
110# GCOV-based kernel profiling
111#
112# CONFIG_SLOW_WORK is not set
113CONFIG_HAVE_GENERIC_DMA_COHERENT=y
114CONFIG_SLABINFO=y
79CONFIG_RT_MUTEXES=y 115CONFIG_RT_MUTEXES=y
80# CONFIG_TINY_SHMEM is not set
81CONFIG_BASE_SMALL=0 116CONFIG_BASE_SMALL=0
82CONFIG_MODULES=y 117CONFIG_MODULES=y
118# CONFIG_MODULE_FORCE_LOAD is not set
83CONFIG_MODULE_UNLOAD=y 119CONFIG_MODULE_UNLOAD=y
84# CONFIG_MODULE_FORCE_UNLOAD is not set 120# CONFIG_MODULE_FORCE_UNLOAD is not set
85# CONFIG_MODVERSIONS is not set 121# CONFIG_MODVERSIONS is not set
86# CONFIG_MODULE_SRCVERSION_ALL is not set 122# CONFIG_MODULE_SRCVERSION_ALL is not set
87CONFIG_KMOD=y
88CONFIG_BLOCK=y 123CONFIG_BLOCK=y
89# CONFIG_LBD is not set 124CONFIG_LBDAF=y
90# CONFIG_BLK_DEV_IO_TRACE is not set
91# CONFIG_LSF is not set
92# CONFIG_BLK_DEV_BSG is not set 125# CONFIG_BLK_DEV_BSG is not set
126# CONFIG_BLK_DEV_INTEGRITY is not set
93 127
94# 128#
95# IO Schedulers 129# IO Schedulers
@@ -103,6 +137,7 @@ CONFIG_IOSCHED_CFQ=y
103CONFIG_DEFAULT_CFQ=y 137CONFIG_DEFAULT_CFQ=y
104# CONFIG_DEFAULT_NOOP is not set 138# CONFIG_DEFAULT_NOOP is not set
105CONFIG_DEFAULT_IOSCHED="cfq" 139CONFIG_DEFAULT_IOSCHED="cfq"
140# CONFIG_FREEZER is not set
106 141
107# 142#
108# System Type 143# System Type
@@ -112,15 +147,15 @@ CONFIG_DEFAULT_IOSCHED="cfq"
112# CONFIG_ARCH_REALVIEW is not set 147# CONFIG_ARCH_REALVIEW is not set
113# CONFIG_ARCH_VERSATILE is not set 148# CONFIG_ARCH_VERSATILE is not set
114# CONFIG_ARCH_AT91 is not set 149# CONFIG_ARCH_AT91 is not set
115# CONFIG_ARCH_CLPS7500 is not set
116# CONFIG_ARCH_CLPS711X is not set 150# CONFIG_ARCH_CLPS711X is not set
117# CONFIG_ARCH_CO285 is not set 151# CONFIG_ARCH_GEMINI is not set
118# CONFIG_ARCH_EBSA110 is not set 152# CONFIG_ARCH_EBSA110 is not set
119# CONFIG_ARCH_EP93XX is not set 153# CONFIG_ARCH_EP93XX is not set
120# CONFIG_ARCH_FOOTBRIDGE is not set 154# CONFIG_ARCH_FOOTBRIDGE is not set
155# CONFIG_ARCH_MXC is not set
156# CONFIG_ARCH_STMP3XXX is not set
121# CONFIG_ARCH_NETX is not set 157# CONFIG_ARCH_NETX is not set
122# CONFIG_ARCH_H720X is not set 158# CONFIG_ARCH_H720X is not set
123# CONFIG_ARCH_IMX is not set
124# CONFIG_ARCH_IOP13XX is not set 159# CONFIG_ARCH_IOP13XX is not set
125# CONFIG_ARCH_IOP32X is not set 160# CONFIG_ARCH_IOP32X is not set
126CONFIG_ARCH_IOP33X=y 161CONFIG_ARCH_IOP33X=y
@@ -128,19 +163,26 @@ CONFIG_ARCH_IOP33X=y
128# CONFIG_ARCH_IXP2000 is not set 163# CONFIG_ARCH_IXP2000 is not set
129# CONFIG_ARCH_IXP4XX is not set 164# CONFIG_ARCH_IXP4XX is not set
130# CONFIG_ARCH_L7200 is not set 165# CONFIG_ARCH_L7200 is not set
166# CONFIG_ARCH_KIRKWOOD is not set
167# CONFIG_ARCH_LOKI is not set
168# CONFIG_ARCH_MV78XX0 is not set
169# CONFIG_ARCH_ORION5X is not set
170# CONFIG_ARCH_MMP is not set
131# CONFIG_ARCH_KS8695 is not set 171# CONFIG_ARCH_KS8695 is not set
132# CONFIG_ARCH_NS9XXX is not set 172# CONFIG_ARCH_NS9XXX is not set
133# CONFIG_ARCH_MXC is not set 173# CONFIG_ARCH_W90X900 is not set
134# CONFIG_ARCH_PNX4008 is not set 174# CONFIG_ARCH_PNX4008 is not set
135# CONFIG_ARCH_PXA is not set 175# CONFIG_ARCH_PXA is not set
176# CONFIG_ARCH_MSM is not set
136# CONFIG_ARCH_RPC is not set 177# CONFIG_ARCH_RPC is not set
137# CONFIG_ARCH_SA1100 is not set 178# CONFIG_ARCH_SA1100 is not set
138# CONFIG_ARCH_S3C2410 is not set 179# CONFIG_ARCH_S3C2410 is not set
180# CONFIG_ARCH_S3C64XX is not set
139# CONFIG_ARCH_SHARK is not set 181# CONFIG_ARCH_SHARK is not set
140# CONFIG_ARCH_LH7A40X is not set 182# CONFIG_ARCH_LH7A40X is not set
183# CONFIG_ARCH_U300 is not set
141# CONFIG_ARCH_DAVINCI is not set 184# CONFIG_ARCH_DAVINCI is not set
142# CONFIG_ARCH_OMAP is not set 185# CONFIG_ARCH_OMAP is not set
143CONFIG_IOP3XX_ATU=y
144 186
145# 187#
146# IOP33x Implementation Options 188# IOP33x Implementation Options
@@ -151,14 +193,6 @@ CONFIG_IOP3XX_ATU=y
151# 193#
152CONFIG_ARCH_IQ80331=y 194CONFIG_ARCH_IQ80331=y
153CONFIG_MACH_IQ80332=y 195CONFIG_MACH_IQ80332=y
154
155#
156# Boot options
157#
158
159#
160# Power management
161#
162CONFIG_PLAT_IOP=y 196CONFIG_PLAT_IOP=y
163 197
164# 198#
@@ -168,6 +202,7 @@ CONFIG_CPU_32=y
168CONFIG_CPU_XSCALE=y 202CONFIG_CPU_XSCALE=y
169CONFIG_CPU_32v5=y 203CONFIG_CPU_32v5=y
170CONFIG_CPU_ABRT_EV5T=y 204CONFIG_CPU_ABRT_EV5T=y
205CONFIG_CPU_PABRT_NOIFAR=y
171CONFIG_CPU_CACHE_VIVT=y 206CONFIG_CPU_CACHE_VIVT=y
172CONFIG_CPU_TLB_V4WBI=y 207CONFIG_CPU_TLB_V4WBI=y
173CONFIG_CPU_CP15=y 208CONFIG_CPU_CP15=y
@@ -178,7 +213,6 @@ CONFIG_CPU_CP15_MMU=y
178# 213#
179# CONFIG_ARM_THUMB is not set 214# CONFIG_ARM_THUMB is not set
180# CONFIG_CPU_DCACHE_DISABLE is not set 215# CONFIG_CPU_DCACHE_DISABLE is not set
181# CONFIG_OUTER_CACHE is not set
182# CONFIG_IWMMXT is not set 216# CONFIG_IWMMXT is not set
183CONFIG_XSCALE_PMU=y 217CONFIG_XSCALE_PMU=y
184 218
@@ -190,41 +224,55 @@ CONFIG_PCI_SYSCALL=y
190# CONFIG_ARCH_SUPPORTS_MSI is not set 224# CONFIG_ARCH_SUPPORTS_MSI is not set
191CONFIG_PCI_LEGACY=y 225CONFIG_PCI_LEGACY=y
192# CONFIG_PCI_DEBUG is not set 226# CONFIG_PCI_DEBUG is not set
227# CONFIG_PCI_STUB is not set
228# CONFIG_PCI_IOV is not set
193# CONFIG_PCCARD is not set 229# CONFIG_PCCARD is not set
194 230
195# 231#
196# Kernel Features 232# Kernel Features
197# 233#
198# CONFIG_TICK_ONESHOT is not set 234CONFIG_VMSPLIT_3G=y
235# CONFIG_VMSPLIT_2G is not set
236# CONFIG_VMSPLIT_1G is not set
237CONFIG_PAGE_OFFSET=0xC0000000
199# CONFIG_PREEMPT is not set 238# CONFIG_PREEMPT is not set
200CONFIG_HZ=100 239CONFIG_HZ=100
201# CONFIG_AEABI is not set 240# CONFIG_AEABI is not set
202# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set 241# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
242# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
243# CONFIG_HIGHMEM is not set
203CONFIG_SELECT_MEMORY_MODEL=y 244CONFIG_SELECT_MEMORY_MODEL=y
204CONFIG_FLATMEM_MANUAL=y 245CONFIG_FLATMEM_MANUAL=y
205# CONFIG_DISCONTIGMEM_MANUAL is not set 246# CONFIG_DISCONTIGMEM_MANUAL is not set
206# CONFIG_SPARSEMEM_MANUAL is not set 247# CONFIG_SPARSEMEM_MANUAL is not set
207CONFIG_FLATMEM=y 248CONFIG_FLATMEM=y
208CONFIG_FLAT_NODE_MEM_MAP=y 249CONFIG_FLAT_NODE_MEM_MAP=y
209# CONFIG_SPARSEMEM_STATIC is not set 250CONFIG_PAGEFLAGS_EXTENDED=y
210# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
211CONFIG_SPLIT_PTLOCK_CPUS=4096 251CONFIG_SPLIT_PTLOCK_CPUS=4096
212# CONFIG_RESOURCES_64BIT is not set 252# CONFIG_PHYS_ADDR_T_64BIT is not set
213CONFIG_ZONE_DMA_FLAG=1 253CONFIG_ZONE_DMA_FLAG=0
214CONFIG_BOUNCE=y
215CONFIG_VIRT_TO_BUS=y 254CONFIG_VIRT_TO_BUS=y
255CONFIG_HAVE_MLOCK=y
256CONFIG_HAVE_MLOCKED_PAGE_BIT=y
257CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
216CONFIG_ALIGNMENT_TRAP=y 258CONFIG_ALIGNMENT_TRAP=y
259# CONFIG_UACCESS_WITH_MEMCPY is not set
217 260
218# 261#
219# Boot options 262# Boot options
220# 263#
221CONFIG_ZBOOT_ROM_TEXT=0x0 264CONFIG_ZBOOT_ROM_TEXT=0x0
222CONFIG_ZBOOT_ROM_BSS=0x0 265CONFIG_ZBOOT_ROM_BSS=0x0
223CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc" 266CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc iop3xx_init_atu=y"
224# CONFIG_XIP_KERNEL is not set 267# CONFIG_XIP_KERNEL is not set
225# CONFIG_KEXEC is not set 268# CONFIG_KEXEC is not set
226 269
227# 270#
271# CPU Power Management
272#
273# CONFIG_CPU_IDLE is not set
274
275#
228# Floating point emulation 276# Floating point emulation
229# 277#
230 278
@@ -239,6 +287,8 @@ CONFIG_FPE_NWFPE=y
239# Userspace binary formats 287# Userspace binary formats
240# 288#
241CONFIG_BINFMT_ELF=y 289CONFIG_BINFMT_ELF=y
290# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
291CONFIG_HAVE_AOUT=y
242CONFIG_BINFMT_AOUT=y 292CONFIG_BINFMT_AOUT=y
243# CONFIG_BINFMT_MISC is not set 293# CONFIG_BINFMT_MISC is not set
244# CONFIG_ARTHUR is not set 294# CONFIG_ARTHUR is not set
@@ -247,11 +297,7 @@ CONFIG_BINFMT_AOUT=y
247# Power management options 297# Power management options
248# 298#
249# CONFIG_PM is not set 299# CONFIG_PM is not set
250CONFIG_SUSPEND_UP_POSSIBLE=y 300CONFIG_ARCH_SUSPEND_POSSIBLE=y
251
252#
253# Networking
254#
255CONFIG_NET=y 301CONFIG_NET=y
256 302
257# 303#
@@ -264,6 +310,7 @@ CONFIG_XFRM=y
264# CONFIG_XFRM_USER is not set 310# CONFIG_XFRM_USER is not set
265# CONFIG_XFRM_SUB_POLICY is not set 311# CONFIG_XFRM_SUB_POLICY is not set
266# CONFIG_XFRM_MIGRATE is not set 312# CONFIG_XFRM_MIGRATE is not set
313# CONFIG_XFRM_STATISTICS is not set
267# CONFIG_NET_KEY is not set 314# CONFIG_NET_KEY is not set
268CONFIG_INET=y 315CONFIG_INET=y
269CONFIG_IP_MULTICAST=y 316CONFIG_IP_MULTICAST=y
@@ -310,6 +357,7 @@ CONFIG_IPV6=y
310# CONFIG_IPV6_SIT is not set 357# CONFIG_IPV6_SIT is not set
311# CONFIG_IPV6_TUNNEL is not set 358# CONFIG_IPV6_TUNNEL is not set
312# CONFIG_IPV6_MULTIPLE_TABLES is not set 359# CONFIG_IPV6_MULTIPLE_TABLES is not set
360# CONFIG_IPV6_MROUTE is not set
313# CONFIG_NETWORK_SECMARK is not set 361# CONFIG_NETWORK_SECMARK is not set
314# CONFIG_NETFILTER is not set 362# CONFIG_NETFILTER is not set
315# CONFIG_IP_DCCP is not set 363# CONFIG_IP_DCCP is not set
@@ -317,6 +365,7 @@ CONFIG_IPV6=y
317# CONFIG_TIPC is not set 365# CONFIG_TIPC is not set
318# CONFIG_ATM is not set 366# CONFIG_ATM is not set
319# CONFIG_BRIDGE is not set 367# CONFIG_BRIDGE is not set
368# CONFIG_NET_DSA is not set
320# CONFIG_VLAN_8021Q is not set 369# CONFIG_VLAN_8021Q is not set
321# CONFIG_DECNET is not set 370# CONFIG_DECNET is not set
322# CONFIG_LLC2 is not set 371# CONFIG_LLC2 is not set
@@ -326,24 +375,31 @@ CONFIG_IPV6=y
326# CONFIG_LAPB is not set 375# CONFIG_LAPB is not set
327# CONFIG_ECONET is not set 376# CONFIG_ECONET is not set
328# CONFIG_WAN_ROUTER is not set 377# CONFIG_WAN_ROUTER is not set
378# CONFIG_PHONET is not set
379# CONFIG_IEEE802154 is not set
329# CONFIG_NET_SCHED is not set 380# CONFIG_NET_SCHED is not set
381# CONFIG_DCB is not set
330 382
331# 383#
332# Network testing 384# Network testing
333# 385#
334# CONFIG_NET_PKTGEN is not set 386# CONFIG_NET_PKTGEN is not set
335# CONFIG_HAMRADIO is not set 387# CONFIG_HAMRADIO is not set
388# CONFIG_CAN is not set
336# CONFIG_IRDA is not set 389# CONFIG_IRDA is not set
337# CONFIG_BT is not set 390# CONFIG_BT is not set
338# CONFIG_AF_RXRPC is not set 391# CONFIG_AF_RXRPC is not set
392CONFIG_WIRELESS=y
393# CONFIG_CFG80211 is not set
394# CONFIG_WIRELESS_OLD_REGULATORY is not set
395# CONFIG_WIRELESS_EXT is not set
396# CONFIG_LIB80211 is not set
339 397
340# 398#
341# Wireless 399# CFG80211 needs to be enabled for MAC80211
342# 400#
343# CONFIG_CFG80211 is not set 401CONFIG_MAC80211_DEFAULT_PS_VALUE=0
344# CONFIG_WIRELESS_EXT is not set 402# CONFIG_WIMAX is not set
345# CONFIG_MAC80211 is not set
346# CONFIG_IEEE80211 is not set
347# CONFIG_RFKILL is not set 403# CONFIG_RFKILL is not set
348# CONFIG_NET_9P is not set 404# CONFIG_NET_9P is not set
349 405
@@ -357,7 +413,9 @@ CONFIG_IPV6=y
357CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 413CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
358CONFIG_STANDALONE=y 414CONFIG_STANDALONE=y
359CONFIG_PREVENT_FIRMWARE_BUILD=y 415CONFIG_PREVENT_FIRMWARE_BUILD=y
360# CONFIG_FW_LOADER is not set 416CONFIG_FW_LOADER=y
417CONFIG_FIRMWARE_IN_KERNEL=y
418CONFIG_EXTRA_FIRMWARE=""
361# CONFIG_DEBUG_DRIVER is not set 419# CONFIG_DEBUG_DRIVER is not set
362# CONFIG_DEBUG_DEVRES is not set 420# CONFIG_DEBUG_DEVRES is not set
363# CONFIG_SYS_HYPERVISOR is not set 421# CONFIG_SYS_HYPERVISOR is not set
@@ -366,12 +424,14 @@ CONFIG_MTD=y
366# CONFIG_MTD_DEBUG is not set 424# CONFIG_MTD_DEBUG is not set
367# CONFIG_MTD_CONCAT is not set 425# CONFIG_MTD_CONCAT is not set
368CONFIG_MTD_PARTITIONS=y 426CONFIG_MTD_PARTITIONS=y
427# CONFIG_MTD_TESTS is not set
369CONFIG_MTD_REDBOOT_PARTS=y 428CONFIG_MTD_REDBOOT_PARTS=y
370CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 429CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
371CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y 430CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
372CONFIG_MTD_REDBOOT_PARTS_READONLY=y 431CONFIG_MTD_REDBOOT_PARTS_READONLY=y
373# CONFIG_MTD_CMDLINE_PARTS is not set 432# CONFIG_MTD_CMDLINE_PARTS is not set
374# CONFIG_MTD_AFS_PARTS is not set 433# CONFIG_MTD_AFS_PARTS is not set
434# CONFIG_MTD_AR7_PARTS is not set
375 435
376# 436#
377# User Modules And Translation Layers 437# User Modules And Translation Layers
@@ -421,9 +481,7 @@ CONFIG_MTD_CFI_UTIL=y
421# 481#
422# CONFIG_MTD_COMPLEX_MAPPINGS is not set 482# CONFIG_MTD_COMPLEX_MAPPINGS is not set
423CONFIG_MTD_PHYSMAP=y 483CONFIG_MTD_PHYSMAP=y
424CONFIG_MTD_PHYSMAP_START=0x0 484# CONFIG_MTD_PHYSMAP_COMPAT is not set
425CONFIG_MTD_PHYSMAP_LEN=0x0
426CONFIG_MTD_PHYSMAP_BANKWIDTH=1
427# CONFIG_MTD_ARM_INTEGRATOR is not set 485# CONFIG_MTD_ARM_INTEGRATOR is not set
428# CONFIG_MTD_INTEL_VR_NOR is not set 486# CONFIG_MTD_INTEL_VR_NOR is not set
429# CONFIG_MTD_PLATRAM is not set 487# CONFIG_MTD_PLATRAM is not set
@@ -447,6 +505,11 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=1
447# CONFIG_MTD_ONENAND is not set 505# CONFIG_MTD_ONENAND is not set
448 506
449# 507#
508# LPDDR flash memory drivers
509#
510# CONFIG_MTD_LPDDR is not set
511
512#
450# UBI - Unsorted block images 513# UBI - Unsorted block images
451# 514#
452# CONFIG_MTD_UBI is not set 515# CONFIG_MTD_UBI is not set
@@ -463,14 +526,29 @@ CONFIG_BLK_DEV_NBD=y
463CONFIG_BLK_DEV_RAM=y 526CONFIG_BLK_DEV_RAM=y
464CONFIG_BLK_DEV_RAM_COUNT=16 527CONFIG_BLK_DEV_RAM_COUNT=16
465CONFIG_BLK_DEV_RAM_SIZE=8192 528CONFIG_BLK_DEV_RAM_SIZE=8192
466CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 529# CONFIG_BLK_DEV_XIP is not set
467# CONFIG_CDROM_PKTCDVD is not set 530# CONFIG_CDROM_PKTCDVD is not set
468# CONFIG_ATA_OVER_ETH is not set 531# CONFIG_ATA_OVER_ETH is not set
532# CONFIG_MG_DISK is not set
469CONFIG_MISC_DEVICES=y 533CONFIG_MISC_DEVICES=y
470# CONFIG_PHANTOM is not set 534# CONFIG_PHANTOM is not set
471# CONFIG_EEPROM_93CX6 is not set
472# CONFIG_SGI_IOC4 is not set 535# CONFIG_SGI_IOC4 is not set
473# CONFIG_TIFM_CORE is not set 536# CONFIG_TIFM_CORE is not set
537# CONFIG_ICS932S401 is not set
538# CONFIG_ENCLOSURE_SERVICES is not set
539# CONFIG_HP_ILO is not set
540# CONFIG_ISL29003 is not set
541# CONFIG_C2PORT is not set
542
543#
544# EEPROM support
545#
546# CONFIG_EEPROM_AT24 is not set
547# CONFIG_EEPROM_LEGACY is not set
548# CONFIG_EEPROM_MAX6875 is not set
549# CONFIG_EEPROM_93CX6 is not set
550# CONFIG_CB710_CORE is not set
551CONFIG_HAVE_IDE=y
474# CONFIG_IDE is not set 552# CONFIG_IDE is not set
475 553
476# 554#
@@ -492,10 +570,6 @@ CONFIG_BLK_DEV_SD=y
492# CONFIG_BLK_DEV_SR is not set 570# CONFIG_BLK_DEV_SR is not set
493CONFIG_CHR_DEV_SG=y 571CONFIG_CHR_DEV_SG=y
494# CONFIG_CHR_DEV_SCH is not set 572# CONFIG_CHR_DEV_SCH is not set
495
496#
497# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
498#
499# CONFIG_SCSI_MULTI_LUN is not set 573# CONFIG_SCSI_MULTI_LUN is not set
500# CONFIG_SCSI_CONSTANTS is not set 574# CONFIG_SCSI_CONSTANTS is not set
501# CONFIG_SCSI_LOGGING is not set 575# CONFIG_SCSI_LOGGING is not set
@@ -512,6 +586,8 @@ CONFIG_SCSI_WAIT_SCAN=m
512# CONFIG_SCSI_SRP_ATTRS is not set 586# CONFIG_SCSI_SRP_ATTRS is not set
513CONFIG_SCSI_LOWLEVEL=y 587CONFIG_SCSI_LOWLEVEL=y
514# CONFIG_ISCSI_TCP is not set 588# CONFIG_ISCSI_TCP is not set
589# CONFIG_SCSI_CXGB3_ISCSI is not set
590# CONFIG_SCSI_BNX2_ISCSI is not set
515# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 591# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
516# CONFIG_SCSI_3W_9XXX is not set 592# CONFIG_SCSI_3W_9XXX is not set
517# CONFIG_SCSI_ACARD is not set 593# CONFIG_SCSI_ACARD is not set
@@ -520,13 +596,18 @@ CONFIG_SCSI_LOWLEVEL=y
520# CONFIG_SCSI_AIC7XXX_OLD is not set 596# CONFIG_SCSI_AIC7XXX_OLD is not set
521# CONFIG_SCSI_AIC79XX is not set 597# CONFIG_SCSI_AIC79XX is not set
522# CONFIG_SCSI_AIC94XX is not set 598# CONFIG_SCSI_AIC94XX is not set
599# CONFIG_SCSI_MVSAS is not set
523# CONFIG_SCSI_DPT_I2O is not set 600# CONFIG_SCSI_DPT_I2O is not set
524# CONFIG_SCSI_ADVANSYS is not set 601# CONFIG_SCSI_ADVANSYS is not set
525# CONFIG_SCSI_ARCMSR is not set 602# CONFIG_SCSI_ARCMSR is not set
526# CONFIG_MEGARAID_NEWGEN is not set 603# CONFIG_MEGARAID_NEWGEN is not set
527# CONFIG_MEGARAID_LEGACY is not set 604# CONFIG_MEGARAID_LEGACY is not set
528# CONFIG_MEGARAID_SAS is not set 605# CONFIG_MEGARAID_SAS is not set
606# CONFIG_SCSI_MPT2SAS is not set
529# CONFIG_SCSI_HPTIOP is not set 607# CONFIG_SCSI_HPTIOP is not set
608# CONFIG_LIBFC is not set
609# CONFIG_LIBFCOE is not set
610# CONFIG_FCOE is not set
530# CONFIG_SCSI_DMX3191D is not set 611# CONFIG_SCSI_DMX3191D is not set
531# CONFIG_SCSI_FUTURE_DOMAIN is not set 612# CONFIG_SCSI_FUTURE_DOMAIN is not set
532# CONFIG_SCSI_IPS is not set 613# CONFIG_SCSI_IPS is not set
@@ -543,15 +624,18 @@ CONFIG_SCSI_LOWLEVEL=y
543# CONFIG_SCSI_NSP32 is not set 624# CONFIG_SCSI_NSP32 is not set
544# CONFIG_SCSI_DEBUG is not set 625# CONFIG_SCSI_DEBUG is not set
545# CONFIG_SCSI_SRP is not set 626# CONFIG_SCSI_SRP is not set
627# CONFIG_SCSI_DH is not set
628# CONFIG_SCSI_OSD_INITIATOR is not set
546# CONFIG_ATA is not set 629# CONFIG_ATA is not set
547CONFIG_MD=y 630CONFIG_MD=y
548CONFIG_BLK_DEV_MD=y 631CONFIG_BLK_DEV_MD=y
632CONFIG_MD_AUTODETECT=y
549CONFIG_MD_LINEAR=y 633CONFIG_MD_LINEAR=y
550CONFIG_MD_RAID0=y 634CONFIG_MD_RAID0=y
551CONFIG_MD_RAID1=y 635CONFIG_MD_RAID1=y
552# CONFIG_MD_RAID10 is not set 636# CONFIG_MD_RAID10 is not set
553CONFIG_MD_RAID456=y 637CONFIG_MD_RAID456=y
554# CONFIG_MD_RAID5_RESHAPE is not set 638CONFIG_MD_RAID6_PQ=y
555# CONFIG_MD_MULTIPATH is not set 639# CONFIG_MD_MULTIPATH is not set
556# CONFIG_MD_FAULTY is not set 640# CONFIG_MD_FAULTY is not set
557CONFIG_BLK_DEV_DM=y 641CONFIG_BLK_DEV_DM=y
@@ -568,27 +652,34 @@ CONFIG_BLK_DEV_DM=y
568# 652#
569# IEEE 1394 (FireWire) support 653# IEEE 1394 (FireWire) support
570# 654#
655
656#
657# You can enable one or both FireWire driver stacks.
658#
659
660#
661# See the help texts for more information.
662#
571# CONFIG_FIREWIRE is not set 663# CONFIG_FIREWIRE is not set
572# CONFIG_IEEE1394 is not set 664# CONFIG_IEEE1394 is not set
573# CONFIG_I2O is not set 665# CONFIG_I2O is not set
574CONFIG_NETDEVICES=y 666CONFIG_NETDEVICES=y
575# CONFIG_NETDEVICES_MULTIQUEUE is not set
576# CONFIG_DUMMY is not set 667# CONFIG_DUMMY is not set
577# CONFIG_BONDING is not set 668# CONFIG_BONDING is not set
578# CONFIG_MACVLAN is not set 669# CONFIG_MACVLAN is not set
579# CONFIG_EQUALIZER is not set 670# CONFIG_EQUALIZER is not set
580# CONFIG_TUN is not set 671# CONFIG_TUN is not set
581# CONFIG_VETH is not set 672# CONFIG_VETH is not set
582# CONFIG_IP1000 is not set
583# CONFIG_ARCNET is not set 673# CONFIG_ARCNET is not set
584# CONFIG_NET_ETHERNET is not set 674# CONFIG_NET_ETHERNET is not set
585CONFIG_NETDEV_1000=y 675CONFIG_NETDEV_1000=y
586# CONFIG_ACENIC is not set 676# CONFIG_ACENIC is not set
587# CONFIG_DL2K is not set 677# CONFIG_DL2K is not set
588CONFIG_E1000=y 678CONFIG_E1000=y
589CONFIG_E1000_NAPI=y
590# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
591# CONFIG_E1000E is not set 679# CONFIG_E1000E is not set
680# CONFIG_IP1000 is not set
681# CONFIG_IGB is not set
682# CONFIG_IGBVF is not set
592# CONFIG_NS83820 is not set 683# CONFIG_NS83820 is not set
593# CONFIG_HAMACHI is not set 684# CONFIG_HAMACHI is not set
594# CONFIG_YELLOWFIN is not set 685# CONFIG_YELLOWFIN is not set
@@ -596,23 +687,34 @@ CONFIG_E1000_NAPI=y
596# CONFIG_SIS190 is not set 687# CONFIG_SIS190 is not set
597# CONFIG_SKGE is not set 688# CONFIG_SKGE is not set
598# CONFIG_SKY2 is not set 689# CONFIG_SKY2 is not set
599# CONFIG_SK98LIN is not set
600# CONFIG_VIA_VELOCITY is not set 690# CONFIG_VIA_VELOCITY is not set
601# CONFIG_TIGON3 is not set 691# CONFIG_TIGON3 is not set
602# CONFIG_BNX2 is not set 692# CONFIG_BNX2 is not set
693# CONFIG_CNIC is not set
603# CONFIG_QLA3XXX is not set 694# CONFIG_QLA3XXX is not set
604# CONFIG_ATL1 is not set 695# CONFIG_ATL1 is not set
696# CONFIG_ATL1E is not set
697# CONFIG_ATL1C is not set
698# CONFIG_JME is not set
605CONFIG_NETDEV_10000=y 699CONFIG_NETDEV_10000=y
606# CONFIG_CHELSIO_T1 is not set 700# CONFIG_CHELSIO_T1 is not set
701CONFIG_CHELSIO_T3_DEPENDS=y
607# CONFIG_CHELSIO_T3 is not set 702# CONFIG_CHELSIO_T3 is not set
703# CONFIG_ENIC is not set
608# CONFIG_IXGBE is not set 704# CONFIG_IXGBE is not set
609# CONFIG_IXGB is not set 705# CONFIG_IXGB is not set
610# CONFIG_S2IO is not set 706# CONFIG_S2IO is not set
707# CONFIG_VXGE is not set
611# CONFIG_MYRI10GE is not set 708# CONFIG_MYRI10GE is not set
612# CONFIG_NETXEN_NIC is not set 709# CONFIG_NETXEN_NIC is not set
613# CONFIG_NIU is not set 710# CONFIG_NIU is not set
711# CONFIG_MLX4_EN is not set
614# CONFIG_MLX4_CORE is not set 712# CONFIG_MLX4_CORE is not set
615# CONFIG_TEHUTI is not set 713# CONFIG_TEHUTI is not set
714# CONFIG_BNX2X is not set
715# CONFIG_QLGE is not set
716# CONFIG_SFC is not set
717# CONFIG_BE2NET is not set
616# CONFIG_TR is not set 718# CONFIG_TR is not set
617 719
618# 720#
@@ -620,13 +722,16 @@ CONFIG_NETDEV_10000=y
620# 722#
621# CONFIG_WLAN_PRE80211 is not set 723# CONFIG_WLAN_PRE80211 is not set
622# CONFIG_WLAN_80211 is not set 724# CONFIG_WLAN_80211 is not set
725
726#
727# Enable WiMAX (Networking options) to see the WiMAX drivers
728#
623# CONFIG_WAN is not set 729# CONFIG_WAN is not set
624# CONFIG_FDDI is not set 730# CONFIG_FDDI is not set
625# CONFIG_HIPPI is not set 731# CONFIG_HIPPI is not set
626# CONFIG_PPP is not set 732# CONFIG_PPP is not set
627# CONFIG_SLIP is not set 733# CONFIG_SLIP is not set
628# CONFIG_NET_FC is not set 734# CONFIG_NET_FC is not set
629# CONFIG_SHAPER is not set
630# CONFIG_NETCONSOLE is not set 735# CONFIG_NETCONSOLE is not set
631# CONFIG_NETPOLL is not set 736# CONFIG_NETPOLL is not set
632# CONFIG_NET_POLL_CONTROLLER is not set 737# CONFIG_NET_POLL_CONTROLLER is not set
@@ -670,10 +775,13 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
670# Character devices 775# Character devices
671# 776#
672CONFIG_VT=y 777CONFIG_VT=y
778CONFIG_CONSOLE_TRANSLATIONS=y
673CONFIG_VT_CONSOLE=y 779CONFIG_VT_CONSOLE=y
674CONFIG_HW_CONSOLE=y 780CONFIG_HW_CONSOLE=y
675# CONFIG_VT_HW_CONSOLE_BINDING is not set 781# CONFIG_VT_HW_CONSOLE_BINDING is not set
782CONFIG_DEVKMEM=y
676# CONFIG_SERIAL_NONSTANDARD is not set 783# CONFIG_SERIAL_NONSTANDARD is not set
784# CONFIG_NOZOMI is not set
677 785
678# 786#
679# Serial drivers 787# Serial drivers
@@ -692,11 +800,12 @@ CONFIG_SERIAL_CORE=y
692CONFIG_SERIAL_CORE_CONSOLE=y 800CONFIG_SERIAL_CORE_CONSOLE=y
693# CONFIG_SERIAL_JSM is not set 801# CONFIG_SERIAL_JSM is not set
694CONFIG_UNIX98_PTYS=y 802CONFIG_UNIX98_PTYS=y
803# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
695CONFIG_LEGACY_PTYS=y 804CONFIG_LEGACY_PTYS=y
696CONFIG_LEGACY_PTY_COUNT=256 805CONFIG_LEGACY_PTY_COUNT=256
697# CONFIG_IPMI_HANDLER is not set 806# CONFIG_IPMI_HANDLER is not set
698CONFIG_HW_RANDOM=y 807CONFIG_HW_RANDOM=y
699# CONFIG_NVRAM is not set 808# CONFIG_HW_RANDOM_TIMERIOMEM is not set
700# CONFIG_R3964 is not set 809# CONFIG_R3964 is not set
701# CONFIG_APPLICOM is not set 810# CONFIG_APPLICOM is not set
702# CONFIG_RAW_DRIVER is not set 811# CONFIG_RAW_DRIVER is not set
@@ -705,16 +814,14 @@ CONFIG_DEVPORT=y
705CONFIG_I2C=y 814CONFIG_I2C=y
706CONFIG_I2C_BOARDINFO=y 815CONFIG_I2C_BOARDINFO=y
707CONFIG_I2C_CHARDEV=y 816CONFIG_I2C_CHARDEV=y
817CONFIG_I2C_HELPER_AUTO=y
708 818
709# 819#
710# I2C Algorithms 820# I2C Hardware Bus support
711# 821#
712# CONFIG_I2C_ALGOBIT is not set
713# CONFIG_I2C_ALGOPCF is not set
714# CONFIG_I2C_ALGOPCA is not set
715 822
716# 823#
717# I2C Hardware Bus support 824# PC SMBus host controller drivers
718# 825#
719# CONFIG_I2C_ALI1535 is not set 826# CONFIG_I2C_ALI1535 is not set
720# CONFIG_I2C_ALI1563 is not set 827# CONFIG_I2C_ALI1563 is not set
@@ -722,50 +829,82 @@ CONFIG_I2C_CHARDEV=y
722# CONFIG_I2C_AMD756 is not set 829# CONFIG_I2C_AMD756 is not set
723# CONFIG_I2C_AMD8111 is not set 830# CONFIG_I2C_AMD8111 is not set
724# CONFIG_I2C_I801 is not set 831# CONFIG_I2C_I801 is not set
725# CONFIG_I2C_I810 is not set 832# CONFIG_I2C_ISCH is not set
726# CONFIG_I2C_PIIX4 is not set 833# CONFIG_I2C_PIIX4 is not set
727CONFIG_I2C_IOP3XX=y
728# CONFIG_I2C_NFORCE2 is not set 834# CONFIG_I2C_NFORCE2 is not set
729# CONFIG_I2C_OCORES is not set
730# CONFIG_I2C_PARPORT_LIGHT is not set
731# CONFIG_I2C_PROSAVAGE is not set
732# CONFIG_I2C_SAVAGE4 is not set
733# CONFIG_I2C_SIMTEC is not set
734# CONFIG_I2C_SIS5595 is not set 835# CONFIG_I2C_SIS5595 is not set
735# CONFIG_I2C_SIS630 is not set 836# CONFIG_I2C_SIS630 is not set
736# CONFIG_I2C_SIS96X is not set 837# CONFIG_I2C_SIS96X is not set
737# CONFIG_I2C_TAOS_EVM is not set
738# CONFIG_I2C_STUB is not set
739# CONFIG_I2C_VIA is not set 838# CONFIG_I2C_VIA is not set
740# CONFIG_I2C_VIAPRO is not set 839# CONFIG_I2C_VIAPRO is not set
840
841#
842# I2C system bus drivers (mostly embedded / system-on-chip)
843#
844# CONFIG_I2C_GPIO is not set
845CONFIG_I2C_IOP3XX=y
846# CONFIG_I2C_OCORES is not set
847# CONFIG_I2C_SIMTEC is not set
848
849#
850# External I2C/SMBus adapter drivers
851#
852# CONFIG_I2C_PARPORT_LIGHT is not set
853# CONFIG_I2C_TAOS_EVM is not set
854
855#
856# Graphics adapter I2C/DDC channel drivers
857#
741# CONFIG_I2C_VOODOO3 is not set 858# CONFIG_I2C_VOODOO3 is not set
742 859
743# 860#
861# Other I2C/SMBus bus drivers
862#
863# CONFIG_I2C_PCA_PLATFORM is not set
864# CONFIG_I2C_STUB is not set
865
866#
744# Miscellaneous I2C Chip support 867# Miscellaneous I2C Chip support
745# 868#
746# CONFIG_SENSORS_DS1337 is not set
747# CONFIG_SENSORS_DS1374 is not set
748# CONFIG_DS1682 is not set 869# CONFIG_DS1682 is not set
749# CONFIG_EEPROM_LEGACY is not set
750# CONFIG_SENSORS_PCF8574 is not set 870# CONFIG_SENSORS_PCF8574 is not set
871# CONFIG_PCF8575 is not set
751# CONFIG_SENSORS_PCA9539 is not set 872# CONFIG_SENSORS_PCA9539 is not set
752# CONFIG_SENSORS_PCF8591 is not set
753# CONFIG_SENSORS_MAX6875 is not set
754# CONFIG_SENSORS_TSL2550 is not set 873# CONFIG_SENSORS_TSL2550 is not set
755# CONFIG_I2C_DEBUG_CORE is not set 874# CONFIG_I2C_DEBUG_CORE is not set
756# CONFIG_I2C_DEBUG_ALGO is not set 875# CONFIG_I2C_DEBUG_ALGO is not set
757# CONFIG_I2C_DEBUG_BUS is not set 876# CONFIG_I2C_DEBUG_BUS is not set
758# CONFIG_I2C_DEBUG_CHIP is not set 877# CONFIG_I2C_DEBUG_CHIP is not set
878# CONFIG_SPI is not set
879CONFIG_ARCH_REQUIRE_GPIOLIB=y
880CONFIG_GPIOLIB=y
881# CONFIG_DEBUG_GPIO is not set
882# CONFIG_GPIO_SYSFS is not set
759 883
760# 884#
761# SPI support 885# Memory mapped GPIO expanders:
886#
887
888#
889# I2C GPIO expanders:
890#
891# CONFIG_GPIO_MAX732X is not set
892# CONFIG_GPIO_PCA953X is not set
893# CONFIG_GPIO_PCF857X is not set
894
895#
896# PCI GPIO expanders:
897#
898# CONFIG_GPIO_BT8XX is not set
899
900#
901# SPI GPIO expanders:
762# 902#
763# CONFIG_SPI is not set
764# CONFIG_SPI_MASTER is not set
765# CONFIG_W1 is not set 903# CONFIG_W1 is not set
766# CONFIG_POWER_SUPPLY is not set 904# CONFIG_POWER_SUPPLY is not set
767CONFIG_HWMON=y 905CONFIG_HWMON=y
768# CONFIG_HWMON_VID is not set 906# CONFIG_HWMON_VID is not set
907# CONFIG_SENSORS_AD7414 is not set
769# CONFIG_SENSORS_AD7418 is not set 908# CONFIG_SENSORS_AD7418 is not set
770# CONFIG_SENSORS_ADM1021 is not set 909# CONFIG_SENSORS_ADM1021 is not set
771# CONFIG_SENSORS_ADM1025 is not set 910# CONFIG_SENSORS_ADM1025 is not set
@@ -773,13 +912,17 @@ CONFIG_HWMON=y
773# CONFIG_SENSORS_ADM1029 is not set 912# CONFIG_SENSORS_ADM1029 is not set
774# CONFIG_SENSORS_ADM1031 is not set 913# CONFIG_SENSORS_ADM1031 is not set
775# CONFIG_SENSORS_ADM9240 is not set 914# CONFIG_SENSORS_ADM9240 is not set
915# CONFIG_SENSORS_ADT7462 is not set
776# CONFIG_SENSORS_ADT7470 is not set 916# CONFIG_SENSORS_ADT7470 is not set
917# CONFIG_SENSORS_ADT7473 is not set
918# CONFIG_SENSORS_ADT7475 is not set
777# CONFIG_SENSORS_ATXP1 is not set 919# CONFIG_SENSORS_ATXP1 is not set
778# CONFIG_SENSORS_DS1621 is not set 920# CONFIG_SENSORS_DS1621 is not set
779# CONFIG_SENSORS_I5K_AMB is not set 921# CONFIG_SENSORS_I5K_AMB is not set
780# CONFIG_SENSORS_F71805F is not set 922# CONFIG_SENSORS_F71805F is not set
781# CONFIG_SENSORS_F71882FG is not set 923# CONFIG_SENSORS_F71882FG is not set
782# CONFIG_SENSORS_F75375S is not set 924# CONFIG_SENSORS_F75375S is not set
925# CONFIG_SENSORS_G760A is not set
783# CONFIG_SENSORS_GL518SM is not set 926# CONFIG_SENSORS_GL518SM is not set
784# CONFIG_SENSORS_GL520SM is not set 927# CONFIG_SENSORS_GL520SM is not set
785# CONFIG_SENSORS_IT87 is not set 928# CONFIG_SENSORS_IT87 is not set
@@ -794,16 +937,23 @@ CONFIG_HWMON=y
794# CONFIG_SENSORS_LM90 is not set 937# CONFIG_SENSORS_LM90 is not set
795# CONFIG_SENSORS_LM92 is not set 938# CONFIG_SENSORS_LM92 is not set
796# CONFIG_SENSORS_LM93 is not set 939# CONFIG_SENSORS_LM93 is not set
940# CONFIG_SENSORS_LTC4215 is not set
941# CONFIG_SENSORS_LTC4245 is not set
942# CONFIG_SENSORS_LM95241 is not set
797# CONFIG_SENSORS_MAX1619 is not set 943# CONFIG_SENSORS_MAX1619 is not set
798# CONFIG_SENSORS_MAX6650 is not set 944# CONFIG_SENSORS_MAX6650 is not set
799# CONFIG_SENSORS_PC87360 is not set 945# CONFIG_SENSORS_PC87360 is not set
800# CONFIG_SENSORS_PC87427 is not set 946# CONFIG_SENSORS_PC87427 is not set
947# CONFIG_SENSORS_PCF8591 is not set
948# CONFIG_SENSORS_SHT15 is not set
801# CONFIG_SENSORS_SIS5595 is not set 949# CONFIG_SENSORS_SIS5595 is not set
802# CONFIG_SENSORS_DME1737 is not set 950# CONFIG_SENSORS_DME1737 is not set
803# CONFIG_SENSORS_SMSC47M1 is not set 951# CONFIG_SENSORS_SMSC47M1 is not set
804# CONFIG_SENSORS_SMSC47M192 is not set 952# CONFIG_SENSORS_SMSC47M192 is not set
805# CONFIG_SENSORS_SMSC47B397 is not set 953# CONFIG_SENSORS_SMSC47B397 is not set
954# CONFIG_SENSORS_ADS7828 is not set
806# CONFIG_SENSORS_THMC50 is not set 955# CONFIG_SENSORS_THMC50 is not set
956# CONFIG_SENSORS_TMP401 is not set
807# CONFIG_SENSORS_VIA686A is not set 957# CONFIG_SENSORS_VIA686A is not set
808# CONFIG_SENSORS_VT1211 is not set 958# CONFIG_SENSORS_VT1211 is not set
809# CONFIG_SENSORS_VT8231 is not set 959# CONFIG_SENSORS_VT8231 is not set
@@ -812,28 +962,38 @@ CONFIG_HWMON=y
812# CONFIG_SENSORS_W83792D is not set 962# CONFIG_SENSORS_W83792D is not set
813# CONFIG_SENSORS_W83793 is not set 963# CONFIG_SENSORS_W83793 is not set
814# CONFIG_SENSORS_W83L785TS is not set 964# CONFIG_SENSORS_W83L785TS is not set
965# CONFIG_SENSORS_W83L786NG is not set
815# CONFIG_SENSORS_W83627HF is not set 966# CONFIG_SENSORS_W83627HF is not set
816# CONFIG_SENSORS_W83627EHF is not set 967# CONFIG_SENSORS_W83627EHF is not set
817# CONFIG_HWMON_DEBUG_CHIP is not set 968# CONFIG_HWMON_DEBUG_CHIP is not set
969# CONFIG_THERMAL is not set
970# CONFIG_THERMAL_HWMON is not set
818# CONFIG_WATCHDOG is not set 971# CONFIG_WATCHDOG is not set
972CONFIG_SSB_POSSIBLE=y
819 973
820# 974#
821# Sonics Silicon Backplane 975# Sonics Silicon Backplane
822# 976#
823CONFIG_SSB_POSSIBLE=y
824# CONFIG_SSB is not set 977# CONFIG_SSB is not set
825 978
826# 979#
827# Multifunction device drivers 980# Multifunction device drivers
828# 981#
982# CONFIG_MFD_CORE is not set
829# CONFIG_MFD_SM501 is not set 983# CONFIG_MFD_SM501 is not set
830
831#
832# Multimedia devices
833#
834# CONFIG_VIDEO_DEV is not set
835# CONFIG_DVB_CORE is not set
836CONFIG_DAB=y
 984# CONFIG_MFD_ASIC3 is not set
 985# CONFIG_HTC_EGPIO is not set
 986# CONFIG_HTC_PASIC3 is not set
 987# CONFIG_TPS65010 is not set
 988# CONFIG_TWL4030_CORE is not set
 989# CONFIG_MFD_TMIO is not set
 990# CONFIG_MFD_TC6393XB is not set
991# CONFIG_PMIC_DA903X is not set
992# CONFIG_MFD_WM8400 is not set
993# CONFIG_MFD_WM8350_I2C is not set
994# CONFIG_MFD_PCF50633 is not set
995# CONFIG_AB3100_CORE is not set
996# CONFIG_MEDIA_SUPPORT is not set
837 997
838# 998#
839# Graphics support 999# Graphics support
@@ -854,15 +1014,16 @@ CONFIG_DAB=y
854# 1014#
855# CONFIG_VGA_CONSOLE is not set 1015# CONFIG_VGA_CONSOLE is not set
856CONFIG_DUMMY_CONSOLE=y 1016CONFIG_DUMMY_CONSOLE=y
857
858#
859# Sound
860#
861# CONFIG_SOUND is not set 1017# CONFIG_SOUND is not set
862CONFIG_HID_SUPPORT=y 1018CONFIG_HID_SUPPORT=y
863CONFIG_HID=y 1019CONFIG_HID=y
864# CONFIG_HID_DEBUG is not set 1020# CONFIG_HID_DEBUG is not set
865# CONFIG_HIDRAW is not set 1021# CONFIG_HIDRAW is not set
1022# CONFIG_HID_PID is not set
1023
1024#
1025# Special HID drivers
1026#
866CONFIG_USB_SUPPORT=y 1027CONFIG_USB_SUPPORT=y
867CONFIG_USB_ARCH_HAS_HCD=y 1028CONFIG_USB_ARCH_HAS_HCD=y
868CONFIG_USB_ARCH_HAS_OHCI=y 1029CONFIG_USB_ARCH_HAS_OHCI=y
@@ -870,14 +1031,21 @@ CONFIG_USB_ARCH_HAS_EHCI=y
870# CONFIG_USB is not set 1031# CONFIG_USB is not set
871 1032
872# 1033#
873# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
1034# Enable Host or Gadget support to see Inventra options
874# 1035#
875 1036
876# 1037#
877# USB Gadget Support
1038# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
878# 1039#
879# CONFIG_USB_GADGET is not set 1040# CONFIG_USB_GADGET is not set
1041
1042#
1043# OTG and related infrastructure
1044#
1045# CONFIG_UWB is not set
880# CONFIG_MMC is not set 1046# CONFIG_MMC is not set
1047# CONFIG_MEMSTICK is not set
1048# CONFIG_ACCESSIBILITY is not set
881# CONFIG_NEW_LEDS is not set 1049# CONFIG_NEW_LEDS is not set
882CONFIG_RTC_LIB=y 1050CONFIG_RTC_LIB=y
883# CONFIG_RTC_CLASS is not set 1051# CONFIG_RTC_CLASS is not set
@@ -893,6 +1061,12 @@ CONFIG_DMA_ENGINE=y
893# DMA Clients 1061# DMA Clients
894# 1062#
895CONFIG_NET_DMA=y 1063CONFIG_NET_DMA=y
1064# CONFIG_ASYNC_TX_DMA is not set
1065# CONFIG_DMATEST is not set
1066# CONFIG_AUXDISPLAY is not set
1067# CONFIG_REGULATOR is not set
1068# CONFIG_UIO is not set
1069# CONFIG_STAGING is not set
896 1070
897# 1071#
898# File systems 1072# File systems
@@ -901,10 +1075,11 @@ CONFIG_EXT2_FS=y
901# CONFIG_EXT2_FS_XATTR is not set 1075# CONFIG_EXT2_FS_XATTR is not set
902# CONFIG_EXT2_FS_XIP is not set 1076# CONFIG_EXT2_FS_XIP is not set
903CONFIG_EXT3_FS=y 1077CONFIG_EXT3_FS=y
1078# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
904CONFIG_EXT3_FS_XATTR=y 1079CONFIG_EXT3_FS_XATTR=y
905# CONFIG_EXT3_FS_POSIX_ACL is not set 1080# CONFIG_EXT3_FS_POSIX_ACL is not set
906# CONFIG_EXT3_FS_SECURITY is not set 1081# CONFIG_EXT3_FS_SECURITY is not set
907# CONFIG_EXT4DEV_FS is not set
1082# CONFIG_EXT4_FS is not set
908CONFIG_JBD=y 1083CONFIG_JBD=y
909CONFIG_FS_MBCACHE=y 1084CONFIG_FS_MBCACHE=y
910# CONFIG_REISERFS_FS is not set 1085# CONFIG_REISERFS_FS is not set
@@ -913,17 +1088,23 @@ CONFIG_FS_MBCACHE=y
913# CONFIG_XFS_FS is not set 1088# CONFIG_XFS_FS is not set
914# CONFIG_GFS2_FS is not set 1089# CONFIG_GFS2_FS is not set
915# CONFIG_OCFS2_FS is not set 1090# CONFIG_OCFS2_FS is not set
916# CONFIG_MINIX_FS is not set
917# CONFIG_ROMFS_FS is not set
1091# CONFIG_BTRFS_FS is not set
1092CONFIG_FILE_LOCKING=y
1093CONFIG_FSNOTIFY=y
1094CONFIG_DNOTIFY=y
918CONFIG_INOTIFY=y 1095CONFIG_INOTIFY=y
919CONFIG_INOTIFY_USER=y 1096CONFIG_INOTIFY_USER=y
920# CONFIG_QUOTA is not set 1097# CONFIG_QUOTA is not set
921CONFIG_DNOTIFY=y
922# CONFIG_AUTOFS_FS is not set 1098# CONFIG_AUTOFS_FS is not set
923# CONFIG_AUTOFS4_FS is not set 1099# CONFIG_AUTOFS4_FS is not set
924# CONFIG_FUSE_FS is not set 1100# CONFIG_FUSE_FS is not set
925 1101
926# 1102#
1103# Caches
1104#
1105# CONFIG_FSCACHE is not set
1106
1107#
927# CD-ROM/DVD Filesystems 1108# CD-ROM/DVD Filesystems
928# 1109#
929# CONFIG_ISO9660_FS is not set 1110# CONFIG_ISO9660_FS is not set
@@ -941,15 +1122,13 @@ CONFIG_DNOTIFY=y
941# 1122#
942CONFIG_PROC_FS=y 1123CONFIG_PROC_FS=y
943CONFIG_PROC_SYSCTL=y 1124CONFIG_PROC_SYSCTL=y
1125CONFIG_PROC_PAGE_MONITOR=y
944CONFIG_SYSFS=y 1126CONFIG_SYSFS=y
945CONFIG_TMPFS=y 1127CONFIG_TMPFS=y
946# CONFIG_TMPFS_POSIX_ACL is not set 1128# CONFIG_TMPFS_POSIX_ACL is not set
947# CONFIG_HUGETLB_PAGE is not set 1129# CONFIG_HUGETLB_PAGE is not set
948# CONFIG_CONFIGFS_FS is not set 1130# CONFIG_CONFIGFS_FS is not set
949
950#
951# Miscellaneous filesystems
952#
1131CONFIG_MISC_FILESYSTEMS=y
953# CONFIG_ADFS_FS is not set 1132# CONFIG_ADFS_FS is not set
954# CONFIG_AFFS_FS is not set 1133# CONFIG_AFFS_FS is not set
955# CONFIG_HFS_FS is not set 1134# CONFIG_HFS_FS is not set
@@ -959,29 +1138,31 @@ CONFIG_TMPFS=y
959# CONFIG_EFS_FS is not set 1138# CONFIG_EFS_FS is not set
960# CONFIG_JFFS2_FS is not set 1139# CONFIG_JFFS2_FS is not set
961CONFIG_CRAMFS=y 1140CONFIG_CRAMFS=y
1141# CONFIG_SQUASHFS is not set
962# CONFIG_VXFS_FS is not set 1142# CONFIG_VXFS_FS is not set
1143# CONFIG_MINIX_FS is not set
1144# CONFIG_OMFS_FS is not set
963# CONFIG_HPFS_FS is not set 1145# CONFIG_HPFS_FS is not set
964# CONFIG_QNX4FS_FS is not set 1146# CONFIG_QNX4FS_FS is not set
1147# CONFIG_ROMFS_FS is not set
965# CONFIG_SYSV_FS is not set 1148# CONFIG_SYSV_FS is not set
966# CONFIG_UFS_FS is not set 1149# CONFIG_UFS_FS is not set
1150# CONFIG_NILFS2_FS is not set
967CONFIG_NETWORK_FILESYSTEMS=y 1151CONFIG_NETWORK_FILESYSTEMS=y
968CONFIG_NFS_FS=y 1152CONFIG_NFS_FS=y
969CONFIG_NFS_V3=y 1153CONFIG_NFS_V3=y
970# CONFIG_NFS_V3_ACL is not set 1154# CONFIG_NFS_V3_ACL is not set
971# CONFIG_NFS_V4 is not set 1155# CONFIG_NFS_V4 is not set
972# CONFIG_NFS_DIRECTIO is not set
1156CONFIG_ROOT_NFS=y
973CONFIG_NFSD=y 1157CONFIG_NFSD=y
974CONFIG_NFSD_V3=y 1158CONFIG_NFSD_V3=y
975# CONFIG_NFSD_V3_ACL is not set 1159# CONFIG_NFSD_V3_ACL is not set
976# CONFIG_NFSD_V4 is not set 1160# CONFIG_NFSD_V4 is not set
977# CONFIG_NFSD_TCP is not set
978CONFIG_ROOT_NFS=y
979CONFIG_LOCKD=y 1161CONFIG_LOCKD=y
980CONFIG_LOCKD_V4=y 1162CONFIG_LOCKD_V4=y
981CONFIG_EXPORTFS=y 1163CONFIG_EXPORTFS=y
982CONFIG_NFS_COMMON=y 1164CONFIG_NFS_COMMON=y
983CONFIG_SUNRPC=y 1165CONFIG_SUNRPC=y
984# CONFIG_SUNRPC_BIND34 is not set
985# CONFIG_RPCSEC_GSS_KRB5 is not set 1166# CONFIG_RPCSEC_GSS_KRB5 is not set
986# CONFIG_RPCSEC_GSS_SPKM3 is not set 1167# CONFIG_RPCSEC_GSS_SPKM3 is not set
987# CONFIG_SMB_FS is not set 1168# CONFIG_SMB_FS is not set
@@ -1013,9 +1194,6 @@ CONFIG_MSDOS_PARTITION=y
1013# CONFIG_SYSV68_PARTITION is not set 1194# CONFIG_SYSV68_PARTITION is not set
1014# CONFIG_NLS is not set 1195# CONFIG_NLS is not set
1015# CONFIG_DLM is not set 1196# CONFIG_DLM is not set
1016CONFIG_INSTRUMENTATION=y
1017# CONFIG_PROFILING is not set
1018# CONFIG_MARKERS is not set
1019 1197
1020# 1198#
1021# Kernel hacking 1199# Kernel hacking
@@ -1023,6 +1201,7 @@ CONFIG_INSTRUMENTATION=y
1023# CONFIG_PRINTK_TIME is not set 1201# CONFIG_PRINTK_TIME is not set
1024CONFIG_ENABLE_WARN_DEPRECATED=y 1202CONFIG_ENABLE_WARN_DEPRECATED=y
1025CONFIG_ENABLE_MUST_CHECK=y 1203CONFIG_ENABLE_MUST_CHECK=y
1204CONFIG_FRAME_WARN=1024
1026CONFIG_MAGIC_SYSRQ=y 1205CONFIG_MAGIC_SYSRQ=y
1027# CONFIG_UNUSED_SYMBOLS is not set 1206# CONFIG_UNUSED_SYMBOLS is not set
1028# CONFIG_DEBUG_FS is not set 1207# CONFIG_DEBUG_FS is not set
@@ -1030,10 +1209,17 @@ CONFIG_MAGIC_SYSRQ=y
1030CONFIG_DEBUG_KERNEL=y 1209CONFIG_DEBUG_KERNEL=y
1031# CONFIG_DEBUG_SHIRQ is not set 1210# CONFIG_DEBUG_SHIRQ is not set
1032CONFIG_DETECT_SOFTLOCKUP=y 1211CONFIG_DETECT_SOFTLOCKUP=y
1212# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
1213CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
1214CONFIG_DETECT_HUNG_TASK=y
1215# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
1216CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
1033CONFIG_SCHED_DEBUG=y 1217CONFIG_SCHED_DEBUG=y
1034# CONFIG_SCHEDSTATS is not set 1218# CONFIG_SCHEDSTATS is not set
1035# CONFIG_TIMER_STATS is not set 1219# CONFIG_TIMER_STATS is not set
1220# CONFIG_DEBUG_OBJECTS is not set
1036# CONFIG_DEBUG_SLAB is not set 1221# CONFIG_DEBUG_SLAB is not set
1222# CONFIG_DEBUG_KMEMLEAK is not set
1037# CONFIG_DEBUG_RT_MUTEXES is not set 1223# CONFIG_DEBUG_RT_MUTEXES is not set
1038# CONFIG_RT_MUTEX_TESTER is not set 1224# CONFIG_RT_MUTEX_TESTER is not set
1039# CONFIG_DEBUG_SPINLOCK is not set 1225# CONFIG_DEBUG_SPINLOCK is not set
@@ -1047,16 +1233,41 @@ CONFIG_SCHED_DEBUG=y
1047CONFIG_DEBUG_BUGVERBOSE=y 1233CONFIG_DEBUG_BUGVERBOSE=y
1048# CONFIG_DEBUG_INFO is not set 1234# CONFIG_DEBUG_INFO is not set
1049# CONFIG_DEBUG_VM is not set 1235# CONFIG_DEBUG_VM is not set
1236# CONFIG_DEBUG_WRITECOUNT is not set
1237CONFIG_DEBUG_MEMORY_INIT=y
1050# CONFIG_DEBUG_LIST is not set 1238# CONFIG_DEBUG_LIST is not set
1051# CONFIG_DEBUG_SG is not set 1239# CONFIG_DEBUG_SG is not set
1240# CONFIG_DEBUG_NOTIFIERS is not set
1052CONFIG_FRAME_POINTER=y 1241CONFIG_FRAME_POINTER=y
1053# CONFIG_FORCED_INLINING is not set
1054# CONFIG_BOOT_PRINTK_DELAY is not set 1242# CONFIG_BOOT_PRINTK_DELAY is not set
1055# CONFIG_RCU_TORTURE_TEST is not set 1243# CONFIG_RCU_TORTURE_TEST is not set
1244# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1245# CONFIG_BACKTRACE_SELF_TEST is not set
1246# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
1056# CONFIG_FAULT_INJECTION is not set 1247# CONFIG_FAULT_INJECTION is not set
1248# CONFIG_LATENCYTOP is not set
1249# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1250# CONFIG_PAGE_POISONING is not set
1251CONFIG_HAVE_FUNCTION_TRACER=y
1252CONFIG_TRACING_SUPPORT=y
1253CONFIG_FTRACE=y
1254# CONFIG_FUNCTION_TRACER is not set
1255# CONFIG_SCHED_TRACER is not set
1256# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1257# CONFIG_BOOT_TRACER is not set
1258CONFIG_BRANCH_PROFILE_NONE=y
1259# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
1260# CONFIG_PROFILE_ALL_BRANCHES is not set
1261# CONFIG_STACK_TRACER is not set
1262# CONFIG_KMEMTRACE is not set
1263# CONFIG_WORKQUEUE_TRACER is not set
1264# CONFIG_BLK_DEV_IO_TRACE is not set
1057# CONFIG_SAMPLES is not set 1265# CONFIG_SAMPLES is not set
1266CONFIG_HAVE_ARCH_KGDB=y
1267# CONFIG_KGDB is not set
1058CONFIG_DEBUG_USER=y 1268CONFIG_DEBUG_USER=y
1059# CONFIG_DEBUG_ERRORS is not set 1269# CONFIG_DEBUG_ERRORS is not set
1270# CONFIG_DEBUG_STACK_USAGE is not set
1060CONFIG_DEBUG_LL=y 1271CONFIG_DEBUG_LL=y
1061# CONFIG_DEBUG_ICEDCC is not set 1272# CONFIG_DEBUG_ICEDCC is not set
1062 1273
@@ -1065,24 +1276,117 @@ CONFIG_DEBUG_LL=y
1065# 1276#
1066# CONFIG_KEYS is not set 1277# CONFIG_KEYS is not set
1067# CONFIG_SECURITY is not set 1278# CONFIG_SECURITY is not set
1279# CONFIG_SECURITYFS is not set
1068# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1280# CONFIG_SECURITY_FILE_CAPABILITIES is not set
1069CONFIG_XOR_BLOCKS=y 1281CONFIG_XOR_BLOCKS=y
1070CONFIG_ASYNC_CORE=y 1282CONFIG_ASYNC_CORE=y
1071CONFIG_ASYNC_MEMCPY=y 1283CONFIG_ASYNC_MEMCPY=y
1072CONFIG_ASYNC_XOR=y 1284CONFIG_ASYNC_XOR=y
1073# CONFIG_CRYPTO is not set
1285CONFIG_CRYPTO=y
1286
1287#
1288# Crypto core or helper
1289#
1290# CONFIG_CRYPTO_FIPS is not set
1291# CONFIG_CRYPTO_MANAGER is not set
1292# CONFIG_CRYPTO_MANAGER2 is not set
1293# CONFIG_CRYPTO_GF128MUL is not set
1294# CONFIG_CRYPTO_NULL is not set
1295# CONFIG_CRYPTO_CRYPTD is not set
1296# CONFIG_CRYPTO_AUTHENC is not set
1297# CONFIG_CRYPTO_TEST is not set
1298
1299#
1300# Authenticated Encryption with Associated Data
1301#
1302# CONFIG_CRYPTO_CCM is not set
1303# CONFIG_CRYPTO_GCM is not set
1304# CONFIG_CRYPTO_SEQIV is not set
1305
1306#
1307# Block modes
1308#
1309# CONFIG_CRYPTO_CBC is not set
1310# CONFIG_CRYPTO_CTR is not set
1311# CONFIG_CRYPTO_CTS is not set
1312# CONFIG_CRYPTO_ECB is not set
1313# CONFIG_CRYPTO_LRW is not set
1314# CONFIG_CRYPTO_PCBC is not set
1315# CONFIG_CRYPTO_XTS is not set
1316
1317#
1318# Hash modes
1319#
1320# CONFIG_CRYPTO_HMAC is not set
1321# CONFIG_CRYPTO_XCBC is not set
1322
1323#
1324# Digest
1325#
1326# CONFIG_CRYPTO_CRC32C is not set
1327# CONFIG_CRYPTO_MD4 is not set
1328# CONFIG_CRYPTO_MD5 is not set
1329# CONFIG_CRYPTO_MICHAEL_MIC is not set
1330# CONFIG_CRYPTO_RMD128 is not set
1331# CONFIG_CRYPTO_RMD160 is not set
1332# CONFIG_CRYPTO_RMD256 is not set
1333# CONFIG_CRYPTO_RMD320 is not set
1334# CONFIG_CRYPTO_SHA1 is not set
1335# CONFIG_CRYPTO_SHA256 is not set
1336# CONFIG_CRYPTO_SHA512 is not set
1337# CONFIG_CRYPTO_TGR192 is not set
1338# CONFIG_CRYPTO_WP512 is not set
1339
1340#
1341# Ciphers
1342#
1343# CONFIG_CRYPTO_AES is not set
1344# CONFIG_CRYPTO_ANUBIS is not set
1345# CONFIG_CRYPTO_ARC4 is not set
1346# CONFIG_CRYPTO_BLOWFISH is not set
1347# CONFIG_CRYPTO_CAMELLIA is not set
1348# CONFIG_CRYPTO_CAST5 is not set
1349# CONFIG_CRYPTO_CAST6 is not set
1350# CONFIG_CRYPTO_DES is not set
1351# CONFIG_CRYPTO_FCRYPT is not set
1352# CONFIG_CRYPTO_KHAZAD is not set
1353# CONFIG_CRYPTO_SALSA20 is not set
1354# CONFIG_CRYPTO_SEED is not set
1355# CONFIG_CRYPTO_SERPENT is not set
1356# CONFIG_CRYPTO_TEA is not set
1357# CONFIG_CRYPTO_TWOFISH is not set
1358
1359#
1360# Compression
1361#
1362# CONFIG_CRYPTO_DEFLATE is not set
1363# CONFIG_CRYPTO_ZLIB is not set
1364# CONFIG_CRYPTO_LZO is not set
1365
1366#
1367# Random Number Generation
1368#
1369# CONFIG_CRYPTO_ANSI_CPRNG is not set
1370CONFIG_CRYPTO_HW=y
1371# CONFIG_CRYPTO_DEV_HIFN_795X is not set
1372# CONFIG_BINARY_PRINTF is not set
1074 1373
1075# 1374#
1076# Library routines 1375# Library routines
1077# 1376#
1377CONFIG_GENERIC_FIND_LAST_BIT=y
1078# CONFIG_CRC_CCITT is not set 1378# CONFIG_CRC_CCITT is not set
1079# CONFIG_CRC16 is not set 1379# CONFIG_CRC16 is not set
1380# CONFIG_CRC_T10DIF is not set
1080# CONFIG_CRC_ITU_T is not set 1381# CONFIG_CRC_ITU_T is not set
1081# CONFIG_CRC32 is not set 1382# CONFIG_CRC32 is not set
1082# CONFIG_CRC7 is not set 1383# CONFIG_CRC7 is not set
1083# CONFIG_LIBCRC32C is not set 1384# CONFIG_LIBCRC32C is not set
1084CONFIG_ZLIB_INFLATE=y 1385CONFIG_ZLIB_INFLATE=y
1085CONFIG_PLIST=y
1386CONFIG_DECOMPRESS_GZIP=y
1387CONFIG_DECOMPRESS_BZIP2=y
1388CONFIG_DECOMPRESS_LZMA=y
1086CONFIG_HAS_IOMEM=y 1389CONFIG_HAS_IOMEM=y
1087CONFIG_HAS_IOPORT=y 1390CONFIG_HAS_IOPORT=y
1088CONFIG_HAS_DMA=y 1391CONFIG_HAS_DMA=y
1392CONFIG_NLATTR=y
diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h
index a0e39d5d00c9..234a3fc1c78e 100644
--- a/arch/arm/include/asm/glue.h
+++ b/arch/arm/include/asm/glue.h
@@ -120,25 +120,39 @@
120#endif 120#endif
121 121
122/* 122/*
123 * Prefetch abort handler. If the CPU has an IFAR use that, otherwise
124 * use the address of the aborted instruction
 123 * Prefetch Abort Model
 124 * ================
125 *
126 * We have the following to choose from:
127 * legacy - no IFSR, no IFAR
128 * v6 - ARMv6: IFSR, no IFAR
129 * v7 - ARMv7: IFSR and IFAR
125 */ 130 */
131
126#undef CPU_PABORT_HANDLER 132#undef CPU_PABORT_HANDLER
127#undef MULTI_PABORT 133#undef MULTI_PABORT
128 134
129#ifdef CONFIG_CPU_PABRT_IFAR 135#ifdef CONFIG_CPU_PABRT_LEGACY
136# ifdef CPU_PABORT_HANDLER
137# define MULTI_PABORT 1
138# else
139# define CPU_PABORT_HANDLER legacy_pabort
140# endif
141#endif
142
143#ifdef CONFIG_CPU_PABRT_V6
130# ifdef CPU_PABORT_HANDLER 144# ifdef CPU_PABORT_HANDLER
131# define MULTI_PABORT 1 145# define MULTI_PABORT 1
132# else 146# else
133# define CPU_PABORT_HANDLER(reg, insn) mrc p15, 0, reg, cr6, cr0, 2
 147# define CPU_PABORT_HANDLER v6_pabort
134# endif 148# endif
135#endif 149#endif
136 150
137#ifdef CONFIG_CPU_PABRT_NOIFAR 151#ifdef CONFIG_CPU_PABRT_V7
138# ifdef CPU_PABORT_HANDLER 152# ifdef CPU_PABORT_HANDLER
139# define MULTI_PABORT 1 153# define MULTI_PABORT 1
140# else 154# else
141# define CPU_PABORT_HANDLER(reg, insn) mov reg, insn
 155# define CPU_PABORT_HANDLER v7_pabort
142# endif 156# endif
143#endif 157#endif
144 158
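The glue.h hunk above drops the old inline CPU_PABORT_HANDLER(reg, insn) macros in favour of named per-model handler routines (legacy_pabort, v6_pabort, v7_pabort). A minimal C sketch of what the three abort models mean for the handler; it is an illustration only, grounded in the removed mrc line and the new comment above, not a rendering of the real pabort-*.S files:

/*
 * Illustration only -- the real handlers are the assembly files
 * pabort-legacy.S / pabort-v6.S / pabort-v7.S added by this patch.
 * legacy: no IFSR and no IFAR, so the faulting address is simply the
 *         address of the aborted instruction (the PC passed in).
 * v6:     an IFSR exists but no IFAR; the address is still the PC.
 * v7:     an IFAR supplies the faulting address directly, via the same
 *         "mrc p15, 0, ..., cr6, cr0, 2" read that the old
 *         CONFIG_CPU_PABRT_IFAR macro expanded to.
 */
static inline unsigned long pabort_address(unsigned long pc_of_abort)
{
	unsigned long addr = pc_of_abort;
#ifdef CONFIG_CPU_PABRT_V7
	asm("mrc p15, 0, %0, c6, c0, 2" : "=r" (addr));	/* read IFAR */
#endif
	return addr;
}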
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 4b8e7f559929..8d60ad267e3a 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -215,6 +215,7 @@ extern int iop3xx_get_init_atu(void);
215 * IOP3XX I/O and Mem space regions for PCI autoconfiguration 215 * IOP3XX I/O and Mem space regions for PCI autoconfiguration
216 */ 216 */
217#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000 217#define IOP3XX_PCI_LOWER_MEM_PA 0x80000000
218#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000
218 219
219#define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000 220#define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000
220#define IOP3XX_PCI_LOWER_IO_PA 0x90000000 221#define IOP3XX_PCI_LOWER_IO_PA 0x90000000
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
new file mode 100644
index 000000000000..59303e200845
--- /dev/null
+++ b/arch/arm/include/asm/smp_plat.h
@@ -0,0 +1,16 @@
1/*
2 * ARM specific SMP header, this contains our implementation
3 * details.
4 */
5#ifndef __ASMARM_SMP_PLAT_H
6#define __ASMARM_SMP_PLAT_H
7
8#include <asm/cputype.h>
9
10/* all SMP configurations have the extended CPUID registers */
11static inline int tlb_ops_need_broadcast(void)
12{
13 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
14}
15
16#endif
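The new tlb_ops_need_broadcast() helper reads the maintenance-broadcast field of ID_MMFR3 (the >> 12 in the code); a value below 2 means TLB maintenance operations are not broadcast in hardware and must be pushed to the other cores by IPI. A sketch of the intended call pattern -- the wrapper name here is made up, while ipi_flush_tlb_all() and local_flush_tlb_all() appear in the smp.c hunks further down:

#include <linux/smp.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

/* Sketch: broadcast a TLB flush only where the hardware does not. */
static void example_flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}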
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 89f7eade20af..7020217fc49f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -456,6 +456,7 @@
456 * Unimplemented (or alternatively implemented) syscalls 456 * Unimplemented (or alternatively implemented) syscalls
457 */ 457 */
458#define __IGNORE_fadvise64_64 1 458#define __IGNORE_fadvise64_64 1
459#define __IGNORE_migrate_pages 1
459 460
460#endif /* __KERNEL__ */ 461#endif /* __KERNEL__ */
461#endif /* __ASM_ARM_UNISTD_H */ 462#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0a2ba51cf35d..322410be573c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -311,22 +311,16 @@ __pabt_svc:
311 tst r3, #PSR_I_BIT 311 tst r3, #PSR_I_BIT
312 biceq r9, r9, #PSR_I_BIT 312 biceq r9, r9, #PSR_I_BIT
313 313
314 @
315 @ set args, then call main handler
316 @
317 @ r0 - address of faulting instruction
318 @ r1 - pointer to registers on stack
319 @
320#ifdef MULTI_PABORT
321 mov r0, r2 @ pass address of aborted instruction. 314 mov r0, r2 @ pass address of aborted instruction.
315#ifdef MULTI_PABORT
322 ldr r4, .LCprocfns 316 ldr r4, .LCprocfns
323 mov lr, pc 317 mov lr, pc
324 ldr pc, [r4, #PROCESSOR_PABT_FUNC] 318 ldr pc, [r4, #PROCESSOR_PABT_FUNC]
325#else 319#else
326 CPU_PABORT_HANDLER(r0, r2) 320 bl CPU_PABORT_HANDLER
327#endif 321#endif
328 msr cpsr_c, r9 @ Maybe enable interrupts 322 msr cpsr_c, r9 @ Maybe enable interrupts
329 mov r1, sp @ regs 323 mov r2, sp @ regs
330 bl do_PrefetchAbort @ call abort handler 324 bl do_PrefetchAbort @ call abort handler
331 325
332 @ 326 @
@@ -701,16 +695,16 @@ ENDPROC(__und_usr_unknown)
701__pabt_usr: 695__pabt_usr:
702 usr_entry 696 usr_entry
703 697
704#ifdef MULTI_PABORT
705 mov r0, r2 @ pass address of aborted instruction. 698 mov r0, r2 @ pass address of aborted instruction.
699#ifdef MULTI_PABORT
706 ldr r4, .LCprocfns 700 ldr r4, .LCprocfns
707 mov lr, pc 701 mov lr, pc
708 ldr pc, [r4, #PROCESSOR_PABT_FUNC] 702 ldr pc, [r4, #PROCESSOR_PABT_FUNC]
709#else 703#else
710 CPU_PABORT_HANDLER(r0, r2) 704 bl CPU_PABORT_HANDLER
711#endif 705#endif
712 enable_irq @ Enable interrupts 706 enable_irq @ Enable interrupts
713 mov r1, sp @ regs 707 mov r2, sp @ regs
714 bl do_PrefetchAbort @ call abort handler 708 bl do_PrefetchAbort @ call abort handler
715 UNWIND(.fnend ) 709 UNWIND(.fnend )
716 /* fall through */ 710 /* fall through */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 807cfebb0f44..f0fe95b7085d 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -126,7 +126,7 @@ ENTRY(__gnu_mcount_nc)
126 cmp r0, r2 126 cmp r0, r2
127 bne gnu_trace 127 bne gnu_trace
128 ldmia sp!, {r0-r3, ip, lr} 128 ldmia sp!, {r0-r3, ip, lr}
129 bx ip 129 mov pc, ip
130 130
131gnu_trace: 131gnu_trace:
132 ldr r1, [sp, #20] @ lr of instrumented routine 132 ldr r1, [sp, #20] @ lr of instrumented routine
@@ -135,7 +135,7 @@ gnu_trace:
135 mov lr, pc 135 mov lr, pc
136 mov pc, r2 136 mov pc, r2
137 ldmia sp!, {r0-r3, ip, lr} 137 ldmia sp!, {r0-r3, ip, lr}
138 bx ip 138 mov pc, ip
139 139
140ENTRY(mcount) 140ENTRY(mcount)
141 stmdb sp!, {r0-r3, lr} 141 stmdb sp!, {r0-r3, lr}
@@ -425,13 +425,6 @@ sys_mmap2:
425#endif 425#endif
426ENDPROC(sys_mmap2) 426ENDPROC(sys_mmap2)
427 427
428ENTRY(pabort_ifar)
429 mrc p15, 0, r0, cr6, cr0, 2
430ENTRY(pabort_noifar)
431 mov pc, lr
432ENDPROC(pabort_ifar)
433ENDPROC(pabort_noifar)
434
435#ifdef CONFIG_OABI_COMPAT 428#ifdef CONFIG_OABI_COMPAT
436 429
437/* 430/*
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 93ad576b2d74..885a7214418d 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -13,6 +13,7 @@
13 13
14#define ATAG_CORE 0x54410001 14#define ATAG_CORE 0x54410001
15#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) 15#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
16#define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2)
16 17
17 .align 2 18 .align 2
18 .type __switch_data, %object 19 .type __switch_data, %object
@@ -251,7 +252,8 @@ __vet_atags:
251 bne 1f 252 bne 1f
252 253
253 ldr r5, [r2, #0] @ is first tag ATAG_CORE? 254 ldr r5, [r2, #0] @ is first tag ATAG_CORE?
254 subs r5, r5, #ATAG_CORE_SIZE 255 cmp r5, #ATAG_CORE_SIZE
256 cmpne r5, #ATAG_CORE_SIZE_EMPTY
255 bne 1f 257 bne 1f
256 ldr r5, [r2, #4] 258 ldr r5, [r2, #4]
257 ldr r6, =ATAG_CORE 259 ldr r6, =ATAG_CORE
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e0d32770bb3d..57162af53dc9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -36,6 +36,7 @@
36#include <asm/tlbflush.h> 36#include <asm/tlbflush.h>
37#include <asm/ptrace.h> 37#include <asm/ptrace.h>
38#include <asm/localtimer.h> 38#include <asm/localtimer.h>
39#include <asm/smp_plat.h>
39 40
40/* 41/*
41 * as from 2.5, kernels no longer have an init_tasks structure 42 * as from 2.5, kernels no longer have an init_tasks structure
@@ -153,7 +154,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
153/* 154/*
154 * __cpu_disable runs on the processor to be shutdown. 155 * __cpu_disable runs on the processor to be shutdown.
155 */ 156 */
156int __cpuexit __cpu_disable(void) 157int __cpu_disable(void)
157{ 158{
158 unsigned int cpu = smp_processor_id(); 159 unsigned int cpu = smp_processor_id();
159 struct task_struct *p; 160 struct task_struct *p;
@@ -200,7 +201,7 @@ int __cpuexit __cpu_disable(void)
200 * called on the thread which is asking for a CPU to be shutdown - 201 * called on the thread which is asking for a CPU to be shutdown -
201 * waits until shutdown has completed, or it is timed out. 202 * waits until shutdown has completed, or it is timed out.
202 */ 203 */
203void __cpuexit __cpu_die(unsigned int cpu) 204void __cpu_die(unsigned int cpu)
204{ 205{
205 if (!platform_cpu_kill(cpu)) 206 if (!platform_cpu_kill(cpu))
206 printk("CPU%u: unable to kill\n", cpu); 207 printk("CPU%u: unable to kill\n", cpu);
@@ -214,7 +215,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
214 * of the other hotplug-cpu capable cores, so presumably coming 215 * of the other hotplug-cpu capable cores, so presumably coming
215 * out of idle fixes this. 216 * out of idle fixes this.
216 */ 217 */
217void __cpuexit cpu_die(void) 218void __ref cpu_die(void)
218{ 219{
219 unsigned int cpu = smp_processor_id(); 220 unsigned int cpu = smp_processor_id();
220 221
@@ -586,12 +587,6 @@ struct tlb_args {
586 unsigned long ta_end; 587 unsigned long ta_end;
587}; 588};
588 589
589/* all SMP configurations have the extended CPUID registers */
590static inline int tlb_ops_need_broadcast(void)
591{
592 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
593}
594
595static inline void ipi_flush_tlb_all(void *ignored) 590static inline void ipi_flush_tlb_all(void *ignored)
596{ 591{
597 local_flush_tlb_all(); 592 local_flush_tlb_all();
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index d8c88c633c6f..a73a34dccf2a 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -166,10 +166,12 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
166 clockevents_register_device(clk); 166 clockevents_register_device(clk);
167} 167}
168 168
169#ifdef CONFIG_HOTPLUG_CPU
169/* 170/*
170 * take a local timer down 171 * take a local timer down
171 */ 172 */
172void __cpuexit twd_timer_stop(void) 173void twd_timer_stop(void)
173{ 174{
174 __raw_writel(0, twd_base + TWD_TIMER_CONTROL); 175 __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
175} 176}
177#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 57eb0f6f6005..467b69ed1021 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -418,12 +418,14 @@ static int bad_syscall(int n, struct pt_regs *regs)
418static inline void 418static inline void
419do_cache_op(unsigned long start, unsigned long end, int flags) 419do_cache_op(unsigned long start, unsigned long end, int flags)
420{ 420{
421 struct mm_struct *mm = current->active_mm;
421 struct vm_area_struct *vma; 422 struct vm_area_struct *vma;
422 423
423 if (end < start || flags) 424 if (end < start || flags)
424 return; 425 return;
425 426
426	vma = find_vma(current->active_mm, start);
 427	down_read(&mm->mmap_sem);
428 vma = find_vma(mm, start);
427 if (vma && vma->vm_start < end) { 429 if (vma && vma->vm_start < end) {
428 if (start < vma->vm_start) 430 if (start < vma->vm_start)
429 start = vma->vm_start; 431 start = vma->vm_start;
@@ -432,6 +434,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
432 434
433 flush_cache_user_range(vma, start, end); 435 flush_cache_user_range(vma, start, end);
434 } 436 }
437 up_read(&mm->mmap_sem);
435} 438}
436 439
437/* 440/*
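The do_cache_op() change above is a locking fix: find_vma() walks the mm's VMA list, so it must be called with mmap_sem held at least for reading, which the old code did not do. A minimal sketch of the rule the fix enforces; the function and variable names below are made up for illustration:

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch: is [start, end) fully covered by one VMA of this mm? */
static int vma_covers_range(struct mm_struct *mm,
			    unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	int covered = 0;

	down_read(&mm->mmap_sem);	/* find_vma() requires this */
	vma = find_vma(mm, start);
	if (vma && vma->vm_start <= start && end <= vma->vm_end)
		covered = 1;
	up_read(&mm->mmap_sem);

	return covered;
}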
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index 492c649f451e..4b4f69251b31 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -31,7 +31,6 @@
31#include <linux/clocksource.h> 31#include <linux/clocksource.h>
32#include <linux/clockchips.h> 32#include <linux/clockchips.h>
33 33
34#include <linux/amba/bus.h>
35#include <mach/csp/mm_addr.h> 34#include <mach/csp/mm_addr.h>
36#include <mach/hardware.h> 35#include <mach/hardware.h>
37#include <asm/clkdev.h> 36#include <asm/clkdev.h>
@@ -45,7 +44,6 @@
45#include <asm/mach/irq.h> 44#include <asm/mach/irq.h>
46#include <asm/mach/time.h> 45#include <asm/mach/time.h>
47#include <asm/mach/map.h> 46#include <asm/mach/map.h>
48#include <asm/mach/mmc.h>
49 47
50#include <cfg_global.h> 48#include <cfg_global.h>
51 49
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index a1d5e7dac741..52dd8046b305 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -35,7 +35,6 @@
35#include <mach/common.h> 35#include <mach/common.h>
36#include <mach/i2c.h> 36#include <mach/i2c.h>
37#include <mach/serial.h> 37#include <mach/serial.h>
38#include <mach/common.h>
39#include <mach/mmc.h> 38#include <mach/mmc.h>
40#include <mach/nand.h> 39#include <mach/nand.h>
41 40
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index f1d72b225450..901cc205015e 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -486,7 +486,7 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
486 return ret; 486 return ret;
487} 487}
488 488
489struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *sys) 489struct pci_bus * __init pci_v3_scan_bus(int nr, struct pci_sys_data *sys)
490{ 490{
491 return pci_scan_bus(sys->busnr, &pci_v3_ops, sys); 491 return pci_scan_bus(sys->busnr, &pci_v3_ops, sys);
492} 492}
diff --git a/arch/arm/mach-iop32x/include/mach/iop32x.h b/arch/arm/mach-iop32x/include/mach/iop32x.h
index abd9eb49f103..941f363aca56 100644
--- a/arch/arm/mach-iop32x/include/mach/iop32x.h
+++ b/arch/arm/mach-iop32x/include/mach/iop32x.h
@@ -31,7 +31,5 @@
31#define IOP32X_MAX_RAM_SIZE 0x40000000UL 31#define IOP32X_MAX_RAM_SIZE 0x40000000UL
32#define IOP3XX_MAX_RAM_SIZE IOP32X_MAX_RAM_SIZE 32#define IOP3XX_MAX_RAM_SIZE IOP32X_MAX_RAM_SIZE
33#define IOP3XX_PCI_LOWER_MEM_BA 0x80000000 33#define IOP3XX_PCI_LOWER_MEM_BA 0x80000000
34#define IOP32X_PCI_MEM_WINDOW_SIZE 0x04000000
35#define IOP3XX_PCI_MEM_WINDOW_SIZE IOP32X_PCI_MEM_WINDOW_SIZE
36 34
37#endif 35#endif
diff --git a/arch/arm/mach-iop33x/include/mach/iop33x.h b/arch/arm/mach-iop33x/include/mach/iop33x.h
index 24567316ec88..a89c0a234bff 100644
--- a/arch/arm/mach-iop33x/include/mach/iop33x.h
+++ b/arch/arm/mach-iop33x/include/mach/iop33x.h
@@ -36,8 +36,6 @@
36#define IOP33X_MAX_RAM_SIZE 0x80000000UL 36#define IOP33X_MAX_RAM_SIZE 0x80000000UL
37#define IOP3XX_MAX_RAM_SIZE IOP33X_MAX_RAM_SIZE 37#define IOP3XX_MAX_RAM_SIZE IOP33X_MAX_RAM_SIZE
38#define IOP3XX_PCI_LOWER_MEM_BA (PHYS_OFFSET + IOP33X_MAX_RAM_SIZE) 38#define IOP3XX_PCI_LOWER_MEM_BA (PHYS_OFFSET + IOP33X_MAX_RAM_SIZE)
39#define IOP33X_PCI_MEM_WINDOW_SIZE 0x08000000
40#define IOP3XX_PCI_MEM_WINDOW_SIZE IOP33X_PCI_MEM_WINDOW_SIZE
41 39
42 40
43#endif 41#endif
diff --git a/arch/arm/mach-ns9xxx/clock.c b/arch/arm/mach-ns9xxx/clock.c
index 44ed20d4a388..cf81cbc57544 100644
--- a/arch/arm/mach-ns9xxx/clock.c
+++ b/arch/arm/mach-ns9xxx/clock.c
@@ -195,7 +195,7 @@ static int clk_debugfs_open(struct inode *inode, struct file *file)
195 return single_open(file, clk_debugfs_show, NULL); 195 return single_open(file, clk_debugfs_show, NULL);
196} 196}
197 197
198static struct file_operations clk_debugfs_operations = { 198static const struct file_operations clk_debugfs_operations = {
199 .open = clk_debugfs_open, 199 .open = clk_debugfs_open,
200 .read = seq_read, 200 .read = seq_read,
201 .llseek = seq_lseek, 201 .llseek = seq_lseek,
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index fafcd32e6907..489556eecbd1 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -338,6 +338,13 @@ static struct omap_clk omap34xx_clks[] = {
338 */ 338 */
339#define SDRC_MPURATE_LOOPS 96 339#define SDRC_MPURATE_LOOPS 96
340 340
341/*
342 * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
343 * that are sourced by DPLL5, and both of these require this clock
344 * to be at 120 MHz for proper operation.
345 */
346#define DPLL5_FREQ_FOR_USBHOST 120000000
347
341/** 348/**
342 * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI 349 * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
343 * @clk: struct clk * being enabled 350 * @clk: struct clk * being enabled
@@ -1056,6 +1063,28 @@ void omap2_clk_prepare_for_reboot(void)
1056#endif 1063#endif
1057} 1064}
1058 1065
1066static void omap3_clk_lock_dpll5(void)
1067{
1068 struct clk *dpll5_clk;
1069 struct clk *dpll5_m2_clk;
1070
1071 dpll5_clk = clk_get(NULL, "dpll5_ck");
1072 clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
1073 clk_enable(dpll5_clk);
1074
1075 /* Enable autoidle to allow it to enter low power bypass */
1076 omap3_dpll_allow_idle(dpll5_clk);
1077
1078 /* Program dpll5_m2_clk divider for no division */
1079 dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
1080 clk_enable(dpll5_m2_clk);
1081 clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
1082
1083 clk_disable(dpll5_m2_clk);
1084 clk_disable(dpll5_clk);
1085 return;
1086}
1087
1059/* REVISIT: Move this init stuff out into clock.c */ 1088/* REVISIT: Move this init stuff out into clock.c */
1060 1089
1061/* 1090/*
@@ -1148,6 +1177,12 @@ int __init omap2_clk_init(void)
1148 */ 1177 */
1149 clk_enable_init_clocks(); 1178 clk_enable_init_clocks();
1150 1179
1180 /*
1181 * Lock DPLL5 and put it in autoidle.
1182 */
1183 if (omap_rev() >= OMAP3430_REV_ES2_0)
1184 omap3_clk_lock_dpll5();
1185
1151 /* Avoid sleeping during omap2_clk_prepare_for_reboot() */ 1186 /* Avoid sleeping during omap2_clk_prepare_for_reboot() */
1152 /* REVISIT: not yet ready for 343x */ 1187 /* REVISIT: not yet ready for 343x */
1153#if 0 1188#if 0
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 1b4c1600f8d8..2fc4d6abbd0a 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -541,7 +541,7 @@ static int __init pm_dbg_init(void)
541 printk(KERN_ERR "%s: only OMAP3 supported\n", __func__); 541 printk(KERN_ERR "%s: only OMAP3 supported\n", __func__);
542 return -ENODEV; 542 return -ENODEV;
543 } 543 }
544 544
545 d = debugfs_create_dir("pm_debug", NULL); 545 d = debugfs_create_dir("pm_debug", NULL);
546 if (IS_ERR(d)) 546 if (IS_ERR(d))
547 return PTR_ERR(d); 547 return PTR_ERR(d);
@@ -551,7 +551,7 @@ static int __init pm_dbg_init(void)
551 (void) debugfs_create_file("time", S_IRUGO, 551 (void) debugfs_create_file("time", S_IRUGO,
552 d, (void *)DEBUG_FILE_TIMERS, &debug_fops); 552 d, (void *)DEBUG_FILE_TIMERS, &debug_fops);
553 553
554 pwrdm_for_each(pwrdms_setup, (void *)d); 554 pwrdm_for_each_nolock(pwrdms_setup, (void *)d);
555 555
556 pm_dbg_dir = debugfs_create_dir("registers", d); 556 pm_dbg_dir = debugfs_create_dir("registers", d);
557 if (IS_ERR(pm_dbg_dir)) 557 if (IS_ERR(pm_dbg_dir))
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 0ff5a6c53aa0..378c2f618358 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -51,97 +51,112 @@ static void (*_omap_sram_idle)(u32 *addr, int save_state);
51 51
52static struct powerdomain *mpu_pwrdm; 52static struct powerdomain *mpu_pwrdm;
53 53
54/* PRCM Interrupt Handler for wakeups */
55static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
 54/*
 55 * PRCM Interrupt Handler Helper Function
56 *
57 * The purpose of this function is to clear any wake-up events latched
58 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
59 * may occur whilst attempting to clear a PM_WKST_x register and thus
60 * set another bit in this register. A while loop is used to ensure
61 * that any peripheral wake-up events occurring while attempting to
62 * clear the PM_WKST_x are detected and cleared.
63 */
64static int prcm_clear_mod_irqs(s16 module, u8 regs)
56{ 65{
57 u32 wkst, irqstatus_mpu; 66 u32 wkst, fclk, iclk, clken;
58 u32 fclk, iclk; 67 u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
59 68 u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
60 /* WKUP */ 69 u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
61 wkst = prm_read_mod_reg(WKUP_MOD, PM_WKST); 70 u16 grpsel_off = (regs == 3) ?
71 OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
72 int c = 0;
73
74 wkst = prm_read_mod_reg(module, wkst_off);
75 wkst &= prm_read_mod_reg(module, grpsel_off);
62 if (wkst) { 76 if (wkst) {
63 iclk = cm_read_mod_reg(WKUP_MOD, CM_ICLKEN); 77 iclk = cm_read_mod_reg(module, iclk_off);
64 fclk = cm_read_mod_reg(WKUP_MOD, CM_FCLKEN); 78 fclk = cm_read_mod_reg(module, fclk_off);
65 cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_ICLKEN); 79 while (wkst) {
66 cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_FCLKEN); 80 clken = wkst;
67 prm_write_mod_reg(wkst, WKUP_MOD, PM_WKST); 81 cm_set_mod_reg_bits(clken, module, iclk_off);
68 while (prm_read_mod_reg(WKUP_MOD, PM_WKST)) 82 /*
69 cpu_relax(); 83 * For USBHOST, we don't know whether HOST1 or
70 cm_write_mod_reg(iclk, WKUP_MOD, CM_ICLKEN); 84 * HOST2 woke us up, so enable both f-clocks
71 cm_write_mod_reg(fclk, WKUP_MOD, CM_FCLKEN); 85 */
86 if (module == OMAP3430ES2_USBHOST_MOD)
87 clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
88 cm_set_mod_reg_bits(clken, module, fclk_off);
89 prm_write_mod_reg(wkst, module, wkst_off);
90 wkst = prm_read_mod_reg(module, wkst_off);
91 c++;
92 }
93 cm_write_mod_reg(iclk, module, iclk_off);
94 cm_write_mod_reg(fclk, module, fclk_off);
72 } 95 }
73 96
74 /* CORE */ 97 return c;
75 wkst = prm_read_mod_reg(CORE_MOD, PM_WKST1); 98}
76 if (wkst) {
77 iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN1);
78 fclk = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
79 cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN1);
80 cm_set_mod_reg_bits(wkst, CORE_MOD, CM_FCLKEN1);
81 prm_write_mod_reg(wkst, CORE_MOD, PM_WKST1);
82 while (prm_read_mod_reg(CORE_MOD, PM_WKST1))
83 cpu_relax();
84 cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN1);
85 cm_write_mod_reg(fclk, CORE_MOD, CM_FCLKEN1);
86 }
87 wkst = prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3);
88 if (wkst) {
89 iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN3);
90 fclk = cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
91 cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN3);
92 cm_set_mod_reg_bits(wkst, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
93 prm_write_mod_reg(wkst, CORE_MOD, OMAP3430ES2_PM_WKST3);
94 while (prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3))
95 cpu_relax();
96 cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN3);
97 cm_write_mod_reg(fclk, CORE_MOD, OMAP3430ES2_CM_FCLKEN3);
98 }
99 99
100 /* PER */ 100static int _prcm_int_handle_wakeup(void)
101 wkst = prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST); 101{
102 if (wkst) { 102 int c;
103 iclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN);
104 fclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN);
105 cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_ICLKEN);
106 cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_FCLKEN);
107 prm_write_mod_reg(wkst, OMAP3430_PER_MOD, PM_WKST);
108 while (prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST))
109 cpu_relax();
110 cm_write_mod_reg(iclk, OMAP3430_PER_MOD, CM_ICLKEN);
111 cm_write_mod_reg(fclk, OMAP3430_PER_MOD, CM_FCLKEN);
112 }
113 103
104 c = prcm_clear_mod_irqs(WKUP_MOD, 1);
105 c += prcm_clear_mod_irqs(CORE_MOD, 1);
106 c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
114 if (omap_rev() > OMAP3430_REV_ES1_0) { 107 if (omap_rev() > OMAP3430_REV_ES1_0) {
115 /* USBHOST */ 108 c += prcm_clear_mod_irqs(CORE_MOD, 3);
116 wkst = prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKST); 109 c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
117 if (wkst) {
118 iclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
119 CM_ICLKEN);
120 fclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
121 CM_FCLKEN);
122 cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
123 CM_ICLKEN);
124 cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD,
125 CM_FCLKEN);
126 prm_write_mod_reg(wkst, OMAP3430ES2_USBHOST_MOD,
127 PM_WKST);
128 while (prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD,
129 PM_WKST))
130 cpu_relax();
131 cm_write_mod_reg(iclk, OMAP3430ES2_USBHOST_MOD,
132 CM_ICLKEN);
133 cm_write_mod_reg(fclk, OMAP3430ES2_USBHOST_MOD,
134 CM_FCLKEN);
135 }
136 } 110 }
137 111
138	irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
139				OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
140	prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
141				OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 112	return c;
 113}
 114
 115/*
116 * PRCM Interrupt Handler
117 *
118 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
119 * interrupts from the PRCM for the MPU. These bits must be cleared in
120 * order to clear the PRCM interrupt. The PRCM interrupt handler is
121 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
122 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
123 * register indicates that a wake-up event is pending for the MPU and
124 * this bit can only be cleared if the all the wake-up events latched
125 * in the various PM_WKST_x registers have been cleared. The interrupt
126 * handler is implemented using a do-while loop so that if a wake-up
127 * event occurred during the processing of the prcm interrupt handler
128 * (setting a bit in the corresponding PM_WKST_x register and thus
129 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
130 * this would be handled.
131 */
132static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
133{
134 u32 irqstatus_mpu;
135 int c = 0;
136
137 do {
138 irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
139 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
140
141 if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) {
142 c = _prcm_int_handle_wakeup();
143
144 /*
145 * Is the MPU PRCM interrupt handler racing with the
146 * IVA2 PRCM interrupt handler ?
147 */
148 WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
149 "but no wakeup sources are marked\n");
150 } else {
151 /* XXX we need to expand our PRCM interrupt handler */
152 WARN(1, "prcm: WARNING: PRCM interrupt received, but "
153 "no code to handle it (%08x)\n", irqstatus_mpu);
154 }
155
156 prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
157 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
142 158
143 while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET)) 159 } while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET));
144 cpu_relax();
145 160
146 return IRQ_HANDLED; 161 return IRQ_HANDLED;
147} 162}
@@ -624,6 +639,16 @@ static void __init prcm_setup_regs(void)
624 prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN, 639 prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN,
625 OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); 640 OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
626 641
642 /* Enable GPIO wakeups in PER */
643 prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 |
644 OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 |
645 OMAP3430_EN_GPIO6, OMAP3430_PER_MOD, PM_WKEN);
646 /* and allow them to wake up MPU */
647 prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 |
648 OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 |
649 OMAP3430_GRPSEL_GPIO6,
650 OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
651
627 /* Don't attach IVA interrupts */ 652 /* Don't attach IVA interrupts */
628 prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); 653 prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
629 prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1); 654 prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 2594cbff3947..f00289abd30f 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -273,35 +273,50 @@ struct powerdomain *pwrdm_lookup(const char *name)
273} 273}
274 274
275/** 275/**
276 * pwrdm_for_each - call function on each registered clockdomain
276 * pwrdm_for_each_nolock - call function on each registered clockdomain
277 * @fn: callback function * 277 * @fn: callback function *
278 * 278 *
279 * Call the supplied function for each registered powerdomain. The 279 * Call the supplied function for each registered powerdomain. The
280 * callback function can return anything but 0 to bail out early from 280 * callback function can return anything but 0 to bail out early from
281 * the iterator. The callback function is called with the pwrdm_rwlock
282 * held for reading, so no powerdomain structure manipulation
283 * functions should be called from the callback, although hardware
284 * powerdomain control functions are fine. Returns the last return
285 * value of the callback function, which should be 0 for success or
286 * anything else to indicate failure; or -EINVAL if the function
287 * pointer is null.
 281 * the iterator. Returns the last return value of the callback function, which
 282 * should be 0 for success or anything else to indicate failure; or -EINVAL if
 283 * the function pointer is null.
 284 */
288 */ 284 */
289int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
 285int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
290 void *user) 286 void *user)
291{ 287{
292 struct powerdomain *temp_pwrdm; 288 struct powerdomain *temp_pwrdm;
293 unsigned long flags;
294 int ret = 0; 289 int ret = 0;
295 290
296 if (!fn) 291 if (!fn)
297 return -EINVAL; 292 return -EINVAL;
298 293
299 read_lock_irqsave(&pwrdm_rwlock, flags);
300 list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { 294 list_for_each_entry(temp_pwrdm, &pwrdm_list, node) {
301 ret = (*fn)(temp_pwrdm, user); 295 ret = (*fn)(temp_pwrdm, user);
302 if (ret) 296 if (ret)
303 break; 297 break;
304 } 298 }
299
300 return ret;
301}
302
303/**
304 * pwrdm_for_each - call function on each registered clockdomain
305 * @fn: callback function *
306 *
307 * This function is the same as 'pwrdm_for_each_nolock()', but keeps the
308 * &pwrdm_rwlock locked for reading, so no powerdomain structure manipulation
309 * functions should be called from the callback, although hardware powerdomain
310 * control functions are fine.
311 */
312int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
313 void *user)
314{
315 unsigned long flags;
316 int ret;
317
318 read_lock_irqsave(&pwrdm_rwlock, flags);
319 ret = pwrdm_for_each_nolock(fn, user);
305 read_unlock_irqrestore(&pwrdm_rwlock, flags); 320 read_unlock_irqrestore(&pwrdm_rwlock, flags);
306 321
307 return ret; 322 return ret;
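The powerdomain.c hunk above splits the iterator into pwrdm_for_each_nolock(), which does no locking (this is what the pm-debug.c change earlier in the patch switches to), and pwrdm_for_each(), which takes pwrdm_rwlock for reading around the same walk. A sketch of a callback-style caller; everything except pwrdm_for_each_nolock(), struct powerdomain and the callback signature is made up:

#include <mach/powerdomain.h>

/* Sketch: count registered powerdomains via the new _nolock iterator. */
static int count_one_pwrdm(struct powerdomain *pwrdm, void *user)
{
	int *count = user;

	(*count)++;
	return 0;	/* returning non-zero would stop the walk early */
}

static int count_powerdomains(void)
{
	int count = 0;

	pwrdm_for_each_nolock(count_one_pwrdm, &count);
	return count;
}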
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index 81ffff7ed498..4e5c07f4e456 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -71,11 +71,6 @@ config SA1100_H3600
71 <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600> 71 <http://www.handhelds.org/Compaq/index.html#iPAQ_H3600>
72 <http://www.compaq.com/products/handhelds/pocketpc/> 72 <http://www.compaq.com/products/handhelds/pocketpc/>
73 73
74config SA1100_H3XXX
75 bool
76 depends on SA1100_H3100 || SA1100_H3600
77 default y
78
79config SA1100_BADGE4 74config SA1100_BADGE4
80 bool "HP Labs BadgePAD 4" 75 bool "HP Labs BadgePAD 4"
81 select SA1111 76 select SA1111
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 95d92e8e56a8..b9cbb56d6e9d 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -77,7 +77,7 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
77 .set_mode = sa1100_osmr0_set_mode, 77 .set_mode = sa1100_osmr0_set_mode,
78}; 78};
79 79
80static cycle_t sa1100_read_oscr(void) 80static cycle_t sa1100_read_oscr(struct clocksource *s)
81{ 81{
82 return OSCR; 82 return OSCR;
83} 83}
diff --git a/arch/arm/mach-u300/gpio.c b/arch/arm/mach-u300/gpio.c
index 63c8f27fb15a..0b35826b7d1d 100644
--- a/arch/arm/mach-u300/gpio.c
+++ b/arch/arm/mach-u300/gpio.c
@@ -281,6 +281,16 @@ int gpio_unregister_callback(unsigned gpio)
281} 281}
282EXPORT_SYMBOL(gpio_unregister_callback); 282EXPORT_SYMBOL(gpio_unregister_callback);
283 283
284/* Non-zero means valid */
285int gpio_is_valid(int number)
286{
287 if (number >= 0 &&
288 number < (U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT))
289 return 1;
290 return 0;
291}
292EXPORT_SYMBOL(gpio_is_valid);
293
284int gpio_request(unsigned gpio, const char *label) 294int gpio_request(unsigned gpio, const char *label)
285{ 295{
286 if (gpio_pin[gpio].users) 296 if (gpio_pin[gpio].users)
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h
index c8174128d7eb..7b1fc984abb6 100644
--- a/arch/arm/mach-u300/include/mach/gpio.h
+++ b/arch/arm/mach-u300/include/mach/gpio.h
@@ -258,6 +258,7 @@
258#define PIN_TO_PORT(val) (val >> 3) 258#define PIN_TO_PORT(val) (val >> 3)
259 259
260/* These can be found in arch/arm/mach-u300/gpio.c */ 260/* These can be found in arch/arm/mach-u300/gpio.c */
261extern int gpio_is_valid(int number);
261extern int gpio_request(unsigned gpio, const char *label); 262extern int gpio_request(unsigned gpio, const char *label);
262extern void gpio_free(unsigned gpio); 263extern void gpio_free(unsigned gpio);
263extern int gpio_direction_input(unsigned gpio); 264extern int gpio_direction_input(unsigned gpio);
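gpio_is_valid() added above simply range-checks a pin number against U300_GPIO_NUM_PORTS * U300_GPIO_PINS_PER_PORT. A short usage sketch; the pin number, label and control flow are illustrative, and only gpio_is_valid(), gpio_request() and gpio_free() come from this patch and header:

#include <linux/errno.h>
#include <mach/gpio.h>

/* Sketch: validate before requesting, free when done. */
static int claim_example_pin(int pin)
{
	int err;

	if (!gpio_is_valid(pin))
		return -EINVAL;

	err = gpio_request(pin, "example");
	if (err)
		return err;

	/* ... configure and use the pin here ... */

	gpio_free(pin);
	return 0;
}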
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 8d43e58f9244..e993140edd88 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -17,7 +17,7 @@ config CPU_ARM610
17 select CPU_CP15_MMU 17 select CPU_CP15_MMU
18 select CPU_COPY_V3 if MMU 18 select CPU_COPY_V3 if MMU
19 select CPU_TLB_V3 if MMU 19 select CPU_TLB_V3 if MMU
20 select CPU_PABRT_NOIFAR 20 select CPU_PABRT_LEGACY
21 help 21 help
22 The ARM610 is the successor to the ARM3 processor 22 The ARM610 is the successor to the ARM3 processor
23 and was produced by VLSI Technology Inc. 23 and was produced by VLSI Technology Inc.
@@ -31,7 +31,7 @@ config CPU_ARM7TDMI
31 depends on !MMU 31 depends on !MMU
32 select CPU_32v4T 32 select CPU_32v4T
33 select CPU_ABRT_LV4T 33 select CPU_ABRT_LV4T
34 select CPU_PABRT_NOIFAR 34 select CPU_PABRT_LEGACY
35 select CPU_CACHE_V4 35 select CPU_CACHE_V4
36 help 36 help
37 A 32-bit RISC microprocessor based on the ARM7 processor core 37 A 32-bit RISC microprocessor based on the ARM7 processor core
@@ -49,7 +49,7 @@ config CPU_ARM710
49 select CPU_CP15_MMU 49 select CPU_CP15_MMU
50 select CPU_COPY_V3 if MMU 50 select CPU_COPY_V3 if MMU
51 select CPU_TLB_V3 if MMU 51 select CPU_TLB_V3 if MMU
52 select CPU_PABRT_NOIFAR 52 select CPU_PABRT_LEGACY
53 help 53 help
54 A 32-bit RISC microprocessor based on the ARM7 processor core 54 A 32-bit RISC microprocessor based on the ARM7 processor core
55 designed by Advanced RISC Machines Ltd. The ARM710 is the 55 designed by Advanced RISC Machines Ltd. The ARM710 is the
@@ -64,7 +64,7 @@ config CPU_ARM720T
64 bool "Support ARM720T processor" if ARCH_INTEGRATOR 64 bool "Support ARM720T processor" if ARCH_INTEGRATOR
65 select CPU_32v4T 65 select CPU_32v4T
66 select CPU_ABRT_LV4T 66 select CPU_ABRT_LV4T
67 select CPU_PABRT_NOIFAR 67 select CPU_PABRT_LEGACY
68 select CPU_CACHE_V4 68 select CPU_CACHE_V4
69 select CPU_CACHE_VIVT 69 select CPU_CACHE_VIVT
70 select CPU_CP15_MMU 70 select CPU_CP15_MMU
@@ -83,7 +83,7 @@ config CPU_ARM740T
83 depends on !MMU 83 depends on !MMU
84 select CPU_32v4T 84 select CPU_32v4T
85 select CPU_ABRT_LV4T 85 select CPU_ABRT_LV4T
86 select CPU_PABRT_NOIFAR 86 select CPU_PABRT_LEGACY
87 select CPU_CACHE_V3 # although the core is v4t 87 select CPU_CACHE_V3 # although the core is v4t
88 select CPU_CP15_MPU 88 select CPU_CP15_MPU
89 help 89 help
@@ -100,7 +100,7 @@ config CPU_ARM9TDMI
100 depends on !MMU 100 depends on !MMU
101 select CPU_32v4T 101 select CPU_32v4T
102 select CPU_ABRT_NOMMU 102 select CPU_ABRT_NOMMU
103 select CPU_PABRT_NOIFAR 103 select CPU_PABRT_LEGACY
104 select CPU_CACHE_V4 104 select CPU_CACHE_V4
105 help 105 help
106 A 32-bit RISC microprocessor based on the ARM9 processor core 106 A 32-bit RISC microprocessor based on the ARM9 processor core
@@ -114,7 +114,7 @@ config CPU_ARM920T
114 bool "Support ARM920T processor" if ARCH_INTEGRATOR 114 bool "Support ARM920T processor" if ARCH_INTEGRATOR
115 select CPU_32v4T 115 select CPU_32v4T
116 select CPU_ABRT_EV4T 116 select CPU_ABRT_EV4T
117 select CPU_PABRT_NOIFAR 117 select CPU_PABRT_LEGACY
118 select CPU_CACHE_V4WT 118 select CPU_CACHE_V4WT
119 select CPU_CACHE_VIVT 119 select CPU_CACHE_VIVT
120 select CPU_CP15_MMU 120 select CPU_CP15_MMU
@@ -135,7 +135,7 @@ config CPU_ARM922T
135 bool "Support ARM922T processor" if ARCH_INTEGRATOR 135 bool "Support ARM922T processor" if ARCH_INTEGRATOR
136 select CPU_32v4T 136 select CPU_32v4T
137 select CPU_ABRT_EV4T 137 select CPU_ABRT_EV4T
138 select CPU_PABRT_NOIFAR 138 select CPU_PABRT_LEGACY
139 select CPU_CACHE_V4WT 139 select CPU_CACHE_V4WT
140 select CPU_CACHE_VIVT 140 select CPU_CACHE_VIVT
141 select CPU_CP15_MMU 141 select CPU_CP15_MMU
@@ -154,7 +154,7 @@ config CPU_ARM925T
154 bool "Support ARM925T processor" if ARCH_OMAP1 154 bool "Support ARM925T processor" if ARCH_OMAP1
155 select CPU_32v4T 155 select CPU_32v4T
156 select CPU_ABRT_EV4T 156 select CPU_ABRT_EV4T
157 select CPU_PABRT_NOIFAR 157 select CPU_PABRT_LEGACY
158 select CPU_CACHE_V4WT 158 select CPU_CACHE_V4WT
159 select CPU_CACHE_VIVT 159 select CPU_CACHE_VIVT
160 select CPU_CP15_MMU 160 select CPU_CP15_MMU
@@ -173,7 +173,7 @@ config CPU_ARM926T
173 bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB 173 bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
174 select CPU_32v5 174 select CPU_32v5
175 select CPU_ABRT_EV5TJ 175 select CPU_ABRT_EV5TJ
176 select CPU_PABRT_NOIFAR 176 select CPU_PABRT_LEGACY
177 select CPU_CACHE_VIVT 177 select CPU_CACHE_VIVT
178 select CPU_CP15_MMU 178 select CPU_CP15_MMU
179 select CPU_COPY_V4WB if MMU 179 select CPU_COPY_V4WB if MMU
@@ -191,7 +191,7 @@ config CPU_FA526
191 bool 191 bool
192 select CPU_32v4 192 select CPU_32v4
193 select CPU_ABRT_EV4 193 select CPU_ABRT_EV4
194 select CPU_PABRT_NOIFAR 194 select CPU_PABRT_LEGACY
195 select CPU_CACHE_VIVT 195 select CPU_CACHE_VIVT
196 select CPU_CP15_MMU 196 select CPU_CP15_MMU
197 select CPU_CACHE_FA 197 select CPU_CACHE_FA
@@ -210,7 +210,7 @@ config CPU_ARM940T
210 depends on !MMU 210 depends on !MMU
211 select CPU_32v4T 211 select CPU_32v4T
212 select CPU_ABRT_NOMMU 212 select CPU_ABRT_NOMMU
213 select CPU_PABRT_NOIFAR 213 select CPU_PABRT_LEGACY
214 select CPU_CACHE_VIVT 214 select CPU_CACHE_VIVT
215 select CPU_CP15_MPU 215 select CPU_CP15_MPU
216 help 216 help
@@ -228,7 +228,7 @@ config CPU_ARM946E
228 depends on !MMU 228 depends on !MMU
229 select CPU_32v5 229 select CPU_32v5
230 select CPU_ABRT_NOMMU 230 select CPU_ABRT_NOMMU
231 select CPU_PABRT_NOIFAR 231 select CPU_PABRT_LEGACY
232 select CPU_CACHE_VIVT 232 select CPU_CACHE_VIVT
233 select CPU_CP15_MPU 233 select CPU_CP15_MPU
234 help 234 help
@@ -244,7 +244,7 @@ config CPU_ARM1020
244 bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR 244 bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR
245 select CPU_32v5 245 select CPU_32v5
246 select CPU_ABRT_EV4T 246 select CPU_ABRT_EV4T
247 select CPU_PABRT_NOIFAR 247 select CPU_PABRT_LEGACY
248 select CPU_CACHE_V4WT 248 select CPU_CACHE_V4WT
249 select CPU_CACHE_VIVT 249 select CPU_CACHE_VIVT
250 select CPU_CP15_MMU 250 select CPU_CP15_MMU
@@ -262,7 +262,7 @@ config CPU_ARM1020E
262 bool "Support ARM1020E processor" if ARCH_INTEGRATOR 262 bool "Support ARM1020E processor" if ARCH_INTEGRATOR
263 select CPU_32v5 263 select CPU_32v5
264 select CPU_ABRT_EV4T 264 select CPU_ABRT_EV4T
265 select CPU_PABRT_NOIFAR 265 select CPU_PABRT_LEGACY
266 select CPU_CACHE_V4WT 266 select CPU_CACHE_V4WT
267 select CPU_CACHE_VIVT 267 select CPU_CACHE_VIVT
268 select CPU_CP15_MMU 268 select CPU_CP15_MMU
@@ -275,7 +275,7 @@ config CPU_ARM1022
275 bool "Support ARM1022E processor" if ARCH_INTEGRATOR 275 bool "Support ARM1022E processor" if ARCH_INTEGRATOR
276 select CPU_32v5 276 select CPU_32v5
277 select CPU_ABRT_EV4T 277 select CPU_ABRT_EV4T
278 select CPU_PABRT_NOIFAR 278 select CPU_PABRT_LEGACY
279 select CPU_CACHE_VIVT 279 select CPU_CACHE_VIVT
280 select CPU_CP15_MMU 280 select CPU_CP15_MMU
281 select CPU_COPY_V4WB if MMU # can probably do better 281 select CPU_COPY_V4WB if MMU # can probably do better
@@ -293,7 +293,7 @@ config CPU_ARM1026
293 bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR 293 bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR
294 select CPU_32v5 294 select CPU_32v5
295 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 295 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
296 select CPU_PABRT_NOIFAR 296 select CPU_PABRT_LEGACY
297 select CPU_CACHE_VIVT 297 select CPU_CACHE_VIVT
298 select CPU_CP15_MMU 298 select CPU_CP15_MMU
299 select CPU_COPY_V4WB if MMU # can probably do better 299 select CPU_COPY_V4WB if MMU # can probably do better
@@ -311,7 +311,7 @@ config CPU_SA110
311 select CPU_32v3 if ARCH_RPC 311 select CPU_32v3 if ARCH_RPC
312 select CPU_32v4 if !ARCH_RPC 312 select CPU_32v4 if !ARCH_RPC
313 select CPU_ABRT_EV4 313 select CPU_ABRT_EV4
314 select CPU_PABRT_NOIFAR 314 select CPU_PABRT_LEGACY
315 select CPU_CACHE_V4WB 315 select CPU_CACHE_V4WB
316 select CPU_CACHE_VIVT 316 select CPU_CACHE_VIVT
317 select CPU_CP15_MMU 317 select CPU_CP15_MMU
@@ -331,7 +331,7 @@ config CPU_SA1100
331 bool 331 bool
332 select CPU_32v4 332 select CPU_32v4
333 select CPU_ABRT_EV4 333 select CPU_ABRT_EV4
334 select CPU_PABRT_NOIFAR 334 select CPU_PABRT_LEGACY
335 select CPU_CACHE_V4WB 335 select CPU_CACHE_V4WB
336 select CPU_CACHE_VIVT 336 select CPU_CACHE_VIVT
337 select CPU_CP15_MMU 337 select CPU_CP15_MMU
@@ -342,7 +342,7 @@ config CPU_XSCALE
342 bool 342 bool
343 select CPU_32v5 343 select CPU_32v5
344 select CPU_ABRT_EV5T 344 select CPU_ABRT_EV5T
345 select CPU_PABRT_NOIFAR 345 select CPU_PABRT_LEGACY
346 select CPU_CACHE_VIVT 346 select CPU_CACHE_VIVT
347 select CPU_CP15_MMU 347 select CPU_CP15_MMU
348 select CPU_TLB_V4WBI if MMU 348 select CPU_TLB_V4WBI if MMU
@@ -352,7 +352,7 @@ config CPU_XSC3
352 bool 352 bool
353 select CPU_32v5 353 select CPU_32v5
354 select CPU_ABRT_EV5T 354 select CPU_ABRT_EV5T
355 select CPU_PABRT_NOIFAR 355 select CPU_PABRT_LEGACY
356 select CPU_CACHE_VIVT 356 select CPU_CACHE_VIVT
357 select CPU_CP15_MMU 357 select CPU_CP15_MMU
358 select CPU_TLB_V4WBI if MMU 358 select CPU_TLB_V4WBI if MMU
@@ -363,7 +363,7 @@ config CPU_MOHAWK
363 bool 363 bool
364 select CPU_32v5 364 select CPU_32v5
365 select CPU_ABRT_EV5T 365 select CPU_ABRT_EV5T
366 select CPU_PABRT_NOIFAR 366 select CPU_PABRT_LEGACY
367 select CPU_CACHE_VIVT 367 select CPU_CACHE_VIVT
368 select CPU_CP15_MMU 368 select CPU_CP15_MMU
369 select CPU_TLB_V4WBI if MMU 369 select CPU_TLB_V4WBI if MMU
@@ -374,7 +374,7 @@ config CPU_FEROCEON
374 bool 374 bool
375 select CPU_32v5 375 select CPU_32v5
376 select CPU_ABRT_EV5T 376 select CPU_ABRT_EV5T
377 select CPU_PABRT_NOIFAR 377 select CPU_PABRT_LEGACY
378 select CPU_CACHE_VIVT 378 select CPU_CACHE_VIVT
379 select CPU_CP15_MMU 379 select CPU_CP15_MMU
380 select CPU_COPY_FEROCEON if MMU 380 select CPU_COPY_FEROCEON if MMU
@@ -394,7 +394,7 @@ config CPU_V6
394 bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX 394 bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
395 select CPU_32v6 395 select CPU_32v6
396 select CPU_ABRT_EV6 396 select CPU_ABRT_EV6
397 select CPU_PABRT_NOIFAR 397 select CPU_PABRT_V6
398 select CPU_CACHE_V6 398 select CPU_CACHE_V6
399 select CPU_CACHE_VIPT 399 select CPU_CACHE_VIPT
400 select CPU_CP15_MMU 400 select CPU_CP15_MMU
@@ -420,7 +420,7 @@ config CPU_V7
420 select CPU_32v6K 420 select CPU_32v6K
421 select CPU_32v7 421 select CPU_32v7
422 select CPU_ABRT_EV7 422 select CPU_ABRT_EV7
423 select CPU_PABRT_IFAR 423 select CPU_PABRT_V7
424 select CPU_CACHE_V7 424 select CPU_CACHE_V7
425 select CPU_CACHE_VIPT 425 select CPU_CACHE_VIPT
426 select CPU_CP15_MMU 426 select CPU_CP15_MMU
@@ -482,10 +482,13 @@ config CPU_ABRT_EV6
482config CPU_ABRT_EV7 482config CPU_ABRT_EV7
483 bool 483 bool
484 484
485config CPU_PABRT_IFAR 485config CPU_PABRT_LEGACY
486 bool 486 bool
487 487
488config CPU_PABRT_NOIFAR 488config CPU_PABRT_V6
489 bool
490
491config CPU_PABRT_V7
489 bool 492 bool
490 493
491# The cache model 494# The cache model
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 63e3f6dd0e21..055cb2aa8134 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -27,6 +27,10 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
27obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o 27obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
28obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o 28obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o
29 29
30obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
31obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
32obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
33
30obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o 34obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
31obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o 35obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
32obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o 36obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 379f78556055..ae0e25f5a70e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -519,9 +519,58 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
519 arm_notify_die("", regs, &info, fsr, 0); 519 arm_notify_die("", regs, &info, fsr, 0);
520} 520}
521 521
522
523static struct fsr_info ifsr_info[] = {
524 { do_bad, SIGBUS, 0, "unknown 0" },
525 { do_bad, SIGBUS, 0, "unknown 1" },
526 { do_bad, SIGBUS, 0, "debug event" },
527 { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
528 { do_bad, SIGBUS, 0, "unknown 4" },
529 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
530 { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
531 { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
532 { do_bad, SIGBUS, 0, "external abort on non-linefetch" },
533 { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
534 { do_bad, SIGBUS, 0, "unknown 10" },
535 { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
536 { do_bad, SIGBUS, 0, "external abort on translation" },
537 { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
538 { do_bad, SIGBUS, 0, "external abort on translation" },
539 { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
540 { do_bad, SIGBUS, 0, "unknown 16" },
541 { do_bad, SIGBUS, 0, "unknown 17" },
542 { do_bad, SIGBUS, 0, "unknown 18" },
543 { do_bad, SIGBUS, 0, "unknown 19" },
544 { do_bad, SIGBUS, 0, "unknown 20" },
545 { do_bad, SIGBUS, 0, "unknown 21" },
546 { do_bad, SIGBUS, 0, "unknown 22" },
547 { do_bad, SIGBUS, 0, "unknown 23" },
548 { do_bad, SIGBUS, 0, "unknown 24" },
549 { do_bad, SIGBUS, 0, "unknown 25" },
550 { do_bad, SIGBUS, 0, "unknown 26" },
551 { do_bad, SIGBUS, 0, "unknown 27" },
552 { do_bad, SIGBUS, 0, "unknown 28" },
553 { do_bad, SIGBUS, 0, "unknown 29" },
554 { do_bad, SIGBUS, 0, "unknown 30" },
555 { do_bad, SIGBUS, 0, "unknown 31" },
556};
557
522asmlinkage void __exception 558asmlinkage void __exception
523do_PrefetchAbort(unsigned long addr, struct pt_regs *regs) 559do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
524{ 560{
525 do_translation_fault(addr, FSR_LNX_PF, regs); 561 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
562 struct siginfo info;
563
564 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
565 return;
566
567 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
568 inf->name, ifsr, addr);
569
570 info.si_signo = inf->sig;
571 info.si_errno = 0;
572 info.si_code = inf->code;
573 info.si_addr = (void __user *)addr;
574 arm_notify_die("", regs, &info, ifsr, 0);
526} 575}
527 576
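The do_PrefetchAbort() rework above indexes ifsr_info[] with fsr_fs(ifsr). As a hypothetical sketch of how that fold usually works (the FSR_FS3_0/FSR_FS4 names and the bit-10 handling are assumptions, not quoted from this patch):

	/* Hypothetical decoder: fold the split IFSR status field into a
	 * 0..31 index suitable for ifsr_info[]. */
	#include <assert.h>

	#define FSR_FS3_0	0x0f		/* status bits [3:0] */
	#define FSR_FS4		(1 << 10)	/* extended status bit */

	static inline int fsr_fs(unsigned int fsr)
	{
		return (fsr & FSR_FS3_0) | ((fsr & FSR_FS4) >> 6);
	}

	int main(void)
	{
		assert(fsr_fs(0x005) == 5);	/* section translation fault */
		assert(fsr_fs(0x00f) == 15);	/* page permission fault */
		assert(fsr_fs(0x406) == 22);	/* bit 10 set selects entries 16..31 */
		return 0;
	}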
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index f7457fea6de8..2b7996401b0f 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
124{ 124{
125 if (addr < PHYS_OFFSET) 125 if (addr < PHYS_OFFSET)
126 return 0; 126 return 0;
127 if (addr + size >= __pa(high_memory - 1)) 127 if (addr + size > __pa(high_memory - 1) + 1)
128 return 0; 128 return 0;
129 129
130 return 1; 130 return 1;
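The valid_phys_addr_range() change above is an off-by-one fix: a range ending exactly at the top of lowmem was rejected by the old comparison. A minimal illustration, with made-up physical addresses:

	#include <assert.h>

	int main(void)
	{
		unsigned long last = 0x0fffffff;	/* stand-in for __pa(high_memory - 1) */
		unsigned long addr = 0x0ff00000, size = 0x00100000;

		/* The range ends exactly one byte past 'last', which is legal. */
		assert(addr + size >= last);		/* old test: range rejected */
		assert(!(addr + size > last + 1));	/* new test: range accepted */
		return 0;
	}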
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4426ee67ceca..02243eeccf50 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -21,6 +21,7 @@
21#include <asm/cachetype.h> 21#include <asm/cachetype.h>
22#include <asm/setup.h> 22#include <asm/setup.h>
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <asm/smp_plat.h>
24#include <asm/tlb.h> 25#include <asm/tlb.h>
25#include <asm/highmem.h> 26#include <asm/highmem.h>
26 27
@@ -709,10 +710,6 @@ static void __init sanity_check_meminfo(void)
709 if (meminfo.nr_banks >= NR_BANKS) { 710 if (meminfo.nr_banks >= NR_BANKS) {
710 printk(KERN_CRIT "NR_BANKS too low, " 711 printk(KERN_CRIT "NR_BANKS too low, "
711 "ignoring high memory\n"); 712 "ignoring high memory\n");
712 } else if (cache_is_vipt_aliasing()) {
713 printk(KERN_CRIT "HIGHMEM is not yet supported "
714 "with VIPT aliasing cache, "
715 "ignoring high memory\n");
716 } else { 713 } else {
717 memmove(bank + 1, bank, 714 memmove(bank + 1, bank,
718 (meminfo.nr_banks - i) * sizeof(*bank)); 715 (meminfo.nr_banks - i) * sizeof(*bank));
@@ -726,6 +723,8 @@ static void __init sanity_check_meminfo(void)
726 bank->size = VMALLOC_MIN - __va(bank->start); 723 bank->size = VMALLOC_MIN - __va(bank->start);
727 } 724 }
728#else 725#else
726 bank->highmem = highmem;
727
729 /* 728 /*
730 * Check whether this memory bank would entirely overlap 729 * Check whether this memory bank would entirely overlap
731 * the vmalloc area. 730 * the vmalloc area.
@@ -754,6 +753,38 @@ static void __init sanity_check_meminfo(void)
754#endif 753#endif
755 j++; 754 j++;
756 } 755 }
756#ifdef CONFIG_HIGHMEM
757 if (highmem) {
758 const char *reason = NULL;
759
760 if (cache_is_vipt_aliasing()) {
761 /*
762 * Interactions between kmap and other mappings
763 * make highmem support with aliasing VIPT caches
764 * rather difficult.
765 */
766 reason = "with VIPT aliasing cache";
767#ifdef CONFIG_SMP
768 } else if (tlb_ops_need_broadcast()) {
769 /*
770 * kmap_high needs to occasionally flush TLB entries,
771 * however, if the TLB entries need to be broadcast
772 * we may deadlock:
773 * kmap_high(irqs off)->flush_all_zero_pkmaps->
774 * flush_tlb_kernel_range->smp_call_function_many
775 * (must not be called with irqs off)
776 */
777 reason = "without hardware TLB ops broadcasting";
778#endif
779 }
780 if (reason) {
781 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
782 reason);
783 while (j > 0 && meminfo.bank[j - 1].highmem)
784 j--;
785 }
786 }
787#endif
757 meminfo.nr_banks = j; 788 meminfo.nr_banks = j;
758} 789}
759 790
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S
new file mode 100644
index 000000000000..87970eba88ea
--- /dev/null
+++ b/arch/arm/mm/pabort-legacy.S
@@ -0,0 +1,19 @@
1#include <linux/linkage.h>
2#include <asm/assembler.h>
3
4/*
5 * Function: legacy_pabort
6 *
7 * Params : r0 = address of aborted instruction
8 *
9 * Returns : r0 = address of abort
10 * : r1 = Simulated IFSR with section translation fault status
11 *
12 * Purpose : obtain information about current prefetch abort.
13 */
14
15 .align 5
16ENTRY(legacy_pabort)
17 mov r1, #5
18 mov pc, lr
19ENDPROC(legacy_pabort)
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S
new file mode 100644
index 000000000000..06e3d1ef2115
--- /dev/null
+++ b/arch/arm/mm/pabort-v6.S
@@ -0,0 +1,19 @@
1#include <linux/linkage.h>
2#include <asm/assembler.h>
3
4/*
5 * Function: v6_pabort
6 *
7 * Params : r0 = address of aborted instruction
8 *
9 * Returns : r0 = address of abort
10 * : r1 = IFSR
11 *
12 * Purpose : obtain information about current prefetch abort.
13 */
14
15 .align 5
16ENTRY(v6_pabort)
17 mrc p15, 0, r1, c5, c0, 1 @ get IFSR
18 mov pc, lr
19ENDPROC(v6_pabort)
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S
new file mode 100644
index 000000000000..a8b3b300a18d
--- /dev/null
+++ b/arch/arm/mm/pabort-v7.S
@@ -0,0 +1,20 @@
1#include <linux/linkage.h>
2#include <asm/assembler.h>
3
4/*
5 * Function: v7_pabort
6 *
7 * Params : r0 = address of aborted instruction
8 *
9 * Returns : r0 = address of abort
10 * : r1 = IFSR
11 *
12 * Purpose : obtain information about current prefetch abort.
13 */
14
15 .align 5
16ENTRY(v7_pabort)
17 mrc p15, 0, r0, c6, c0, 2 @ get IFAR
18 mrc p15, 0, r1, c5, c0, 1 @ get IFSR
19 mov pc, lr
20ENDPROC(v7_pabort)
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index b5551bf010aa..d9fb4b98c49f 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -449,7 +449,7 @@ arm1020_crval:
449 .type arm1020_processor_functions, #object 449 .type arm1020_processor_functions, #object
450arm1020_processor_functions: 450arm1020_processor_functions:
451 .word v4t_early_abort 451 .word v4t_early_abort
452 .word pabort_noifar 452 .word legacy_pabort
453 .word cpu_arm1020_proc_init 453 .word cpu_arm1020_proc_init
454 .word cpu_arm1020_proc_fin 454 .word cpu_arm1020_proc_fin
455 .word cpu_arm1020_reset 455 .word cpu_arm1020_reset
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 8bc6740c29eb..7453b75dcea5 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -430,7 +430,7 @@ arm1020e_crval:
430 .type arm1020e_processor_functions, #object 430 .type arm1020e_processor_functions, #object
431arm1020e_processor_functions: 431arm1020e_processor_functions:
432 .word v4t_early_abort 432 .word v4t_early_abort
433 .word pabort_noifar 433 .word legacy_pabort
434 .word cpu_arm1020e_proc_init 434 .word cpu_arm1020e_proc_init
435 .word cpu_arm1020e_proc_fin 435 .word cpu_arm1020e_proc_fin
436 .word cpu_arm1020e_reset 436 .word cpu_arm1020e_reset
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 2cd03e66c0a3..8eb72d75a8b6 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -413,7 +413,7 @@ arm1022_crval:
413 .type arm1022_processor_functions, #object 413 .type arm1022_processor_functions, #object
414arm1022_processor_functions: 414arm1022_processor_functions:
415 .word v4t_early_abort 415 .word v4t_early_abort
416 .word pabort_noifar 416 .word legacy_pabort
417 .word cpu_arm1022_proc_init 417 .word cpu_arm1022_proc_init
418 .word cpu_arm1022_proc_fin 418 .word cpu_arm1022_proc_fin
419 .word cpu_arm1022_reset 419 .word cpu_arm1022_reset
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index ad961a897f6e..3b59f0d67139 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -408,7 +408,7 @@ arm1026_crval:
408 .type arm1026_processor_functions, #object 408 .type arm1026_processor_functions, #object
409arm1026_processor_functions: 409arm1026_processor_functions:
410 .word v5t_early_abort 410 .word v5t_early_abort
411 .word pabort_noifar 411 .word legacy_pabort
412 .word cpu_arm1026_proc_init 412 .word cpu_arm1026_proc_init
413 .word cpu_arm1026_proc_fin 413 .word cpu_arm1026_proc_fin
414 .word cpu_arm1026_reset 414 .word cpu_arm1026_reset
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 80d6e1de069a..3f9cd3d8f6d5 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -278,7 +278,7 @@ __arm7_setup: mov r0, #0
278 .type arm6_processor_functions, #object 278 .type arm6_processor_functions, #object
279ENTRY(arm6_processor_functions) 279ENTRY(arm6_processor_functions)
280 .word cpu_arm6_data_abort 280 .word cpu_arm6_data_abort
281 .word pabort_noifar 281 .word legacy_pabort
282 .word cpu_arm6_proc_init 282 .word cpu_arm6_proc_init
283 .word cpu_arm6_proc_fin 283 .word cpu_arm6_proc_fin
284 .word cpu_arm6_reset 284 .word cpu_arm6_reset
@@ -295,7 +295,7 @@ ENTRY(arm6_processor_functions)
295 .type arm7_processor_functions, #object 295 .type arm7_processor_functions, #object
296ENTRY(arm7_processor_functions) 296ENTRY(arm7_processor_functions)
297 .word cpu_arm7_data_abort 297 .word cpu_arm7_data_abort
298 .word pabort_noifar 298 .word legacy_pabort
299 .word cpu_arm7_proc_init 299 .word cpu_arm7_proc_init
300 .word cpu_arm7_proc_fin 300 .word cpu_arm7_proc_fin
301 .word cpu_arm7_reset 301 .word cpu_arm7_reset
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 85ae18695f10..0b62de244666 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -181,7 +181,7 @@ arm720_crval:
181 .type arm720_processor_functions, #object 181 .type arm720_processor_functions, #object
182ENTRY(arm720_processor_functions) 182ENTRY(arm720_processor_functions)
183 .word v4t_late_abort 183 .word v4t_late_abort
184 .word pabort_noifar 184 .word legacy_pabort
185 .word cpu_arm720_proc_init 185 .word cpu_arm720_proc_init
186 .word cpu_arm720_proc_fin 186 .word cpu_arm720_proc_fin
187 .word cpu_arm720_reset 187 .word cpu_arm720_reset
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 4f95bee63e95..01860cdeb2ec 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -126,7 +126,7 @@ __arm740_setup:
126 .type arm740_processor_functions, #object 126 .type arm740_processor_functions, #object
127ENTRY(arm740_processor_functions) 127ENTRY(arm740_processor_functions)
128 .word v4t_late_abort 128 .word v4t_late_abort
129 .word pabort_noifar 129 .word legacy_pabort
130 .word cpu_arm740_proc_init 130 .word cpu_arm740_proc_init
131 .word cpu_arm740_proc_fin 131 .word cpu_arm740_proc_fin
132 .word cpu_arm740_reset 132 .word cpu_arm740_reset
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 93e05fa7bed4..1201b9863829 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -64,7 +64,7 @@ __arm7tdmi_setup:
64 .type arm7tdmi_processor_functions, #object 64 .type arm7tdmi_processor_functions, #object
65ENTRY(arm7tdmi_processor_functions) 65ENTRY(arm7tdmi_processor_functions)
66 .word v4t_late_abort 66 .word v4t_late_abort
67 .word pabort_noifar 67 .word legacy_pabort
68 .word cpu_arm7tdmi_proc_init 68 .word cpu_arm7tdmi_proc_init
69 .word cpu_arm7tdmi_proc_fin 69 .word cpu_arm7tdmi_proc_fin
70 .word cpu_arm7tdmi_reset 70 .word cpu_arm7tdmi_reset
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 914d688394fc..2b7c197cc58d 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -395,7 +395,7 @@ arm920_crval:
395 .type arm920_processor_functions, #object 395 .type arm920_processor_functions, #object
396arm920_processor_functions: 396arm920_processor_functions:
397 .word v4t_early_abort 397 .word v4t_early_abort
398 .word pabort_noifar 398 .word legacy_pabort
399 .word cpu_arm920_proc_init 399 .word cpu_arm920_proc_init
400 .word cpu_arm920_proc_fin 400 .word cpu_arm920_proc_fin
401 .word cpu_arm920_reset 401 .word cpu_arm920_reset
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 51c9c9859e58..06a1aa4e3398 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -399,7 +399,7 @@ arm922_crval:
399 .type arm922_processor_functions, #object 399 .type arm922_processor_functions, #object
400arm922_processor_functions: 400arm922_processor_functions:
401 .word v4t_early_abort 401 .word v4t_early_abort
402 .word pabort_noifar 402 .word legacy_pabort
403 .word cpu_arm922_proc_init 403 .word cpu_arm922_proc_init
404 .word cpu_arm922_proc_fin 404 .word cpu_arm922_proc_fin
405 .word cpu_arm922_reset 405 .word cpu_arm922_reset
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 2724526d89c1..cb53435a85ae 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -462,7 +462,7 @@ arm925_crval:
462 .type arm925_processor_functions, #object 462 .type arm925_processor_functions, #object
463arm925_processor_functions: 463arm925_processor_functions:
464 .word v4t_early_abort 464 .word v4t_early_abort
465 .word pabort_noifar 465 .word legacy_pabort
466 .word cpu_arm925_proc_init 466 .word cpu_arm925_proc_init
467 .word cpu_arm925_proc_fin 467 .word cpu_arm925_proc_fin
468 .word cpu_arm925_reset 468 .word cpu_arm925_reset
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 54466937bff9..1c4848704bb3 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -415,7 +415,7 @@ arm926_crval:
415 .type arm926_processor_functions, #object 415 .type arm926_processor_functions, #object
416arm926_processor_functions: 416arm926_processor_functions:
417 .word v5tj_early_abort 417 .word v5tj_early_abort
418 .word pabort_noifar 418 .word legacy_pabort
419 .word cpu_arm926_proc_init 419 .word cpu_arm926_proc_init
420 .word cpu_arm926_proc_fin 420 .word cpu_arm926_proc_fin
421 .word cpu_arm926_reset 421 .word cpu_arm926_reset
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index f595117caf55..5b0f8464c8f2 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -322,7 +322,7 @@ __arm940_setup:
322 .type arm940_processor_functions, #object 322 .type arm940_processor_functions, #object
323ENTRY(arm940_processor_functions) 323ENTRY(arm940_processor_functions)
324 .word nommu_early_abort 324 .word nommu_early_abort
325 .word pabort_noifar 325 .word legacy_pabort
326 .word cpu_arm940_proc_init 326 .word cpu_arm940_proc_init
327 .word cpu_arm940_proc_fin 327 .word cpu_arm940_proc_fin
328 .word cpu_arm940_reset 328 .word cpu_arm940_reset
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index e03f6ff1fb26..40c0449a139b 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -377,7 +377,7 @@ __arm946_setup:
377 .type arm946_processor_functions, #object 377 .type arm946_processor_functions, #object
378ENTRY(arm946_processor_functions) 378ENTRY(arm946_processor_functions)
379 .word nommu_early_abort 379 .word nommu_early_abort
380 .word pabort_noifar 380 .word legacy_pabort
381 .word cpu_arm946_proc_init 381 .word cpu_arm946_proc_init
382 .word cpu_arm946_proc_fin 382 .word cpu_arm946_proc_fin
383 .word cpu_arm946_reset 383 .word cpu_arm946_reset
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index be6c11d2b3fb..28545c29dbcd 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -64,7 +64,7 @@ __arm9tdmi_setup:
64 .type arm9tdmi_processor_functions, #object 64 .type arm9tdmi_processor_functions, #object
65ENTRY(arm9tdmi_processor_functions) 65ENTRY(arm9tdmi_processor_functions)
66 .word nommu_early_abort 66 .word nommu_early_abort
67 .word pabort_noifar 67 .word legacy_pabort
68 .word cpu_arm9tdmi_proc_init 68 .word cpu_arm9tdmi_proc_init
69 .word cpu_arm9tdmi_proc_fin 69 .word cpu_arm9tdmi_proc_fin
70 .word cpu_arm9tdmi_reset 70 .word cpu_arm9tdmi_reset
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 08b8a955d5d7..08f5ac237ad4 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -191,7 +191,7 @@ fa526_cr1_set:
191 .type fa526_processor_functions, #object 191 .type fa526_processor_functions, #object
192fa526_processor_functions: 192fa526_processor_functions:
193 .word v4_early_abort 193 .word v4_early_abort
194 .word pabort_noifar 194 .word legacy_pabort
195 .word cpu_fa526_proc_init 195 .word cpu_fa526_proc_init
196 .word cpu_fa526_proc_fin 196 .word cpu_fa526_proc_fin
197 .word cpu_fa526_reset 197 .word cpu_fa526_reset
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 0fe1f8fc3488..d0d7795200fc 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -499,7 +499,7 @@ feroceon_crval:
499 .type feroceon_processor_functions, #object 499 .type feroceon_processor_functions, #object
500feroceon_processor_functions: 500feroceon_processor_functions:
501 .word v5t_early_abort 501 .word v5t_early_abort
502 .word pabort_noifar 502 .word legacy_pabort
503 .word cpu_feroceon_proc_init 503 .word cpu_feroceon_proc_init
504 .word cpu_feroceon_proc_fin 504 .word cpu_feroceon_proc_fin
505 .word cpu_feroceon_reset 505 .word cpu_feroceon_reset
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 540f5078496b..52b5fd74fbb3 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -359,7 +359,7 @@ mohawk_crval:
359 .type mohawk_processor_functions, #object 359 .type mohawk_processor_functions, #object
360mohawk_processor_functions: 360mohawk_processor_functions:
361 .word v5t_early_abort 361 .word v5t_early_abort
362 .word pabort_noifar 362 .word legacy_pabort
363 .word cpu_mohawk_proc_init 363 .word cpu_mohawk_proc_init
364 .word cpu_mohawk_proc_fin 364 .word cpu_mohawk_proc_fin
365 .word cpu_mohawk_reset 365 .word cpu_mohawk_reset
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 90a7e5279f29..7b706b389906 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -199,7 +199,7 @@ sa110_crval:
199 .type sa110_processor_functions, #object 199 .type sa110_processor_functions, #object
200ENTRY(sa110_processor_functions) 200ENTRY(sa110_processor_functions)
201 .word v4_early_abort 201 .word v4_early_abort
202 .word pabort_noifar 202 .word legacy_pabort
203 .word cpu_sa110_proc_init 203 .word cpu_sa110_proc_init
204 .word cpu_sa110_proc_fin 204 .word cpu_sa110_proc_fin
205 .word cpu_sa110_reset 205 .word cpu_sa110_reset
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 451e2d953e2a..ee7700242c19 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -214,7 +214,7 @@ sa1100_crval:
214 .type sa1100_processor_functions, #object 214 .type sa1100_processor_functions, #object
215ENTRY(sa1100_processor_functions) 215ENTRY(sa1100_processor_functions)
216 .word v4_early_abort 216 .word v4_early_abort
217 .word pabort_noifar 217 .word legacy_pabort
218 .word cpu_sa1100_proc_init 218 .word cpu_sa1100_proc_init
219 .word cpu_sa1100_proc_fin 219 .word cpu_sa1100_proc_fin
220 .word cpu_sa1100_reset 220 .word cpu_sa1100_reset
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 524ddae92595..194737d60a22 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -191,7 +191,7 @@ v6_crval:
191 .type v6_processor_functions, #object 191 .type v6_processor_functions, #object
192ENTRY(v6_processor_functions) 192ENTRY(v6_processor_functions)
193 .word v6_early_abort 193 .word v6_early_abort
194 .word pabort_noifar 194 .word v6_pabort
195 .word cpu_v6_proc_init 195 .word cpu_v6_proc_init
196 .word cpu_v6_proc_fin 196 .word cpu_v6_proc_fin
197 .word cpu_v6_reset 197 .word cpu_v6_reset
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index f3fa1c32fe92..23ebcf6eab9f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -295,7 +295,7 @@ __v7_setup_stack:
295 .type v7_processor_functions, #object 295 .type v7_processor_functions, #object
296ENTRY(v7_processor_functions) 296ENTRY(v7_processor_functions)
297 .word v7_early_abort 297 .word v7_early_abort
298 .word pabort_ifar 298 .word v7_pabort
299 .word cpu_v7_proc_init 299 .word cpu_v7_proc_init
300 .word cpu_v7_proc_fin 300 .word cpu_v7_proc_fin
301 .word cpu_v7_reset 301 .word cpu_v7_reset
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 33515c214b92..2028f3702881 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -428,7 +428,7 @@ xsc3_crval:
428 .type xsc3_processor_functions, #object 428 .type xsc3_processor_functions, #object
429ENTRY(xsc3_processor_functions) 429ENTRY(xsc3_processor_functions)
430 .word v5t_early_abort 430 .word v5t_early_abort
431 .word pabort_noifar 431 .word legacy_pabort
432 .word cpu_xsc3_proc_init 432 .word cpu_xsc3_proc_init
433 .word cpu_xsc3_proc_fin 433 .word cpu_xsc3_proc_fin
434 .word cpu_xsc3_reset 434 .word cpu_xsc3_reset
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 423394260bcb..f056c283682d 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -511,7 +511,7 @@ xscale_crval:
511 .type xscale_processor_functions, #object 511 .type xscale_processor_functions, #object
512ENTRY(xscale_processor_functions) 512ENTRY(xscale_processor_functions)
513 .word v5t_early_abort 513 .word v5t_early_abort
514 .word pabort_noifar 514 .word legacy_pabort
515 .word cpu_xscale_proc_init 515 .word cpu_xscale_proc_init
516 .word cpu_xscale_proc_fin 516 .word cpu_xscale_proc_fin
517 .word cpu_xscale_reset 517 .word cpu_xscale_reset
diff --git a/arch/arm/plat-iop/pci.c b/arch/arm/plat-iop/pci.c
index 77fa7cc7d162..ce31f316ac75 100644
--- a/arch/arm/plat-iop/pci.c
+++ b/arch/arm/plat-iop/pci.c
@@ -257,7 +257,8 @@ void __init iop3xx_atu_setup(void)
257 *IOP3XX_OUMWTVR0 = 0; 257 *IOP3XX_OUMWTVR0 = 0;
258 258
259 /* Outbound window 1 */ 259 /* Outbound window 1 */
260 *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA + IOP3XX_PCI_MEM_WINDOW_SIZE; 260 *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA +
261 IOP3XX_PCI_MEM_WINDOW_SIZE / 2;
261 *IOP3XX_OUMWTVR1 = 0; 262 *IOP3XX_OUMWTVR1 = 0;
262 263
263 /* BAR 3 ( Disabled ) */ 264 /* BAR 3 ( Disabled ) */
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 3695bbe3ee28..8da95d57c21f 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -85,7 +85,7 @@ void __init iop_init_time(unsigned long tick_rate)
85{ 85{
86 u32 timer_ctl; 86 u32 timer_ctl;
87 87
88 ticks_per_jiffy = (tick_rate + HZ/2) / HZ; 88 ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
89 ticks_per_usec = tick_rate / 1000000; 89 ticks_per_usec = tick_rate / 1000000;
90 next_jiffy_time = 0xffffffff; 90 next_jiffy_time = 0xffffffff;
91 iop_tick_rate = tick_rate; 91 iop_tick_rate = tick_rate;
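For unsigned operands the kernel's DIV_ROUND_CLOSEST() reduces to adding half the divisor before dividing, so the iop_init_time() change above is a cleanup rather than a behaviour change; a quick check under that assumption:

	#include <assert.h>

	/* Simplified form for positive values; the in-kernel macro also
	 * evaluates its divisor only once and copes with signed types. */
	#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + ((divisor) / 2)) / (divisor))

	int main(void)
	{
		unsigned long tick_rate = 200000000UL;	/* example IOP tick rate */
		unsigned long hz = 100;			/* stand-in for HZ */

		assert(DIV_ROUND_CLOSEST(tick_rate, hz) == (tick_rate + hz / 2) / hz);
		return 0;
	}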
diff --git a/arch/arm/plat-omap/include/mach/cpu.h b/arch/arm/plat-omap/include/mach/cpu.h
index 11e73d9e8928..f129efb3075e 100644
--- a/arch/arm/plat-omap/include/mach/cpu.h
+++ b/arch/arm/plat-omap/include/mach/cpu.h
@@ -303,32 +303,21 @@ IS_OMAP_TYPE(3430, 0x3430)
303#define cpu_is_omap2430() 0 303#define cpu_is_omap2430() 0
304#define cpu_is_omap3430() 0 304#define cpu_is_omap3430() 0
305 305
306#if defined(MULTI_OMAP1)
307# if defined(CONFIG_ARCH_OMAP730)
308# undef cpu_is_omap730
309# define cpu_is_omap730() is_omap730()
310# endif
311# if defined(CONFIG_ARCH_OMAP850)
312# undef cpu_is_omap850
313# define cpu_is_omap850() is_omap850()
314# endif
315#else
316# if defined(CONFIG_ARCH_OMAP730)
317# undef cpu_is_omap730
318# define cpu_is_omap730() 1
319# endif
320#endif
321#else
322# if defined(CONFIG_ARCH_OMAP850)
323# undef cpu_is_omap850
324# define cpu_is_omap850() 1
325# endif
326#endif
327
328/* 306/*
329 * Whether we have MULTI_OMAP1 or not, we still need to distinguish 307 * Whether we have MULTI_OMAP1 or not, we still need to distinguish
330 * between 330 vs. 1510 and 1611B/5912 vs. 1710. 308 * between 730 vs 850, 330 vs. 1510 and 1611B/5912 vs. 1710.
331 */ 309 */
310
311#if defined(CONFIG_ARCH_OMAP730)
312# undef cpu_is_omap730
313# define cpu_is_omap730() is_omap730()
314#endif
315
316#if defined(CONFIG_ARCH_OMAP850)
317# undef cpu_is_omap850
318# define cpu_is_omap850() is_omap850()
319#endif
320
332#if defined(CONFIG_ARCH_OMAP15XX) 321#if defined(CONFIG_ARCH_OMAP15XX)
333# undef cpu_is_omap310 322# undef cpu_is_omap310
334# undef cpu_is_omap1510 323# undef cpu_is_omap1510
@@ -433,3 +422,5 @@ IS_OMAP_TYPE(3430, 0x3430)
433 422
434int omap_chip_is(struct omap_chip_id oci); 423int omap_chip_is(struct omap_chip_id oci);
435void omap2_check_revision(void); 424void omap2_check_revision(void);
425
426#endif
diff --git a/arch/arm/plat-omap/include/mach/powerdomain.h b/arch/arm/plat-omap/include/mach/powerdomain.h
index 6271d8556a40..fa6461423bd0 100644
--- a/arch/arm/plat-omap/include/mach/powerdomain.h
+++ b/arch/arm/plat-omap/include/mach/powerdomain.h
@@ -135,6 +135,8 @@ struct powerdomain *pwrdm_lookup(const char *name);
135 135
136int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), 136int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user),
137 void *user); 137 void *user);
138int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user),
139 void *user);
138 140
139int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); 141int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
140int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); 142int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm);
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 57f7122a0919..dc3fac3dd0ea 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -47,7 +47,7 @@
47 * 'va': mpu virtual address 47 * 'va': mpu virtual address
48 * 48 *
49 * 'c': contiguous memory area 49 * 'c': contiguous memory area
50 * 'd': dicontiguous memory area 50 * 'd': discontiguous memory area
51 * 'a': anonymous memory allocation 51 * 'a': anonymous memory allocation
52 * '()': optional feature 52 * '()': optional feature
53 * 53 *
@@ -363,8 +363,9 @@ void *da_to_va(struct iommu *obj, u32 da)
363 goto out; 363 goto out;
364 } 364 }
365 va = area->va; 365 va = area->va;
366 mutex_unlock(&obj->mmap_lock);
367out: 366out:
367 mutex_unlock(&obj->mmap_lock);
368
368 return va; 369 return va;
369} 370}
370EXPORT_SYMBOL_GPL(da_to_va); 371EXPORT_SYMBOL_GPL(da_to_va);
@@ -398,7 +399,7 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
398{ 399{
399 /* 400 /*
400 * Actually this is not necessary at all, just exists for 401 * Actually this is not necessary at all, just exists for
401 * consistency of the code readibility. 402 * consistency of the code readability.
402 */ 403 */
403 BUG_ON(!sgt); 404 BUG_ON(!sgt);
404} 405}
@@ -434,7 +435,7 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
434{ 435{
435 /* 436 /*
436 * Actually this is not necessary at all, just exists for 437 * Actually this is not necessary at all, just exists for
437 * consistency of the code readibility 438 * consistency of the code readability
438 */ 439 */
439 BUG_ON(!sgt); 440 BUG_ON(!sgt);
440} 441}
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 925f64711c37..75d1f26e5b17 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -270,7 +270,8 @@ void * omap_sram_push(void * start, unsigned long size)
270 omap_sram_ceil -= size; 270 omap_sram_ceil -= size;
271 omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *)); 271 omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *));
272 memcpy((void *)omap_sram_ceil, start, size); 272 memcpy((void *)omap_sram_ceil, start, size);
273 flush_icache_range((unsigned long)start, (unsigned long)(start + size)); 273 flush_icache_range((unsigned long)omap_sram_ceil,
274 (unsigned long)(omap_sram_ceil + size));
274 275
275 return (void *)omap_sram_ceil; 276 return (void *)omap_sram_ceil;
276} 277}
diff --git a/arch/arm/plat-s3c24xx/include/plat/mci.h b/arch/arm/plat-s3c24xx/include/plat/mci.h
index 2d0852ac3b27..c2cef6139683 100644
--- a/arch/arm/plat-s3c24xx/include/plat/mci.h
+++ b/arch/arm/plat-s3c24xx/include/plat/mci.h
@@ -2,8 +2,11 @@
2#define _ARCH_MCI_H 2#define _ARCH_MCI_H
3 3
4struct s3c24xx_mci_pdata { 4struct s3c24xx_mci_pdata {
5 unsigned int no_wprotect : 1;
6 unsigned int no_detect : 1;
5 unsigned int wprotect_invert : 1; 7 unsigned int wprotect_invert : 1;
6 unsigned int detect_invert : 1; /* set => detect active high. */ 8 unsigned int detect_invert : 1; /* set => detect active high. */
9 unsigned int use_dma : 1;
7 10
8 unsigned int gpio_detect; 11 unsigned int gpio_detect;
9 unsigned int gpio_wprotect; 12 unsigned int gpio_wprotect;
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 93635a766f9c..1e60a92dd602 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -48,7 +48,7 @@ coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned l
48 return ret; 48 return ret;
49} 49}
50 50
51static struct file_operations coreb_fops = { 51static const struct file_operations coreb_fops = {
52 .owner = THIS_MODULE, 52 .owner = THIS_MODULE,
53 .ioctl = coreb_ioctl, 53 .ioctl = coreb_ioctl,
54}; 54};
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index 6cc1a0319a5d..562b9a7feae7 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -244,7 +244,7 @@ static unsigned sync_serial_prescale_shadow;
244 244
245#define NUMBER_OF_PORTS 2 245#define NUMBER_OF_PORTS 2
246 246
247static struct file_operations sync_serial_fops = { 247static const struct file_operations sync_serial_fops = {
248 .owner = THIS_MODULE, 248 .owner = THIS_MODULE,
249 .write = sync_serial_write, 249 .write = sync_serial_write,
250 .read = sync_serial_read, 250 .read = sync_serial_read,
diff --git a/arch/cris/arch-v32/drivers/mach-fs/gpio.c b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
index fe1fde893887..d89ab80498ed 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/gpio.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/gpio.c
@@ -855,7 +855,7 @@ gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
855 return 0; 855 return 0;
856} 856}
857 857
858struct file_operations gpio_fops = { 858static const struct file_operations gpio_fops = {
859 .owner = THIS_MODULE, 859 .owner = THIS_MODULE,
860 .poll = gpio_poll, 860 .poll = gpio_poll,
861 .ioctl = gpio_ioctl, 861 .ioctl = gpio_ioctl,
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index d06933bd6318..4010f1fc5b65 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -162,6 +162,13 @@ static inline void _writel(unsigned long l, unsigned long addr)
162#define __raw_writew writew 162#define __raw_writew writew
163#define __raw_writel writel 163#define __raw_writel writel
164 164
165#define ioread8 readb
166#define ioread16 readw
167#define ioread32 readl
168#define iowrite8 writeb
169#define iowrite16 writew
170#define iowrite32 writel
171
165#define mmiowb() 172#define mmiowb()
166 173
167#define flush_write_buffers() do { } while (0) /* M32R_FIXME */ 174#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 22624b51d4d3..700570747a90 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -23,12 +23,6 @@ EXPORT_SYMBOL(__ioremap);
23EXPORT_SYMBOL(iounmap); 23EXPORT_SYMBOL(iounmap);
24EXPORT_SYMBOL(kernel_thread); 24EXPORT_SYMBOL(kernel_thread);
25 25
26/* Networking helper routines. */
27/* Delay loops */
28EXPORT_SYMBOL(__udelay);
29EXPORT_SYMBOL(__delay);
30EXPORT_SYMBOL(__const_udelay);
31
32EXPORT_SYMBOL(strncpy_from_user); 26EXPORT_SYMBOL(strncpy_from_user);
33EXPORT_SYMBOL(__strncpy_from_user); 27EXPORT_SYMBOL(__strncpy_from_user);
34EXPORT_SYMBOL(clear_user); 28EXPORT_SYMBOL(clear_user);
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index ba61c4c73202..e7fee0f198d5 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -33,6 +33,15 @@
33 33
34#include <asm/hw_irq.h> 34#include <asm/hw_irq.h>
35 35
36#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
37/* this needs a better home */
38DEFINE_SPINLOCK(rtc_lock);
39
40#ifdef CONFIG_RTC_DRV_CMOS_MODULE
41EXPORT_SYMBOL(rtc_lock);
42#endif
43#endif /* pc-style 'CMOS' RTC support */
44
36#ifdef CONFIG_SMP 45#ifdef CONFIG_SMP
37extern void smp_local_timer_interrupt(void); 46extern void smp_local_timer_interrupt(void);
38#endif 47#endif
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 03b14e55cd89..fbd109031df3 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -104,8 +104,8 @@ static void set_eit_vector_entries(void)
104 eit_vector[186] = (unsigned long)smp_call_function_interrupt; 104 eit_vector[186] = (unsigned long)smp_call_function_interrupt;
105 eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt; 105 eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt;
106 eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt; 106 eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt;
107 eit_vector[189] = (unsigned long)smp_call_function_single_interrupt; 107 eit_vector[189] = 0; /* CPU_BOOT_IPI */
108 eit_vector[190] = 0; 108 eit_vector[190] = (unsigned long)smp_call_function_single_interrupt;
109 eit_vector[191] = 0; 109 eit_vector[191] = 0;
110#endif 110#endif
111 _flush_cache_copyback_all(); 111 _flush_cache_copyback_all();
diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c
index ced549be80f5..940f4837e42b 100644
--- a/arch/m32r/lib/delay.c
+++ b/arch/m32r/lib/delay.c
@@ -122,4 +122,8 @@ void __ndelay(unsigned long nsecs)
122{ 122{
123 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ 123 __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
124} 124}
125
126EXPORT_SYMBOL(__delay);
127EXPORT_SYMBOL(__const_udelay);
128EXPORT_SYMBOL(__udelay);
125EXPORT_SYMBOL(__ndelay); 129EXPORT_SYMBOL(__ndelay);
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index b7a78ad429b7..5d2858f6eede 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -32,6 +32,9 @@ typedef struct {
32} mem_prof_t; 32} mem_prof_t;
33static mem_prof_t mem_prof[MAX_NUMNODES]; 33static mem_prof_t mem_prof[MAX_NUMNODES];
34 34
35extern unsigned long memory_start;
36extern unsigned long memory_end;
37
35static void __init mem_prof_init(void) 38static void __init mem_prof_init(void)
36{ 39{
37 unsigned long start_pfn, holes, free_pfn; 40 unsigned long start_pfn, holes, free_pfn;
@@ -42,7 +45,7 @@ static void __init mem_prof_init(void)
42 /* Node#0 SDRAM */ 45 /* Node#0 SDRAM */
43 mp = &mem_prof[0]; 46 mp = &mem_prof[0];
44 mp->start_pfn = PFN_UP(CONFIG_MEMORY_START); 47 mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
45 mp->pages = PFN_DOWN(CONFIG_MEMORY_SIZE); 48 mp->pages = PFN_DOWN(memory_end - memory_start);
46 mp->holes = 0; 49 mp->holes = 0;
47 mp->free_pfn = PFN_UP(__pa(_end)); 50 mp->free_pfn = PFN_UP(__pa(_end));
48 51
diff --git a/arch/m32r/mm/mmu.S b/arch/m32r/mm/mmu.S
index 49a6d16a3d58..e9491a5ae827 100644
--- a/arch/m32r/mm/mmu.S
+++ b/arch/m32r/mm/mmu.S
@@ -150,9 +150,13 @@ ENTRY(tme_handler)
150 150
151 ; pmd = pmd_offset(pgd, address); 151 ; pmd = pmd_offset(pgd, address);
152 ld r3, @r3 ; r3: pmd data 152 ld r3, @r3 ; r3: pmd data
153 ldi r2, #-4096
154 beqz r3, 3f ; pmd_none(*pmd) ? 153 beqz r3, 3f ; pmd_none(*pmd) ?
155 154
155 and3 r2, r3, #0xfff
156 add3 r2, r2, #-355 ; _KERNPG_TABLE(=0x163)
157 bnez r2, 3f ; pmd_bad(*pmd) ?
158 ldi r2, #-4096
159
156 ; pte = pte_offset(pmd, address); 160 ; pte = pte_offset(pmd, address);
157 and r2, r3 ; r2: pte base addr 161 and r2, r3 ; r2: pte base addr
158 srl3 r3, r0, #10 162 srl3 r3, r0, #10
@@ -263,9 +267,9 @@ ENTRY(tme_handler)
263 ld r1, @r3 ; r1: pmd 267 ld r1, @r3 ; r1: pmd
264 beqz r1, 3f ; pmd_none(*pmd) ? 268 beqz r1, 3f ; pmd_none(*pmd) ?
265; 269;
266 and3 r1, r1, #0xeff 270 and3 r1, r1, #0x3ff
267 ldi r4, #611 ; _KERNPG_TABLE(=611) 271 ldi r4, #0x163 ; _KERNPG_TABLE(=0x163)
268 bne r1, r4, 3f ; !pmd_bad(*pmd) ? 272 bne r1, r4, 3f ; pmd_bad(*pmd) ?
269 273
270 .fillinsn 274 .fillinsn
2714: 2754:
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
index 554f65b6cd3b..394ee946015c 100644
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ b/arch/m68k/include/asm/hardirq_mm.h
@@ -1,8 +1,16 @@
1#ifndef __M68K_HARDIRQ_H 1#ifndef __M68K_HARDIRQ_H
2#define __M68K_HARDIRQ_H 2#define __M68K_HARDIRQ_H
3 3
4#define HARDIRQ_BITS 8 4#include <linux/threads.h>
5#include <linux/cache.h>
6
7/* entry.S is sensitive to the offsets of these fields */
8typedef struct {
9 unsigned int __softirq_pending;
10} ____cacheline_aligned irq_cpustat_t;
5 11
6#include <asm-generic/hardirq.h> 12#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
13
14#define HARDIRQ_BITS 8
7 15
8#endif 16#endif
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index 594ee0e657fe..9a8876f715d8 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -45,25 +45,25 @@ int main(void)
45 DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); 45 DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
46 46
47 /* offsets into the pt_regs */ 47 /* offsets into the pt_regs */
48 DEFINE(PT_D0, offsetof(struct pt_regs, d0)); 48 DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
49 DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); 49 DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
50 DEFINE(PT_D1, offsetof(struct pt_regs, d1)); 50 DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
51 DEFINE(PT_D2, offsetof(struct pt_regs, d2)); 51 DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
52 DEFINE(PT_D3, offsetof(struct pt_regs, d3)); 52 DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
53 DEFINE(PT_D4, offsetof(struct pt_regs, d4)); 53 DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
54 DEFINE(PT_D5, offsetof(struct pt_regs, d5)); 54 DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
55 DEFINE(PT_A0, offsetof(struct pt_regs, a0)); 55 DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
56 DEFINE(PT_A1, offsetof(struct pt_regs, a1)); 56 DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
57 DEFINE(PT_A2, offsetof(struct pt_regs, a2)); 57 DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
58 DEFINE(PT_PC, offsetof(struct pt_regs, pc)); 58 DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
59 DEFINE(PT_SR, offsetof(struct pt_regs, sr)); 59 DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
60 60
61#ifdef CONFIG_COLDFIRE 61#ifdef CONFIG_COLDFIRE
62 /* bitfields are a bit difficult */ 62 /* bitfields are a bit difficult */
63 DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2); 63 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
64#else 64#else
65 /* bitfields are a bit difficult */ 65 /* bitfields are a bit difficult */
66 DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); 66 DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4);
67#endif 67#endif
68 68
69 /* signal defines */ 69 /* signal defines */
diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S
index f56faa5c9cd9..56043ade3941 100644
--- a/arch/m68knommu/kernel/entry.S
+++ b/arch/m68knommu/kernel/entry.S
@@ -46,7 +46,7 @@
46ENTRY(buserr) 46ENTRY(buserr)
47 SAVE_ALL 47 SAVE_ALL
48 moveq #-1,%d0 48 moveq #-1,%d0
49 movel %d0,%sp@(PT_ORIG_D0) 49 movel %d0,%sp@(PT_OFF_ORIG_D0)
50 movel %sp,%sp@- /* stack frame pointer argument */ 50 movel %sp,%sp@- /* stack frame pointer argument */
51 jsr buserr_c 51 jsr buserr_c
52 addql #4,%sp 52 addql #4,%sp
@@ -55,7 +55,7 @@ ENTRY(buserr)
55ENTRY(trap) 55ENTRY(trap)
56 SAVE_ALL 56 SAVE_ALL
57 moveq #-1,%d0 57 moveq #-1,%d0
58 movel %d0,%sp@(PT_ORIG_D0) 58 movel %d0,%sp@(PT_OFF_ORIG_D0)
59 movel %sp,%sp@- /* stack frame pointer argument */ 59 movel %sp,%sp@- /* stack frame pointer argument */
60 jsr trap_c 60 jsr trap_c
61 addql #4,%sp 61 addql #4,%sp
@@ -67,7 +67,7 @@ ENTRY(trap)
67ENTRY(dbginterrupt) 67ENTRY(dbginterrupt)
68 SAVE_ALL 68 SAVE_ALL
69 moveq #-1,%d0 69 moveq #-1,%d0
70 movel %d0,%sp@(PT_ORIG_D0) 70 movel %d0,%sp@(PT_OFF_ORIG_D0)
71 movel %sp,%sp@- /* stack frame pointer argument */ 71 movel %sp,%sp@- /* stack frame pointer argument */
72 jsr dbginterrupt_c 72 jsr dbginterrupt_c
73 addql #4,%sp 73 addql #4,%sp
diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c
index b1703c67a4f1..f3236d0b522d 100644
--- a/arch/m68knommu/mm/init.c
+++ b/arch/m68knommu/mm/init.c
@@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
162 totalram_pages++; 162 totalram_pages++;
163 pages++; 163 pages++;
164 } 164 }
165 printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); 165 printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024));
166} 166}
167#endif 167#endif
168 168
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 0f41ba82a3b5..942397984c66 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -17,7 +17,6 @@
17#include <asm/mcfsim.h> 17#include <asm/mcfsim.h>
18#include <asm/mcfuart.h> 18#include <asm/mcfuart.h>
19#include <asm/mcfdma.h> 19#include <asm/mcfdma.h>
20#include <asm/mcfuart.h>
21 20
22/***************************************************************************/ 21/***************************************************************************/
23 22
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index b1aef72f3baf..9d80d2c42866 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -39,17 +39,17 @@
39.globl inthandler7 39.globl inthandler7
40 40
41badsys: 41badsys:
42 movel #-ENOSYS,%sp@(PT_D0) 42 movel #-ENOSYS,%sp@(PT_OFF_D0)
43 jra ret_from_exception 43 jra ret_from_exception
44 44
45do_trace: 45do_trace:
46 movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ 46 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
47 subql #4,%sp 47 subql #4,%sp
48 SAVE_SWITCH_STACK 48 SAVE_SWITCH_STACK
49 jbsr syscall_trace 49 jbsr syscall_trace
50 RESTORE_SWITCH_STACK 50 RESTORE_SWITCH_STACK
51 addql #4,%sp 51 addql #4,%sp
52 movel %sp@(PT_ORIG_D0),%d1 52 movel %sp@(PT_OFF_ORIG_D0),%d1
53 movel #-ENOSYS,%d0 53 movel #-ENOSYS,%d0
54 cmpl #NR_syscalls,%d1 54 cmpl #NR_syscalls,%d1
55 jcc 1f 55 jcc 1f
@@ -57,7 +57,7 @@ do_trace:
57 lea sys_call_table, %a0 57 lea sys_call_table, %a0
58 jbsr %a0@(%d1) 58 jbsr %a0@(%d1)
59 59
601: movel %d0,%sp@(PT_D0) /* save the return value */ 601: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
61 subql #4,%sp /* dummy return address */ 61 subql #4,%sp /* dummy return address */
62 SAVE_SWITCH_STACK 62 SAVE_SWITCH_STACK
63 jbsr syscall_trace 63 jbsr syscall_trace
@@ -75,7 +75,7 @@ ENTRY(system_call)
75 jbsr set_esp0 75 jbsr set_esp0
76 addql #4,%sp 76 addql #4,%sp
77 77
78 movel %sp@(PT_ORIG_D0),%d0 78 movel %sp@(PT_OFF_ORIG_D0),%d0
79 79
80 movel %sp,%d1 /* get thread_info pointer */ 80 movel %sp,%d1 /* get thread_info pointer */
81 andl #-THREAD_SIZE,%d1 81 andl #-THREAD_SIZE,%d1
@@ -88,10 +88,10 @@ ENTRY(system_call)
88 lea sys_call_table,%a0 88 lea sys_call_table,%a0
89 movel %a0@(%d0), %a0 89 movel %a0@(%d0), %a0
90 jbsr %a0@ 90 jbsr %a0@
91 movel %d0,%sp@(PT_D0) /* save the return value*/ 91 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
92 92
93ret_from_exception: 93ret_from_exception:
94 btst #5,%sp@(PT_SR) /* check if returning to kernel*/ 94 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
95 jeq Luser_return /* if so, skip resched, signals*/ 95 jeq Luser_return /* if so, skip resched, signals*/
96 96
97Lkernel_return: 97Lkernel_return:
@@ -133,7 +133,7 @@ Lreturn:
133 */ 133 */
134inthandler1: 134inthandler1:
135 SAVE_ALL 135 SAVE_ALL
136 movew %sp@(PT_VECTOR), %d0 136 movew %sp@(PT_OFF_VECTOR), %d0
137 and #0x3ff, %d0 137 and #0x3ff, %d0
138 138
139 movel %sp,%sp@- 139 movel %sp,%sp@-
@@ -144,7 +144,7 @@ inthandler1:
144 144
145inthandler2: 145inthandler2:
146 SAVE_ALL 146 SAVE_ALL
147 movew %sp@(PT_VECTOR), %d0 147 movew %sp@(PT_OFF_VECTOR), %d0
148 and #0x3ff, %d0 148 and #0x3ff, %d0
149 149
150 movel %sp,%sp@- 150 movel %sp,%sp@-
@@ -155,7 +155,7 @@ inthandler2:
155 155
156inthandler3: 156inthandler3:
157 SAVE_ALL 157 SAVE_ALL
158 movew %sp@(PT_VECTOR), %d0 158 movew %sp@(PT_OFF_VECTOR), %d0
159 and #0x3ff, %d0 159 and #0x3ff, %d0
160 160
161 movel %sp,%sp@- 161 movel %sp,%sp@-
@@ -166,7 +166,7 @@ inthandler3:
166 166
167inthandler4: 167inthandler4:
168 SAVE_ALL 168 SAVE_ALL
169 movew %sp@(PT_VECTOR), %d0 169 movew %sp@(PT_OFF_VECTOR), %d0
170 and #0x3ff, %d0 170 and #0x3ff, %d0
171 171
172 movel %sp,%sp@- 172 movel %sp,%sp@-
@@ -177,7 +177,7 @@ inthandler4:
177 177
178inthandler5: 178inthandler5:
179 SAVE_ALL 179 SAVE_ALL
180 movew %sp@(PT_VECTOR), %d0 180 movew %sp@(PT_OFF_VECTOR), %d0
181 and #0x3ff, %d0 181 and #0x3ff, %d0
182 182
183 movel %sp,%sp@- 183 movel %sp,%sp@-
@@ -188,7 +188,7 @@ inthandler5:
188 188
189inthandler6: 189inthandler6:
190 SAVE_ALL 190 SAVE_ALL
191 movew %sp@(PT_VECTOR), %d0 191 movew %sp@(PT_OFF_VECTOR), %d0
192 and #0x3ff, %d0 192 and #0x3ff, %d0
193 193
194 movel %sp,%sp@- 194 movel %sp,%sp@-
@@ -199,7 +199,7 @@ inthandler6:
199 199
200inthandler7: 200inthandler7:
201 SAVE_ALL 201 SAVE_ALL
202 movew %sp@(PT_VECTOR), %d0 202 movew %sp@(PT_OFF_VECTOR), %d0
203 and #0x3ff, %d0 203 and #0x3ff, %d0
204 204
205 movel %sp,%sp@- 205 movel %sp,%sp@-
@@ -210,7 +210,7 @@ inthandler7:
210 210
211inthandler: 211inthandler:
212 SAVE_ALL 212 SAVE_ALL
213 movew %sp@(PT_VECTOR), %d0 213 movew %sp@(PT_OFF_VECTOR), %d0
214 and #0x3ff, %d0 214 and #0x3ff, %d0
215 215
216 movel %sp,%sp@- 216 movel %sp,%sp@-
@@ -224,7 +224,7 @@ ret_from_interrupt:
2242: 2242:
225 RESTORE_ALL 225 RESTORE_ALL
2261: 2261:
227 moveb %sp@(PT_SR), %d0 227 moveb %sp@(PT_OFF_SR), %d0
228 and #7, %d0 228 and #7, %d0
229 jhi 2b 229 jhi 2b
230 230
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 55dfefe38642..6d3460a39cac 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -35,17 +35,17 @@
35.globl inthandler 35.globl inthandler
36 36
37badsys: 37badsys:
38 movel #-ENOSYS,%sp@(PT_D0) 38 movel #-ENOSYS,%sp@(PT_OFF_D0)
39 jra ret_from_exception 39 jra ret_from_exception
40 40
41do_trace: 41do_trace:
42 movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ 42 movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
43 subql #4,%sp 43 subql #4,%sp
44 SAVE_SWITCH_STACK 44 SAVE_SWITCH_STACK
45 jbsr syscall_trace 45 jbsr syscall_trace
46 RESTORE_SWITCH_STACK 46 RESTORE_SWITCH_STACK
47 addql #4,%sp 47 addql #4,%sp
48 movel %sp@(PT_ORIG_D0),%d1 48 movel %sp@(PT_OFF_ORIG_D0),%d1
49 movel #-ENOSYS,%d0 49 movel #-ENOSYS,%d0
50 cmpl #NR_syscalls,%d1 50 cmpl #NR_syscalls,%d1
51 jcc 1f 51 jcc 1f
@@ -53,7 +53,7 @@ do_trace:
53 lea sys_call_table, %a0 53 lea sys_call_table, %a0
54 jbsr %a0@(%d1) 54 jbsr %a0@(%d1)
55 55
561: movel %d0,%sp@(PT_D0) /* save the return value */ 561: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
57 subql #4,%sp /* dummy return address */ 57 subql #4,%sp /* dummy return address */
58 SAVE_SWITCH_STACK 58 SAVE_SWITCH_STACK
59 jbsr syscall_trace 59 jbsr syscall_trace
@@ -79,10 +79,10 @@ ENTRY(system_call)
79 lea sys_call_table,%a0 79 lea sys_call_table,%a0
80 movel %a0@(%d0), %a0 80 movel %a0@(%d0), %a0
81 jbsr %a0@ 81 jbsr %a0@
82 movel %d0,%sp@(PT_D0) /* save the return value*/ 82 movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
83 83
84ret_from_exception: 84ret_from_exception:
85 btst #5,%sp@(PT_SR) /* check if returning to kernel*/ 85 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
86 jeq Luser_return /* if so, skip resched, signals*/ 86 jeq Luser_return /* if so, skip resched, signals*/
87 87
88Lkernel_return: 88Lkernel_return:
@@ -124,7 +124,7 @@ Lreturn:
124 */ 124 */
125inthandler: 125inthandler:
126 SAVE_ALL 126 SAVE_ALL
127 movew %sp@(PT_VECTOR), %d0 127 movew %sp@(PT_OFF_VECTOR), %d0
128 and.l #0x3ff, %d0 128 and.l #0x3ff, %d0
129 lsr.l #0x02, %d0 129 lsr.l #0x02, %d0
130 130
@@ -139,7 +139,7 @@ ret_from_interrupt:
1392: 1392:
140 RESTORE_ALL 140 RESTORE_ALL
1411: 1411:
142 moveb %sp@(PT_SR), %d0 142 moveb %sp@(PT_OFF_SR), %d0
143 and #7, %d0 143 and #7, %d0
144 jhi 2b 144 jhi 2b
145 /* check if we need to do software interrupts */ 145 /* check if we need to do software interrupts */
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 3b471c0da24a..dd7d591f70ea 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -81,11 +81,11 @@ ENTRY(system_call)
81 81
82 movel %d3,%a0 82 movel %d3,%a0
83 jbsr %a0@ 83 jbsr %a0@
84 movel %d0,%sp@(PT_D0) /* save the return value */ 84 movel %d0,%sp@(PT_OFF_D0) /* save the return value */
85 jra ret_from_exception 85 jra ret_from_exception
861: 861:
87 movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */ 87 movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */
88 movel %d2,PT_D0(%sp) /* on syscall entry */ 88 movel %d2,PT_OFF_D0(%sp) /* on syscall entry */
89 subql #4,%sp 89 subql #4,%sp
90 SAVE_SWITCH_STACK 90 SAVE_SWITCH_STACK
91 jbsr syscall_trace 91 jbsr syscall_trace
@@ -93,7 +93,7 @@ ENTRY(system_call)
93 addql #4,%sp 93 addql #4,%sp
94 movel %d3,%a0 94 movel %d3,%a0
95 jbsr %a0@ 95 jbsr %a0@
96 movel %d0,%sp@(PT_D0) /* save the return value */ 96 movel %d0,%sp@(PT_OFF_D0) /* save the return value */
97 subql #4,%sp /* dummy return address */ 97 subql #4,%sp /* dummy return address */
98 SAVE_SWITCH_STACK 98 SAVE_SWITCH_STACK
99 jbsr syscall_trace 99 jbsr syscall_trace
@@ -104,7 +104,7 @@ ret_from_signal:
104 104
105ret_from_exception: 105ret_from_exception:
106 move #0x2700,%sr /* disable intrs */ 106 move #0x2700,%sr /* disable intrs */
107 btst #5,%sp@(PT_SR) /* check if returning to kernel */ 107 btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */
108 jeq Luser_return /* if so, skip resched, signals */ 108 jeq Luser_return /* if so, skip resched, signals */
109 109
110#ifdef CONFIG_PREEMPT 110#ifdef CONFIG_PREEMPT
@@ -142,8 +142,8 @@ Luser_return:
142Lreturn: 142Lreturn:
143 move #0x2700,%sr /* disable intrs */ 143 move #0x2700,%sr /* disable intrs */
144 movel sw_usp,%a0 /* get usp */ 144 movel sw_usp,%a0 /* get usp */
145 movel %sp@(PT_PC),%a0@- /* copy exception program counter */ 145 movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
146 movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ 146 movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */
147 moveml %sp@,%d1-%d5/%a0-%a2 147 moveml %sp@,%d1-%d5/%a0-%a2
148 lea %sp@(32),%sp /* space for 8 regs */ 148 lea %sp@(32),%sp /* space for 8 regs */
149 movel %sp@+,%d0 149 movel %sp@+,%d0
@@ -181,9 +181,9 @@ Lsignal_return:
181ENTRY(inthandler) 181ENTRY(inthandler)
182 SAVE_ALL 182 SAVE_ALL
183 moveq #-1,%d0 183 moveq #-1,%d0
184 movel %d0,%sp@(PT_ORIG_D0) 184 movel %d0,%sp@(PT_OFF_ORIG_D0)
185 185
186 movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */ 186 movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
187 andl #0x03fc,%d0 /* mask out vector only */ 187 andl #0x03fc,%d0 /* mask out vector only */
188 188
189 movel %sp,%sp@- /* push regs arg */ 189 movel %sp,%sp@- /* push regs arg */
@@ -203,7 +203,7 @@ ENTRY(inthandler)
203ENTRY(fasthandler) 203ENTRY(fasthandler)
204 SAVE_LOCAL 204 SAVE_LOCAL
205 205
206 movew %sp@(PT_FORMATVEC),%d0 206 movew %sp@(PT_OFF_FORMATVEC),%d0
207 andl #0x03fc,%d0 /* mask out vector only */ 207 andl #0x03fc,%d0 /* mask out vector only */
208 208
209 movel %sp,%sp@- /* push regs arg */ 209 movel %sp,%sp@- /* push regs arg */
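Both entry.S hunks above only rename stack offsets (PT_D0 -> PT_OFF_D0 and so on); constants like these are normally generated from struct pt_regs by the arch's asm-offsets machinery rather than written by hand. A minimal user-space sketch of that offsetof()-based technique follows; the struct layout and the DEFINE macro here are simplified stand-ins, not the real m68knommu definitions.

/* Sketch: how PT_OFF_*-style constants are typically derived with offsetof().
 * The struct below is a simplified stand-in, not the real m68knommu pt_regs. */
#include <stddef.h>
#include <stdio.h>

struct fake_pt_regs {
        long d0;
        long orig_d0;
        unsigned short sr;
        unsigned long pc;
};

#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))

int main(void)
{
        /* An asm-offsets.c-style generator would emit these into a header
         * that the assembly files then include. */
        DEFINE(PT_OFF_D0, offsetof(struct fake_pt_regs, d0));
        DEFINE(PT_OFF_ORIG_D0, offsetof(struct fake_pt_regs, orig_d0));
        DEFINE(PT_OFF_SR, offsetof(struct fake_pt_regs, sr));
        DEFINE(PT_OFF_PC, offsetof(struct fake_pt_regs, pc));
        return 0;
}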
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index acc1f05d1e2c..e3ecb36dd554 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -592,6 +592,8 @@ C_ENTRY(full_exception_trap):
592 nop 592 nop
593 mfs r7, rfsr; /* save FSR */ 593 mfs r7, rfsr; /* save FSR */
594 nop 594 nop
595 mts rfsr, r0; /* Clear sticky fsr */
596 nop
595 la r12, r0, full_exception 597 la r12, r0, full_exception
596 set_vms; 598 set_vms;
597 rtbd r12, 0; 599 rtbd r12, 0;
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 6b0288ebccd6..2b86c03aa841 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */
384 addk r8, r17, r0; /* Load exception address */ 384 addk r8, r17, r0; /* Load exception address */
385 bralid r15, full_exception; /* Branch to the handler */ 385 bralid r15, full_exception; /* Branch to the handler */
386 nop; 386 nop;
387 mts r0, rfsr; /* Clear sticky fsr */ 387 mts rfsr, r0; /* Clear sticky fsr */
388 nop 388 nop
389 389
390 /* 390 /*
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 4201c743cc9f..c592d475b3d8 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -235,7 +235,9 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
235 regs->pc = pc; 235 regs->pc = pc;
236 regs->r1 = usp; 236 regs->r1 = usp;
237 regs->pt_mode = 0; 237 regs->pt_mode = 0;
238#ifdef CONFIG_MMU
238 regs->msr |= MSR_UMS; 239 regs->msr |= MSR_UMS;
240#endif
239} 241}
240 242
241#ifdef CONFIG_MMU 243#ifdef CONFIG_MMU
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 8a3a4dd55763..167e10ff06d9 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -129,42 +129,47 @@ extern int fixup_exception(struct pt_regs *regs);
129struct __large_struct { unsigned long buf[100]; }; 129struct __large_struct { unsigned long buf[100]; };
130#define __m(x) (*(struct __large_struct *)(x)) 130#define __m(x) (*(struct __large_struct *)(x))
131 131
132#define __get_user_nocheck(x, ptr, size) \ 132#define __get_user_nocheck(x, ptr, size) \
133({ \ 133({ \
134 __typeof(*(ptr)) __gu_val; \ 134 unsigned long __gu_addr; \
135 unsigned long __gu_addr; \ 135 int __gu_err; \
136 int __gu_err; \ 136 __gu_addr = (unsigned long) (ptr); \
137 __gu_addr = (unsigned long) (ptr); \ 137 switch (size) { \
138 switch (size) { \ 138 case 1: { \
139 case 1: __get_user_asm("bu"); break; \ 139 unsigned char __gu_val; \
140 case 2: __get_user_asm("hu"); break; \ 140 __get_user_asm("bu"); \
141 case 4: __get_user_asm("" ); break; \ 141 (x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
142 default: __get_user_unknown(); break; \ 142 break; \
143 } \ 143 } \
144 x = (__typeof__(*(ptr))) __gu_val; \ 144 case 2: { \
145 __gu_err; \ 145 unsigned short __gu_val; \
146 __get_user_asm("hu"); \
147 (x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
148 break; \
149 } \
150 case 4: { \
151 unsigned int __gu_val; \
152 __get_user_asm(""); \
153 (x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
154 break; \
155 } \
156 default: \
157 __get_user_unknown(); \
158 break; \
159 } \
160 __gu_err; \
146}) 161})
147 162
148#define __get_user_check(x, ptr, size) \ 163#define __get_user_check(x, ptr, size) \
149({ \ 164({ \
150 __typeof__(*(ptr)) __gu_val; \ 165 int _e; \
151 unsigned long __gu_addr; \ 166 if (likely(__access_ok((unsigned long) (ptr), (size)))) \
152 int __gu_err; \ 167 _e = __get_user_nocheck((x), (ptr), (size)); \
153 __gu_addr = (unsigned long) (ptr); \ 168 else { \
154 if (likely(__access_ok(__gu_addr,size))) { \ 169 _e = -EFAULT; \
155 switch (size) { \ 170 (x) = (__typeof__(x))0; \
156 case 1: __get_user_asm("bu"); break; \ 171 } \
157 case 2: __get_user_asm("hu"); break; \ 172 _e; \
158 case 4: __get_user_asm("" ); break; \
159 default: __get_user_unknown(); break; \
160 } \
161 } \
162 else { \
163 __gu_err = -EFAULT; \
164 __gu_val = 0; \
165 } \
166 x = (__typeof__(*(ptr))) __gu_val; \
167 __gu_err; \
168}) 173})
169 174
170#define __get_user_asm(INSN) \ 175#define __get_user_asm(INSN) \
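The rewritten __get_user_nocheck() above gives each size its own correctly typed temporary and then converts the result through a __force cast, so the inline asm always stores into an integer of the width it actually loaded, and __get_user_check() now simply delegates to it after the access_ok() test. A rough user-space analogue of the sized-temporary pattern, with a plain memcpy() standing in for __get_user_asm() and its fault handling:

/* Sketch of the sized-temporary get_user pattern; memcpy() replaces the
 * real exception-handling asm. */
#include <stdio.h>
#include <string.h>

#define get_user_sketch(x, ptr)                                         \
({                                                                      \
        int __gu_err = 0;                                               \
        switch (sizeof(*(ptr))) {                                       \
        case 1: {                                                       \
                unsigned char __gu_val;                                 \
                memcpy(&__gu_val, (ptr), 1);                            \
                (x) = *(__typeof__(*(ptr)) *)&__gu_val;                 \
                break;                                                  \
        }                                                               \
        case 2: {                                                       \
                unsigned short __gu_val;                                \
                memcpy(&__gu_val, (ptr), 2);                            \
                (x) = *(__typeof__(*(ptr)) *)&__gu_val;                 \
                break;                                                  \
        }                                                               \
        case 4: {                                                       \
                unsigned int __gu_val;                                  \
                memcpy(&__gu_val, (ptr), 4);                            \
                (x) = *(__typeof__(*(ptr)) *)&__gu_val;                 \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __gu_err = -1;                                          \
                break;                                                  \
        }                                                               \
        __gu_err;                                                       \
})

int main(void)
{
        short src = 0x1234;
        short dst = 0;

        if (get_user_sketch(dst, &src) == 0)
                printf("read 0x%x\n", dst);
        return 0;
}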
diff --git a/arch/mn10300/unit-asb2303/include/unit/clock.h b/arch/mn10300/unit-asb2303/include/unit/clock.h
index 8b450e920af1..2a0bf79ab968 100644
--- a/arch/mn10300/unit-asb2303/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2303/include/unit/clock.h
@@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
20extern unsigned long mn10300_iobclk; 20extern unsigned long mn10300_iobclk;
21extern unsigned long mn10300_tsc_per_HZ; 21extern unsigned long mn10300_tsc_per_HZ;
22 22
23#define MN10300_IOCLK ((unsigned long)mn10300_ioclk) 23#define MN10300_IOCLK mn10300_ioclk
24/* If this processors has a another clock, uncomment the below. */ 24/* If this processors has a another clock, uncomment the below. */
25/* #define MN10300_IOBCLK ((unsigned long)mn10300_iobclk) */ 25/* #define MN10300_IOBCLK mn10300_iobclk */
26 26
27#else /* !CONFIG_MN10300_RTC */ 27#else /* !CONFIG_MN10300_RTC */
28 28
@@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
35#define MN10300_TSCCLK MN10300_IOCLK 35#define MN10300_TSCCLK MN10300_IOCLK
36 36
37#ifdef CONFIG_MN10300_RTC 37#ifdef CONFIG_MN10300_RTC
38#define MN10300_TSC_PER_HZ ((unsigned long)mn10300_tsc_per_HZ) 38#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
39#else /* !CONFIG_MN10300_RTC */ 39#else /* !CONFIG_MN10300_RTC */
40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ) 40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
41#endif /* !CONFIG_MN10300_RTC */ 41#endif /* !CONFIG_MN10300_RTC */
diff --git a/arch/mn10300/unit-asb2305/include/unit/clock.h b/arch/mn10300/unit-asb2305/include/unit/clock.h
index 7d514841ffda..67be3f2eb18e 100644
--- a/arch/mn10300/unit-asb2305/include/unit/clock.h
+++ b/arch/mn10300/unit-asb2305/include/unit/clock.h
@@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
20extern unsigned long mn10300_iobclk; 20extern unsigned long mn10300_iobclk;
21extern unsigned long mn10300_tsc_per_HZ; 21extern unsigned long mn10300_tsc_per_HZ;
22 22
23#define MN10300_IOCLK ((unsigned long)mn10300_ioclk) 23#define MN10300_IOCLK mn10300_ioclk
24/* If this processors has a another clock, uncomment the below. */ 24/* If this processors has a another clock, uncomment the below. */
25/* #define MN10300_IOBCLK ((unsigned long)mn10300_iobclk) */ 25/* #define MN10300_IOBCLK mn10300_iobclk */
26 26
27#else /* !CONFIG_MN10300_RTC */ 27#else /* !CONFIG_MN10300_RTC */
28 28
@@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
35#define MN10300_TSCCLK MN10300_IOCLK 35#define MN10300_TSCCLK MN10300_IOCLK
36 36
37#ifdef CONFIG_MN10300_RTC 37#ifdef CONFIG_MN10300_RTC
38#define MN10300_TSC_PER_HZ ((unsigned long)mn10300_tsc_per_HZ) 38#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
39#else /* !CONFIG_MN10300_RTC */ 39#else /* !CONFIG_MN10300_RTC */
40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ) 40#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
41#endif /* !CONFIG_MN10300_RTC */ 41#endif /* !CONFIG_MN10300_RTC */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index f388dc68f605..524d9352f17e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
18 select BUG 18 select BUG
19 select HAVE_PERF_EVENTS 19 select HAVE_PERF_EVENTS
20 select GENERIC_ATOMIC64 if !64BIT 20 select GENERIC_ATOMIC64 if !64BIT
21 select HAVE_ARCH_TRACEHOOK
21 help 22 help
22 The PA-RISC microprocessor is designed by Hewlett-Packard and used 23 The PA-RISC microprocessor is designed by Hewlett-Packard and used
23 in many of their workstations & servers (HP9000 700 and 800 series, 24 in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index de3fe3a18229..6fec4d4a1a18 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -21,9 +21,9 @@
21#define KERNEL_MAP_END (TMPALIAS_MAP_START) 21#define KERNEL_MAP_END (TMPALIAS_MAP_START)
22 22
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24extern void *vmalloc_start; 24extern void *parisc_vmalloc_start;
25#define PCXL_DMA_MAP_SIZE (8*1024*1024) 25#define PCXL_DMA_MAP_SIZE (8*1024*1024)
26#define VMALLOC_START ((unsigned long)vmalloc_start) 26#define VMALLOC_START ((unsigned long)parisc_vmalloc_start)
27#define VMALLOC_END (KERNEL_MAP_END) 27#define VMALLOC_END (KERNEL_MAP_END)
28#endif /*__ASSEMBLY__*/ 28#endif /*__ASSEMBLY__*/
29 29
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index ce93133d5112..0d68184a76cb 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -1,29 +1,11 @@
1/* hardirq.h: PA-RISC hard IRQ support. 1/* hardirq.h: PA-RISC hard IRQ support.
2 * 2 *
3 * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
4 *
5 * The locking is really quite interesting. There's a cpu-local
6 * count of how many interrupts are being handled, and a global
7 * lock. An interrupt can only be serviced if the global lock
8 * is free. You can't be sure no more interrupts are being
9 * serviced until you've acquired the lock and then checked
10 * all the per-cpu interrupt counts are all zero. It's a specialised
11 * br_lock, and that's exactly how Sparc does it. We don't because
12 * it's more locking for us. This way is lock-free in the interrupt path.
13 */ 4 */
14 5
15#ifndef _PARISC_HARDIRQ_H 6#ifndef _PARISC_HARDIRQ_H
16#define _PARISC_HARDIRQ_H 7#define _PARISC_HARDIRQ_H
17 8
18#include <linux/threads.h> 9#include <asm-generic/hardirq.h>
19#include <linux/irq.h>
20
21typedef struct {
22 unsigned long __softirq_pending; /* set_bit is used on this */
23} ____cacheline_aligned irq_cpustat_t;
24
25#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
26
27void ack_bad_irq(unsigned int irq);
28 10
29#endif /* _PARISC_HARDIRQ_H */ 11#endif /* _PARISC_HARDIRQ_H */
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 302f68dc889c..aead40b16dd8 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task);
59#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) 59#define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0)
60#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0) 60#define user_space(regs) (((regs)->iasq[1] != 0) ? 1 : 0)
61#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) 61#define instruction_pointer(regs) ((regs)->iaoq[0] & ~3)
62#define user_stack_pointer(regs) ((regs)->gr[30])
62unsigned long profile_pc(struct pt_regs *); 63unsigned long profile_pc(struct pt_regs *);
63extern void show_regs(struct pt_regs *); 64extern void show_regs(struct pt_regs *);
64#endif 65
66
67#endif /* __KERNEL__ */
65 68
66#endif 69#endif
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
new file mode 100644
index 000000000000..8bdfd2c8c39f
--- /dev/null
+++ b/arch/parisc/include/asm/syscall.h
@@ -0,0 +1,40 @@
1/* syscall.h */
2
3#ifndef _ASM_PARISC_SYSCALL_H_
4#define _ASM_PARISC_SYSCALL_H_
5
6#include <linux/err.h>
7#include <asm/ptrace.h>
8
9static inline long syscall_get_nr(struct task_struct *tsk,
10 struct pt_regs *regs)
11{
12 return regs->gr[20];
13}
14
15static inline void syscall_get_arguments(struct task_struct *tsk,
16 struct pt_regs *regs, unsigned int i,
17 unsigned int n, unsigned long *args)
18{
19 BUG_ON(i);
20
21 switch (n) {
22 case 6:
23 args[5] = regs->gr[21];
24 case 5:
25 args[4] = regs->gr[22];
26 case 4:
27 args[3] = regs->gr[23];
28 case 3:
29 args[2] = regs->gr[24];
30 case 2:
31 args[1] = regs->gr[25];
32 case 1:
33 args[0] = regs->gr[26];
34 break;
35 default:
36 BUG();
37 }
38}
39
40#endif /*_ASM_PARISC_SYSCALL_H_*/
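The new syscall.h lets generic tracing code read the syscall number and arguments straight out of pt_regs; syscall_get_arguments() intentionally falls through from the highest requested argument down to args[0]. A small sketch of that fall-through copy on a fake register file (the struct and gr[] indices below only mimic the layout used above):

/* Sketch of the fall-through argument copy used by syscall_get_arguments(),
 * on a fake register file. */
#include <stdio.h>

struct fake_regs {
        unsigned long gr[32];
};

static void get_args(struct fake_regs *regs, unsigned int n, unsigned long *args)
{
        switch (n) {
        case 6: args[5] = regs->gr[21]; /* fall through */
        case 5: args[4] = regs->gr[22]; /* fall through */
        case 4: args[3] = regs->gr[23]; /* fall through */
        case 3: args[2] = regs->gr[24]; /* fall through */
        case 2: args[1] = regs->gr[25]; /* fall through */
        case 1: args[0] = regs->gr[26];
                break;
        default:
                break;  /* the real helper BUG()s here */
        }
}

int main(void)
{
        struct fake_regs regs = { .gr = { [21] = 6, [22] = 5, [23] = 4,
                                          [24] = 3, [25] = 2, [26] = 1 } };
        unsigned long args[6] = { 0 };

        get_args(&regs, 6, args);
        printf("arg0=%lu arg5=%lu\n", args[0], args[5]);
        return 0;
}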
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index ac775a76bff7..7ecc1039cfed 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -32,6 +32,11 @@ struct thread_info {
32#define init_thread_info (init_thread_union.thread_info) 32#define init_thread_info (init_thread_union.thread_info)
33#define init_stack (init_thread_union.stack) 33#define init_stack (init_thread_union.stack)
34 34
35/* how to get the thread information struct from C */
36#define current_thread_info() ((struct thread_info *)mfctl(30))
37
38#endif /* !__ASSEMBLY */
39
35/* thread information allocation */ 40/* thread information allocation */
36 41
37#define THREAD_SIZE_ORDER 2 42#define THREAD_SIZE_ORDER 2
@@ -40,11 +45,6 @@ struct thread_info {
40#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 45#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
41#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) 46#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
42 47
43/* how to get the thread information struct from C */
44#define current_thread_info() ((struct thread_info *)mfctl(30))
45
46#endif /* !__ASSEMBLY */
47
48#define PREEMPT_ACTIVE_BIT 28 48#define PREEMPT_ACTIVE_BIT 28
49#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) 49#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
50 50
@@ -60,6 +60,8 @@ struct thread_info {
60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ 60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
61#define TIF_FREEZE 7 /* is freezing for suspend */ 61#define TIF_FREEZE 7 /* is freezing for suspend */
62#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 62#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
63#define TIF_SINGLESTEP 9 /* single stepping? */
64#define TIF_BLOCKSTEP 10 /* branch stepping? */
63 65
64#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 66#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
65#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 67#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -69,6 +71,8 @@ struct thread_info {
69#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 71#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
70#define _TIF_FREEZE (1 << TIF_FREEZE) 72#define _TIF_FREEZE (1 << TIF_FREEZE)
71#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 73#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
74#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
75#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
72 76
73#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ 77#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
74 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) 78 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 699cf8ef2118..fcd3c707bf12 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -270,8 +270,8 @@ int main(void)
270 DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); 270 DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
271 DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); 271 DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
272 BLANK(); 272 BLANK();
273 DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); 273 DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
274 DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); 274 DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
275 BLANK(); 275 BLANK();
276 DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); 276 DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
277 DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); 277 DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
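The renamed constants convert LSB-relative TIF_* bit numbers into the MSB-relative positions that the extru/bb instructions in entry.S expect, hence the 31 - TIF_xxx arithmetic. A tiny sketch of that correspondence, assuming 32-bit flag words:

/* Sketch: LSB-numbered thread flags vs. the MSB-numbered bit positions
 * used by the PA-RISC bit-test instructions (assumed 32-bit flag words). */
#include <stdio.h>

#define TIF_SINGLESTEP  9
#define TIF_BLOCKSTEP   10

#define PA_BIT(tif)     (31 - (tif))    /* what asm-offsets.c emits */

int main(void)
{
        printf("TIF_SINGLESTEP %d -> PA bit %d\n",
               TIF_SINGLESTEP, PA_BIT(TIF_SINGLESTEP));
        printf("TIF_BLOCKSTEP  %d -> PA bit %d\n",
               TIF_BLOCKSTEP, PA_BIT(TIF_BLOCKSTEP));
        return 0;
}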
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 8c4712b74dc1..3a44f7f704fa 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -2047,12 +2047,13 @@ syscall_do_signal:
2047 b,n syscall_check_sig 2047 b,n syscall_check_sig
2048 2048
2049syscall_restore: 2049syscall_restore:
2050 /* Are we being ptraced? */
2051 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2050 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2052 2051
2053 ldw TASK_PTRACE(%r1), %r19 2052 /* Are we being ptraced? */
2054 bb,< %r19,31,syscall_restore_rfi 2053 ldw TASK_FLAGS(%r1),%r19
2055 nop 2054 ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
2055 and,COND(=) %r19,%r2,%r0
2056 b,n syscall_restore_rfi
2056 2057
2057 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ 2058 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2058 rest_fp %r19 2059 rest_fp %r19
@@ -2113,16 +2114,16 @@ syscall_restore_rfi:
2113 ldi 0x0b,%r20 /* Create new PSW */ 2114 ldi 0x0b,%r20 /* Create new PSW */
2114 depi -1,13,1,%r20 /* C, Q, D, and I bits */ 2115 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2115 2116
2116 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are 2117 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
2117 * set in include/linux/ptrace.h and converted to PA bitmap 2118 * set in thread_info.h and converted to PA bitmap
2118 * numbers in asm-offsets.c */ 2119 * numbers in asm-offsets.c */
2119 2120
2120 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ 2121 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
2121 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 2122 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
2122 depi -1,27,1,%r20 /* R bit */ 2123 depi -1,27,1,%r20 /* R bit */
2123 2124
2124 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ 2125 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
2125 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 2126 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
2126 depi -1,7,1,%r20 /* T bit */ 2127 depi -1,7,1,%r20 /* T bit */
2127 2128
2128 STREG %r20,TASK_PT_PSW(%r1) 2129 STREG %r20,TASK_PT_PSW(%r1)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 330f536a9324..2e7610cb33d5 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -423,8 +423,3 @@ void __init init_IRQ(void)
423 set_eiem(cpu_eiem); /* EIEM : enable all external intr */ 423 set_eiem(cpu_eiem); /* EIEM : enable all external intr */
424 424
425} 425}
426
427void ack_bad_irq(unsigned int irq)
428{
429 printk(KERN_WARNING "unexpected IRQ %d\n", irq);
430}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 61ee0eec4e69..212074653df7 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr,
893 * ourselves */ 893 * ourselves */
894 for (i = 1; i < hdr->e_shnum; i++) { 894 for (i = 1; i < hdr->e_shnum; i++) {
895 if(sechdrs[i].sh_type == SHT_SYMTAB 895 if(sechdrs[i].sh_type == SHT_SYMTAB
896 && (sechdrs[i].sh_type & SHF_ALLOC)) { 896 && (sechdrs[i].sh_flags & SHF_ALLOC)) {
897 int strindex = sechdrs[i].sh_link; 897 int strindex = sechdrs[i].sh_link;
898 /* FIXME: AWFUL HACK 898 /* FIXME: AWFUL HACK
899 * The cast is to drop the const from 899 * The cast is to drop the const from
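The one-character module.c fix makes the second test look at sh_flags, where SHF_ALLOC actually lives, instead of re-testing sh_type. A standalone sketch of the corrected check, using the generic elf.h types rather than the kernel's Elf_Shdr aliases:

/* Sketch: selecting allocated symbol-table sections, i.e. the corrected
 * (sh_type == SHT_SYMTAB) && (sh_flags & SHF_ALLOC) test. */
#include <elf.h>
#include <stdio.h>

static int is_alloc_symtab(const Elf32_Shdr *shdr)
{
        return shdr->sh_type == SHT_SYMTAB &&
               (shdr->sh_flags & SHF_ALLOC);
}

int main(void)
{
        Elf32_Shdr sec = {
                .sh_type  = SHT_SYMTAB,
                .sh_flags = SHF_ALLOC,
                .sh_link  = 3,          /* index of the associated strtab */
        };

        if (is_alloc_symtab(&sec))
                printf("symtab kept, strtab index %u\n", (unsigned)sec.sh_link);
        return 0;
}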
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 927db3668b6f..c4f49e45129d 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -13,6 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/ptrace.h> 15#include <linux/ptrace.h>
16#include <linux/tracehook.h>
16#include <linux/user.h> 17#include <linux/user.h>
17#include <linux/personality.h> 18#include <linux/personality.h>
18#include <linux/security.h> 19#include <linux/security.h>
@@ -35,7 +36,8 @@
35 */ 36 */
36void ptrace_disable(struct task_struct *task) 37void ptrace_disable(struct task_struct *task)
37{ 38{
38 task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); 39 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
40 clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
39 41
40 /* make sure the trap bits are not set */ 42 /* make sure the trap bits are not set */
41 pa_psw(task)->r = 0; 43 pa_psw(task)->r = 0;
@@ -55,8 +57,8 @@ void user_disable_single_step(struct task_struct *task)
55 57
56void user_enable_single_step(struct task_struct *task) 58void user_enable_single_step(struct task_struct *task)
57{ 59{
58 task->ptrace &= ~PT_BLOCKSTEP; 60 clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
59 task->ptrace |= PT_SINGLESTEP; 61 set_tsk_thread_flag(task, TIF_SINGLESTEP);
60 62
61 if (pa_psw(task)->n) { 63 if (pa_psw(task)->n) {
62 struct siginfo si; 64 struct siginfo si;
@@ -98,8 +100,8 @@ void user_enable_single_step(struct task_struct *task)
98 100
99void user_enable_block_step(struct task_struct *task) 101void user_enable_block_step(struct task_struct *task)
100{ 102{
101 task->ptrace &= ~PT_SINGLESTEP; 103 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
102 task->ptrace |= PT_BLOCKSTEP; 104 set_tsk_thread_flag(task, TIF_BLOCKSTEP);
103 105
104 /* Enable taken branch trap. */ 106 /* Enable taken branch trap. */
105 pa_psw(task)->r = 0; 107 pa_psw(task)->r = 0;
@@ -263,22 +265,20 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
263} 265}
264#endif 266#endif
265 267
268long do_syscall_trace_enter(struct pt_regs *regs)
269{
270 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
271 tracehook_report_syscall_entry(regs))
272 return -1L;
273
274 return regs->gr[20];
275}
266 276
267void syscall_trace(void) 277void do_syscall_trace_exit(struct pt_regs *regs)
268{ 278{
269 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 279 int stepping = test_thread_flag(TIF_SINGLESTEP) ||
270 return; 280 test_thread_flag(TIF_BLOCKSTEP);
271 if (!(current->ptrace & PT_PTRACED)) 281
272 return; 282 if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
273 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 283 tracehook_report_syscall_exit(regs, stepping);
274 ? 0x80 : 0));
275 /*
276 * this isn't the same as continuing with a signal, but it will do
277 * for normal use. strace only continues with a signal if the
278 * stopping signal is not SIGTRAP. -brl
279 */
280 if (current->exit_code) {
281 send_sig(current->exit_code, current, 1);
282 current->exit_code = 0;
283 }
284} 284}
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 8eb3c63c407a..e8467e4aa8d1 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -21,6 +21,7 @@
21#include <linux/errno.h> 21#include <linux/errno.h>
22#include <linux/wait.h> 22#include <linux/wait.h>
23#include <linux/ptrace.h> 23#include <linux/ptrace.h>
24#include <linux/tracehook.h>
24#include <linux/unistd.h> 25#include <linux/unistd.h>
25#include <linux/stddef.h> 26#include <linux/stddef.h>
26#include <linux/compat.h> 27#include <linux/compat.h>
@@ -34,7 +35,6 @@
34#include <asm/asm-offsets.h> 35#include <asm/asm-offsets.h>
35 36
36#ifdef CONFIG_COMPAT 37#ifdef CONFIG_COMPAT
37#include <linux/compat.h>
38#include "signal32.h" 38#include "signal32.h"
39#endif 39#endif
40 40
@@ -468,6 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
468 sigaddset(&current->blocked,sig); 468 sigaddset(&current->blocked,sig);
469 recalc_sigpending(); 469 recalc_sigpending();
470 spin_unlock_irq(&current->sighand->siglock); 470 spin_unlock_irq(&current->sighand->siglock);
471
472 tracehook_signal_handler(sig, info, ka, regs, 0);
473
471 return 1; 474 return 1;
472} 475}
473 476
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 59fc1a43ec3e..f5f96021caa0 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -288,18 +288,23 @@ tracesys:
288 STREG %r18,PT_GR18(%r2) 288 STREG %r18,PT_GR18(%r2)
289 /* Finished saving things for the debugger */ 289 /* Finished saving things for the debugger */
290 290
291 ldil L%syscall_trace,%r1 291 copy %r2,%r26
292 ldil L%do_syscall_trace_enter,%r1
292 ldil L%tracesys_next,%r2 293 ldil L%tracesys_next,%r2
293 be R%syscall_trace(%sr7,%r1) 294 be R%do_syscall_trace_enter(%sr7,%r1)
294 ldo R%tracesys_next(%r2),%r2 295 ldo R%tracesys_next(%r2),%r2
295 296
296tracesys_next: 297tracesys_next:
298 /* do_syscall_trace_enter either returned the syscallno, or -1L,
299 * so we skip restoring the PT_GR20 below, since we pulled it from
300 * task->thread.regs.gr[20] above.
301 */
302 copy %ret0,%r20
297 ldil L%sys_call_table,%r1 303 ldil L%sys_call_table,%r1
298 ldo R%sys_call_table(%r1), %r19 304 ldo R%sys_call_table(%r1), %r19
299 305
300 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 306 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
301 LDREG TI_TASK(%r1), %r1 307 LDREG TI_TASK(%r1), %r1
302 LDREG TASK_PT_GR20(%r1), %r20
303 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ 308 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
304 LDREG TASK_PT_GR25(%r1), %r25 309 LDREG TASK_PT_GR25(%r1), %r25
305 LDREG TASK_PT_GR24(%r1), %r24 310 LDREG TASK_PT_GR24(%r1), %r24
@@ -336,7 +341,8 @@ tracesys_exit:
336#ifdef CONFIG_64BIT 341#ifdef CONFIG_64BIT
337 ldo -16(%r30),%r29 /* Reference param save area */ 342 ldo -16(%r30),%r29 /* Reference param save area */
338#endif 343#endif
339 bl syscall_trace, %r2 344 ldo TASK_REGS(%r1),%r26
345 bl do_syscall_trace_exit,%r2
340 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 346 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
341 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 347 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
342 LDREG TI_TASK(%r1), %r1 348 LDREG TI_TASK(%r1), %r1
@@ -353,12 +359,12 @@ tracesys_exit:
353 359
354tracesys_sigexit: 360tracesys_sigexit:
355 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 361 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
356 LDREG 0(%r1), %r1 362 LDREG TI_TASK(%r1), %r1
357#ifdef CONFIG_64BIT 363#ifdef CONFIG_64BIT
358 ldo -16(%r30),%r29 /* Reference param save area */ 364 ldo -16(%r30),%r29 /* Reference param save area */
359#endif 365#endif
360 bl syscall_trace, %r2 366 bl do_syscall_trace_exit,%r2
361 nop 367 ldo TASK_REGS(%r1),%r26
362 368
363 ldil L%syscall_exit_rfi,%r1 369 ldil L%syscall_exit_rfi,%r1
364 be,n R%syscall_exit_rfi(%sr7,%r1) 370 be,n R%syscall_exit_rfi(%sr7,%r1)
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 775be2791bc2..fda4baa059b5 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -28,6 +28,7 @@
28#include <asm/cache.h> 28#include <asm/cache.h>
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/asm-offsets.h> 30#include <asm/asm-offsets.h>
31#include <asm/thread_info.h>
31 32
32/* ld script to make hppa Linux kernel */ 33/* ld script to make hppa Linux kernel */
33#ifndef CONFIG_64BIT 34#ifndef CONFIG_64BIT
@@ -134,6 +135,15 @@ SECTIONS
134 __init_begin = .; 135 __init_begin = .;
135 INIT_TEXT_SECTION(16384) 136 INIT_TEXT_SECTION(16384)
136 INIT_DATA_SECTION(16) 137 INIT_DATA_SECTION(16)
138 /* we have to discard exit text and such at runtime, not link time */
139 .exit.text :
140 {
141 EXIT_TEXT
142 }
143 .exit.data :
144 {
145 EXIT_DATA
146 }
137 147
138 PERCPU(PAGE_SIZE) 148 PERCPU(PAGE_SIZE)
139 . = ALIGN(PAGE_SIZE); 149 . = ALIGN(PAGE_SIZE);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index d5aca31fddbb..13b6e3e59b99 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -434,8 +434,8 @@ void mark_rodata_ro(void)
434#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ 434#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
435 & ~(VM_MAP_OFFSET-1))) 435 & ~(VM_MAP_OFFSET-1)))
436 436
437void *vmalloc_start __read_mostly; 437void *parisc_vmalloc_start __read_mostly;
438EXPORT_SYMBOL(vmalloc_start); 438EXPORT_SYMBOL(parisc_vmalloc_start);
439 439
440#ifdef CONFIG_PA11 440#ifdef CONFIG_PA11
441unsigned long pcxl_dma_start __read_mostly; 441unsigned long pcxl_dma_start __read_mostly;
@@ -496,13 +496,14 @@ void __init mem_init(void)
496#ifdef CONFIG_PA11 496#ifdef CONFIG_PA11
497 if (hppa_dma_ops == &pcxl_dma_ops) { 497 if (hppa_dma_ops == &pcxl_dma_ops) {
498 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); 498 pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
499 vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); 499 parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
500 + PCXL_DMA_MAP_SIZE);
500 } else { 501 } else {
501 pcxl_dma_start = 0; 502 pcxl_dma_start = 0;
502 vmalloc_start = SET_MAP_OFFSET(MAP_START); 503 parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
503 } 504 }
504#else 505#else
505 vmalloc_start = SET_MAP_OFFSET(MAP_START); 506 parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
506#endif 507#endif
507 508
508 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", 509 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 47ee603f558e..2aa371e30079 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -201,7 +201,7 @@ static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
201 return single_open(file, kvmppc_exit_timing_show, inode->i_private); 201 return single_open(file, kvmppc_exit_timing_show, inode->i_private);
202} 202}
203 203
204static struct file_operations kvmppc_exit_timing_fops = { 204static const struct file_operations kvmppc_exit_timing_fops = {
205 .owner = THIS_MODULE, 205 .owner = THIS_MODULE,
206 .open = kvmppc_exit_timing_open, 206 .open = kvmppc_exit_timing_open,
207 .read = seq_read, 207 .read = seq_read,
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 961309446170..884e8bcec499 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -147,7 +147,7 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
147 __simple_attr_check_format(__fmt, 0ull); \ 147 __simple_attr_check_format(__fmt, 0ull); \
148 return spufs_attr_open(inode, file, __get, __set, __fmt); \ 148 return spufs_attr_open(inode, file, __get, __set, __fmt); \
149} \ 149} \
150static struct file_operations __fops = { \ 150static const struct file_operations __fops = { \
151 .owner = THIS_MODULE, \ 151 .owner = THIS_MODULE, \
152 .open = __fops ## _open, \ 152 .open = __fops ## _open, \
153 .release = spufs_attr_release, \ 153 .release = spufs_attr_release, \
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index ab69925d579b..937a544a236d 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -209,7 +209,7 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
209 return n_read * sizeof(struct dtl_entry); 209 return n_read * sizeof(struct dtl_entry);
210} 210}
211 211
212static struct file_operations dtl_fops = { 212static const struct file_operations dtl_fops = {
213 .open = dtl_file_open, 213 .open = dtl_file_open,
214 .release = dtl_file_release, 214 .release = dtl_file_release,
215 .read = dtl_file_read, 215 .read = dtl_file_read,
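The powerpc hunks above all make the same change: file_operations tables that are registered once and never written to become const, letting them live in read-only data. A generic sketch of the pattern, using an ordinary ops struct as a stand-in for file_operations:

/* Sketch: marking a method table const when it is installed once and never
 * modified afterwards. */
#include <stdio.h>

struct ops {
        int (*open)(const char *name);
        int (*release)(void);
};

static int demo_open(const char *name)
{
        printf("open %s\n", name);
        return 0;
}

static int demo_release(void)
{
        return 0;
}

/* const places the table in .rodata; callers only read the pointers. */
static const struct ops demo_ops = {
        .open    = demo_open,
        .release = demo_release,
};

int main(void)
{
        demo_ops.open("dtl");
        demo_ops.release();
        return 0;
}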
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ec5eee7c25d8..06cce8285ba0 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,7 +58,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); 58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); 59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
60 60
61static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) 61static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
62{ 62{
63 return vcpu->arch.sie_block->gmslm 63 return vcpu->arch.sie_block->gmslm
64 - vcpu->arch.sie_block->gmsor 64 - vcpu->arch.sie_block->gmsor
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index ac45aab741a5..05ef5380a687 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,6 +26,7 @@ config SPARC
26 select RTC_CLASS 26 select RTC_CLASS
27 select RTC_DRV_M48T59 27 select RTC_DRV_M48T59
28 select HAVE_PERF_EVENTS 28 select HAVE_PERF_EVENTS
29 select PERF_USE_VMALLOC
29 select HAVE_DMA_ATTRS 30 select HAVE_DMA_ATTRS
30 select HAVE_DMA_API_DEBUG 31 select HAVE_DMA_API_DEBUG
31 32
@@ -48,6 +49,7 @@ config SPARC64
48 select RTC_DRV_SUN4V 49 select RTC_DRV_SUN4V
49 select RTC_DRV_STARFIRE 50 select RTC_DRV_STARFIRE
50 select HAVE_PERF_EVENTS 51 select HAVE_PERF_EVENTS
52 select PERF_USE_VMALLOC
51 53
52config ARCH_DEFCONFIG 54config ARCH_DEFCONFIG
53 string 55 string
diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h
index 4f63ed8df551..162007643cdc 100644
--- a/arch/sparc/include/asm/hardirq_32.h
+++ b/arch/sparc/include/asm/hardirq_32.h
@@ -7,17 +7,7 @@
7#ifndef __SPARC_HARDIRQ_H 7#ifndef __SPARC_HARDIRQ_H
8#define __SPARC_HARDIRQ_H 8#define __SPARC_HARDIRQ_H
9 9
10#include <linux/threads.h>
11#include <linux/spinlock.h>
12#include <linux/cache.h>
13
14/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21#define HARDIRQ_BITS 8 10#define HARDIRQ_BITS 8
11#include <asm-generic/hardirq.h>
22 12
23#endif /* __SPARC_HARDIRQ_H */ 13#endif /* __SPARC_HARDIRQ_H */
diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h
index ea43057d4763..cbf4801deaaf 100644
--- a/arch/sparc/include/asm/irq_32.h
+++ b/arch/sparc/include/asm/irq_32.h
@@ -6,10 +6,10 @@
6#ifndef _SPARC_IRQ_H 6#ifndef _SPARC_IRQ_H
7#define _SPARC_IRQ_H 7#define _SPARC_IRQ_H
8 8
9#include <linux/interrupt.h>
10
11#define NR_IRQS 16 9#define NR_IRQS 16
12 10
11#include <linux/interrupt.h>
12
13#define irq_canonicalize(irq) (irq) 13#define irq_canonicalize(irq) (irq)
14 14
15extern void __init init_IRQ(void); 15extern void __init init_IRQ(void);
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 0ff92fa22064..f3cb790fa2ae 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -41,8 +41,8 @@
41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) 41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) 42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
43#define VMALLOC_START _AC(0x0000000100000000,UL) 43#define VMALLOC_START _AC(0x0000000100000000,UL)
44#define VMALLOC_END _AC(0x0000000200000000,UL) 44#define VMALLOC_END _AC(0x0000010000000000,UL)
45#define VMEMMAP_BASE _AC(0x0000000200000000,UL) 45#define VMEMMAP_BASE _AC(0x0000010000000000,UL)
46 46
47#define vmemmap ((struct page *)VMEMMAP_BASE) 47#define vmemmap ((struct page *)VMEMMAP_BASE)
48 48
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 3ea6e8cde8c5..1d361477d7d6 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear:
280 280
281#ifdef CONFIG_SPARSEMEM_VMEMMAP 281#ifdef CONFIG_SPARSEMEM_VMEMMAP
282 /* Do not use the TSB for vmemmap. */ 282 /* Do not use the TSB for vmemmap. */
283 mov (VMEMMAP_BASE >> 24), %g5 283 mov (VMEMMAP_BASE >> 40), %g5
284 sllx %g5, 24, %g5 284 sllx %g5, 40, %g5
285 cmp %g4,%g5 285 cmp %g4,%g5
286 bgeu,pn %xcc, kvmap_vmemmap 286 bgeu,pn %xcc, kvmap_vmemmap
287 nop 287 nop
@@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss:
293 sethi %hi(MODULES_VADDR), %g5 293 sethi %hi(MODULES_VADDR), %g5
294 cmp %g4, %g5 294 cmp %g4, %g5
295 blu,pn %xcc, kvmap_dtlb_longpath 295 blu,pn %xcc, kvmap_dtlb_longpath
296 mov (VMALLOC_END >> 24), %g5 296 mov (VMALLOC_END >> 40), %g5
297 sllx %g5, 24, %g5 297 sllx %g5, 40, %g5
298 cmp %g4, %g5 298 cmp %g4, %g5
299 bgeu,pn %xcc, kvmap_dtlb_longpath 299 bgeu,pn %xcc, kvmap_dtlb_longpath
300 nop 300 nop
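The new sparc64 boundaries are multiples of 1UL << 40, which is why the ktlb.S comparisons can now build them with a mov of the value shifted right by 40 followed by sllx 40, where the old layout only needed 24-bit shifts. A quick sanity-check sketch using the constants from the pgtable_64.h hunk:

/* Sketch: the mov/sllx trick in ktlb.S only works if the constant's low
 * 40 bits are zero; the new layout satisfies that. */
#include <assert.h>
#include <stdio.h>

#define VMALLOC_END     0x0000010000000000UL    /* from the pgtable_64.h hunk */
#define VMEMMAP_BASE    0x0000010000000000UL

int main(void)
{
        assert(((VMALLOC_END  >> 40) << 40) == VMALLOC_END);
        assert(((VMEMMAP_BASE >> 40) << 40) == VMEMMAP_BASE);
        printf("VMALLOC_END >> 40 = %#lx\n", VMALLOC_END >> 40);
        return 0;
}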
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2d6a1b10c81d..04db92743896 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -56,7 +56,8 @@ struct cpu_hw_events {
56 struct perf_event *events[MAX_HWEVENTS]; 56 struct perf_event *events[MAX_HWEVENTS];
57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 57 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 58 unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
59 int enabled; 59 u64 pcr;
60 int enabled;
60}; 61};
61DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 62DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
62 63
@@ -68,8 +69,30 @@ struct perf_event_map {
68#define PIC_LOWER 0x02 69#define PIC_LOWER 0x02
69}; 70};
70 71
72static unsigned long perf_event_encode(const struct perf_event_map *pmap)
73{
74 return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
75}
76
77static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
78{
79 *msk = val & 0xff;
80 *enc = val >> 16;
81}
82
83#define C(x) PERF_COUNT_HW_CACHE_##x
84
85#define CACHE_OP_UNSUPPORTED 0xfffe
86#define CACHE_OP_NONSENSE 0xffff
87
88typedef struct perf_event_map cache_map_t
89 [PERF_COUNT_HW_CACHE_MAX]
90 [PERF_COUNT_HW_CACHE_OP_MAX]
91 [PERF_COUNT_HW_CACHE_RESULT_MAX];
92
71struct sparc_pmu { 93struct sparc_pmu {
72 const struct perf_event_map *(*event_map)(int); 94 const struct perf_event_map *(*event_map)(int);
95 const cache_map_t *cache_map;
73 int max_events; 96 int max_events;
74 int upper_shift; 97 int upper_shift;
75 int lower_shift; 98 int lower_shift;
@@ -80,21 +103,109 @@ struct sparc_pmu {
80 int lower_nop; 103 int lower_nop;
81}; 104};
82 105
83static const struct perf_event_map ultra3i_perfmon_event_map[] = { 106static const struct perf_event_map ultra3_perfmon_event_map[] = {
84 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, 107 [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
85 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, 108 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
86 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, 109 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, 110 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
88}; 111};
89 112
90static const struct perf_event_map *ultra3i_event_map(int event_id) 113static const struct perf_event_map *ultra3_event_map(int event_id)
91{ 114{
92 return &ultra3i_perfmon_event_map[event_id]; 115 return &ultra3_perfmon_event_map[event_id];
93} 116}
94 117
95static const struct sparc_pmu ultra3i_pmu = { 118static const cache_map_t ultra3_cache_map = {
96 .event_map = ultra3i_event_map, 119[C(L1D)] = {
97 .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), 120 [C(OP_READ)] = {
121 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
122 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
123 },
124 [C(OP_WRITE)] = {
125 [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
126 [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
127 },
128 [C(OP_PREFETCH)] = {
129 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
130 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
131 },
132},
133[C(L1I)] = {
134 [C(OP_READ)] = {
135 [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
136 [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
137 },
138 [ C(OP_WRITE) ] = {
139 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
140 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
141 },
142 [ C(OP_PREFETCH) ] = {
143 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
144 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
145 },
146},
147[C(LL)] = {
148 [C(OP_READ)] = {
149 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
150 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
151 },
152 [C(OP_WRITE)] = {
153 [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
154 [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
155 },
156 [C(OP_PREFETCH)] = {
157 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
158 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
159 },
160},
161[C(DTLB)] = {
162 [C(OP_READ)] = {
163 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
164 [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
165 },
166 [ C(OP_WRITE) ] = {
167 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
168 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
169 },
170 [ C(OP_PREFETCH) ] = {
171 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
172 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
173 },
174},
175[C(ITLB)] = {
176 [C(OP_READ)] = {
177 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
178 [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
179 },
180 [ C(OP_WRITE) ] = {
181 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
182 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
183 },
184 [ C(OP_PREFETCH) ] = {
185 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
186 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
187 },
188},
189[C(BPU)] = {
190 [C(OP_READ)] = {
191 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
192 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
193 },
194 [ C(OP_WRITE) ] = {
195 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
196 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
197 },
198 [ C(OP_PREFETCH) ] = {
199 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
200 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
201 },
202},
203};
204
205static const struct sparc_pmu ultra3_pmu = {
206 .event_map = ultra3_event_map,
207 .cache_map = &ultra3_cache_map,
208 .max_events = ARRAY_SIZE(ultra3_perfmon_event_map),
98 .upper_shift = 11, 209 .upper_shift = 11,
99 .lower_shift = 4, 210 .lower_shift = 4,
100 .event_mask = 0x3f, 211 .event_mask = 0x3f,
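perf_event_encode() packs an event map entry's encoding and PIC mask into a single unsigned long so the later constraint checking can work on plain scalars, and perf_event_decode() unpacks them again. A round-trip sketch of that packing; the struct below is a simplified stand-in for the driver's perf_event_map and the values are arbitrary:

/* Sketch: the encode/decode packing used for constraint checking.
 * Layout: encoding in bits 16 and up, pic_mask in the low byte. */
#include <assert.h>
#include <stdio.h>

struct perf_event_map {
        unsigned short encoding;
        unsigned char pic_mask;
};

static unsigned long encode(const struct perf_event_map *pmap)
{
        return ((unsigned long)pmap->encoding << 16) | pmap->pic_mask;
}

static void decode(unsigned long val, unsigned short *enc, unsigned char *msk)
{
        *msk = val & 0xff;
        *enc = val >> 16;
}

int main(void)
{
        struct perf_event_map map = { .encoding = 0x0302, .pic_mask = 0x3 };
        unsigned short enc;
        unsigned char msk;

        decode(encode(&map), &enc, &msk);
        assert(enc == map.encoding && msk == map.pic_mask);
        printf("enc=%#x msk=%#x\n", enc, msk);
        return 0;
}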
@@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = {
102 .lower_nop = 0x14, 213 .lower_nop = 0x14,
103}; 214};
104 215
216/* Niagara1 is very limited. The upper PIC is hard-locked to count
217 * only instructions, so it is free running which creates all kinds of
218 * problems. Some hardware designs make one wonder if the creator
219 * even looked at how this stuff gets used by software.
220 */
221static const struct perf_event_map niagara1_perfmon_event_map[] = {
222 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
223 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
224 [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
225 [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
226};
227
228static const struct perf_event_map *niagara1_event_map(int event_id)
229{
230 return &niagara1_perfmon_event_map[event_id];
231}
232
233static const cache_map_t niagara1_cache_map = {
234[C(L1D)] = {
235 [C(OP_READ)] = {
236 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
237 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
238 },
239 [C(OP_WRITE)] = {
240 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
241 [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
242 },
243 [C(OP_PREFETCH)] = {
244 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
245 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
246 },
247},
248[C(L1I)] = {
249 [C(OP_READ)] = {
250 [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
251 [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
252 },
253 [ C(OP_WRITE) ] = {
254 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
255 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
256 },
257 [ C(OP_PREFETCH) ] = {
258 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
259 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
260 },
261},
262[C(LL)] = {
263 [C(OP_READ)] = {
264 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
265 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
266 },
267 [C(OP_WRITE)] = {
268 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
269 [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
270 },
271 [C(OP_PREFETCH)] = {
272 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
273 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
274 },
275},
276[C(DTLB)] = {
277 [C(OP_READ)] = {
278 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
279 [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
280 },
281 [ C(OP_WRITE) ] = {
282 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
283 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
284 },
285 [ C(OP_PREFETCH) ] = {
286 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
287 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
288 },
289},
290[C(ITLB)] = {
291 [C(OP_READ)] = {
292 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
293 [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
294 },
295 [ C(OP_WRITE) ] = {
296 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
297 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
298 },
299 [ C(OP_PREFETCH) ] = {
300 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
301 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
302 },
303},
304[C(BPU)] = {
305 [C(OP_READ)] = {
306 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
307 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
308 },
309 [ C(OP_WRITE) ] = {
310 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
311 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
312 },
313 [ C(OP_PREFETCH) ] = {
314 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
315 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
316 },
317},
318};
319
320static const struct sparc_pmu niagara1_pmu = {
321 .event_map = niagara1_event_map,
322 .cache_map = &niagara1_cache_map,
323 .max_events = ARRAY_SIZE(niagara1_perfmon_event_map),
324 .upper_shift = 0,
325 .lower_shift = 4,
326 .event_mask = 0x7,
327 .upper_nop = 0x0,
328 .lower_nop = 0x0,
329};
330
105static const struct perf_event_map niagara2_perfmon_event_map[] = { 331static const struct perf_event_map niagara2_perfmon_event_map[] = {
106 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, 332 [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
107 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, 333 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
@@ -116,8 +342,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id)
116 return &niagara2_perfmon_event_map[event_id]; 342 return &niagara2_perfmon_event_map[event_id];
117} 343}
118 344
345static const cache_map_t niagara2_cache_map = {
346[C(L1D)] = {
347 [C(OP_READ)] = {
348 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
349 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
350 },
351 [C(OP_WRITE)] = {
352 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
353 [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
354 },
355 [C(OP_PREFETCH)] = {
356 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
357 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
358 },
359},
360[C(L1I)] = {
361 [C(OP_READ)] = {
362 [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
363 [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
364 },
365 [ C(OP_WRITE) ] = {
366 [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
367 [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
368 },
369 [ C(OP_PREFETCH) ] = {
370 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
371 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
372 },
373},
374[C(LL)] = {
375 [C(OP_READ)] = {
376 [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
377 [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
378 },
379 [C(OP_WRITE)] = {
380 [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
381 [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
382 },
383 [C(OP_PREFETCH)] = {
384 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
385 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
386 },
387},
388[C(DTLB)] = {
389 [C(OP_READ)] = {
390 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
391 [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
392 },
393 [ C(OP_WRITE) ] = {
394 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
395 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
396 },
397 [ C(OP_PREFETCH) ] = {
398 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
399 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
400 },
401},
402[C(ITLB)] = {
403 [C(OP_READ)] = {
404 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
405 [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
406 },
407 [ C(OP_WRITE) ] = {
408 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
409 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
410 },
411 [ C(OP_PREFETCH) ] = {
412 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
413 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
414 },
415},
416[C(BPU)] = {
417 [C(OP_READ)] = {
418 [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
419 [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
420 },
421 [ C(OP_WRITE) ] = {
422 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
423 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
424 },
425 [ C(OP_PREFETCH) ] = {
426 [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
427 [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
428 },
429},
430};
431
119static const struct sparc_pmu niagara2_pmu = { 432static const struct sparc_pmu niagara2_pmu = {
120 .event_map = niagara2_event_map, 433 .event_map = niagara2_event_map,
434 .cache_map = &niagara2_cache_map,
121 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), 435 .max_events = ARRAY_SIZE(niagara2_perfmon_event_map),
122 .upper_shift = 19, 436 .upper_shift = 19,
123 .lower_shift = 6, 437 .lower_shift = 6,
@@ -151,23 +465,30 @@ static u64 nop_for_index(int idx)
151 sparc_pmu->lower_nop, idx); 465 sparc_pmu->lower_nop, idx);
152} 466}
153 467
154static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, 468static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
155 int idx)
156{ 469{
157 u64 val, mask = mask_for_index(idx); 470 u64 val, mask = mask_for_index(idx);
158 471
159 val = pcr_ops->read(); 472 val = cpuc->pcr;
160 pcr_ops->write((val & ~mask) | hwc->config); 473 val &= ~mask;
474 val |= hwc->config;
475 cpuc->pcr = val;
476
477 pcr_ops->write(cpuc->pcr);
161} 478}
162 479
163static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, 480static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
164 int idx)
165{ 481{
166 u64 mask = mask_for_index(idx); 482 u64 mask = mask_for_index(idx);
167 u64 nop = nop_for_index(idx); 483 u64 nop = nop_for_index(idx);
168 u64 val = pcr_ops->read(); 484 u64 val;
169 485
170 pcr_ops->write((val & ~mask) | nop); 486 val = cpuc->pcr;
487 val &= ~mask;
488 val |= nop;
489 cpuc->pcr = val;
490
491 pcr_ops->write(cpuc->pcr);
171} 492}
172 493
173void hw_perf_enable(void) 494void hw_perf_enable(void)
@@ -182,7 +503,7 @@ void hw_perf_enable(void)
182 cpuc->enabled = 1; 503 cpuc->enabled = 1;
183 barrier(); 504 barrier();
184 505
185 val = pcr_ops->read(); 506 val = cpuc->pcr;
186 507
187 for (i = 0; i < MAX_HWEVENTS; i++) { 508 for (i = 0; i < MAX_HWEVENTS; i++) {
188 struct perf_event *cp = cpuc->events[i]; 509 struct perf_event *cp = cpuc->events[i];
@@ -194,7 +515,9 @@ void hw_perf_enable(void)
194 val |= hwc->config_base; 515 val |= hwc->config_base;
195 } 516 }
196 517
197 pcr_ops->write(val); 518 cpuc->pcr = val;
519
520 pcr_ops->write(cpuc->pcr);
198} 521}
199 522
200void hw_perf_disable(void) 523void hw_perf_disable(void)
@@ -207,10 +530,12 @@ void hw_perf_disable(void)
207 530
208 cpuc->enabled = 0; 531 cpuc->enabled = 0;
209 532
210 val = pcr_ops->read(); 533 val = cpuc->pcr;
211 val &= ~(PCR_UTRACE | PCR_STRACE | 534 val &= ~(PCR_UTRACE | PCR_STRACE |
212 sparc_pmu->hv_bit | sparc_pmu->irq_bit); 535 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
213 pcr_ops->write(val); 536 cpuc->pcr = val;
537
538 pcr_ops->write(cpuc->pcr);
214} 539}
215 540
216static u32 read_pmc(int idx) 541static u32 read_pmc(int idx)
@@ -242,7 +567,7 @@ static void write_pmc(int idx, u64 val)
242} 567}
243 568
244static int sparc_perf_event_set_period(struct perf_event *event, 569static int sparc_perf_event_set_period(struct perf_event *event,
245 struct hw_perf_event *hwc, int idx) 570 struct hw_perf_event *hwc, int idx)
246{ 571{
247 s64 left = atomic64_read(&hwc->period_left); 572 s64 left = atomic64_read(&hwc->period_left);
248 s64 period = hwc->sample_period; 573 s64 period = hwc->sample_period;
@@ -282,19 +607,19 @@ static int sparc_pmu_enable(struct perf_event *event)
282 if (test_and_set_bit(idx, cpuc->used_mask)) 607 if (test_and_set_bit(idx, cpuc->used_mask))
283 return -EAGAIN; 608 return -EAGAIN;
284 609
285 sparc_pmu_disable_event(hwc, idx); 610 sparc_pmu_disable_event(cpuc, hwc, idx);
286 611
287 cpuc->events[idx] = event; 612 cpuc->events[idx] = event;
288 set_bit(idx, cpuc->active_mask); 613 set_bit(idx, cpuc->active_mask);
289 614
290 sparc_perf_event_set_period(event, hwc, idx); 615 sparc_perf_event_set_period(event, hwc, idx);
291 sparc_pmu_enable_event(hwc, idx); 616 sparc_pmu_enable_event(cpuc, hwc, idx);
292 perf_event_update_userpage(event); 617 perf_event_update_userpage(event);
293 return 0; 618 return 0;
294} 619}
295 620
296static u64 sparc_perf_event_update(struct perf_event *event, 621static u64 sparc_perf_event_update(struct perf_event *event,
297 struct hw_perf_event *hwc, int idx) 622 struct hw_perf_event *hwc, int idx)
298{ 623{
299 int shift = 64 - 32; 624 int shift = 64 - 32;
300 u64 prev_raw_count, new_raw_count; 625 u64 prev_raw_count, new_raw_count;
@@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event)
324 int idx = hwc->idx; 649 int idx = hwc->idx;
325 650
326 clear_bit(idx, cpuc->active_mask); 651 clear_bit(idx, cpuc->active_mask);
327 sparc_pmu_disable_event(hwc, idx); 652 sparc_pmu_disable_event(cpuc, hwc, idx);
328 653
329 barrier(); 654 barrier();
330 655
@@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event)
338static void sparc_pmu_read(struct perf_event *event) 663static void sparc_pmu_read(struct perf_event *event)
339{ 664{
340 struct hw_perf_event *hwc = &event->hw; 665 struct hw_perf_event *hwc = &event->hw;
666
341 sparc_perf_event_update(event, hwc, hwc->idx); 667 sparc_perf_event_update(event, hwc, hwc->idx);
342} 668}
343 669
344static void sparc_pmu_unthrottle(struct perf_event *event) 670static void sparc_pmu_unthrottle(struct perf_event *event)
345{ 671{
672 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
346 struct hw_perf_event *hwc = &event->hw; 673 struct hw_perf_event *hwc = &event->hw;
347 sparc_pmu_enable_event(hwc, hwc->idx); 674
675 sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
348} 676}
349 677
350static atomic_t active_events = ATOMIC_INIT(0); 678static atomic_t active_events = ATOMIC_INIT(0);
351static DEFINE_MUTEX(pmc_grab_mutex); 679static DEFINE_MUTEX(pmc_grab_mutex);
352 680
681static void perf_stop_nmi_watchdog(void *unused)
682{
683 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
684
685 stop_nmi_watchdog(NULL);
686 cpuc->pcr = pcr_ops->read();
687}
688
353void perf_event_grab_pmc(void) 689void perf_event_grab_pmc(void)
354{ 690{
355 if (atomic_inc_not_zero(&active_events)) 691 if (atomic_inc_not_zero(&active_events))
@@ -358,7 +694,7 @@ void perf_event_grab_pmc(void)
358 mutex_lock(&pmc_grab_mutex); 694 mutex_lock(&pmc_grab_mutex);
359 if (atomic_read(&active_events) == 0) { 695 if (atomic_read(&active_events) == 0) {
360 if (atomic_read(&nmi_active) > 0) { 696 if (atomic_read(&nmi_active) > 0) {
361 on_each_cpu(stop_nmi_watchdog, NULL, 1); 697 on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
362 BUG_ON(atomic_read(&nmi_active) != 0); 698 BUG_ON(atomic_read(&nmi_active) != 0);
363 } 699 }
364 atomic_inc(&active_events); 700 atomic_inc(&active_events);
@@ -375,30 +711,160 @@ void perf_event_release_pmc(void)
375 } 711 }
376} 712}
377 713
714static const struct perf_event_map *sparc_map_cache_event(u64 config)
715{
716 unsigned int cache_type, cache_op, cache_result;
717 const struct perf_event_map *pmap;
718
719 if (!sparc_pmu->cache_map)
720 return ERR_PTR(-ENOENT);
721
722 cache_type = (config >> 0) & 0xff;
723 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
724 return ERR_PTR(-EINVAL);
725
726 cache_op = (config >> 8) & 0xff;
727 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
728 return ERR_PTR(-EINVAL);
729
730 cache_result = (config >> 16) & 0xff;
731 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
732 return ERR_PTR(-EINVAL);
733
734 pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);
735
736 if (pmap->encoding == CACHE_OP_UNSUPPORTED)
737 return ERR_PTR(-ENOENT);
738
739 if (pmap->encoding == CACHE_OP_NONSENSE)
740 return ERR_PTR(-EINVAL);
741
742 return pmap;
743}
744
378static void hw_perf_event_destroy(struct perf_event *event) 745static void hw_perf_event_destroy(struct perf_event *event)
379{ 746{
380 perf_event_release_pmc(); 747 perf_event_release_pmc();
381} 748}
382 749
750/* Make sure all events can be scheduled into the hardware at
751 * the same time. This is simplified by the fact that we only
752 * need to support 2 simultaneous HW events.
753 */
754static int sparc_check_constraints(unsigned long *events, int n_ev)
755{
756 if (n_ev <= perf_max_events) {
757 u8 msk1, msk2;
758 u16 dummy;
759
760 if (n_ev == 1)
761 return 0;
762 BUG_ON(n_ev != 2);
763 perf_event_decode(events[0], &dummy, &msk1);
764 perf_event_decode(events[1], &dummy, &msk2);
765
766 /* If both events can go on any counter, OK. */
767 if (msk1 == (PIC_UPPER | PIC_LOWER) &&
768 msk2 == (PIC_UPPER | PIC_LOWER))
769 return 0;
770
771 /* If one event is limited to a specific counter,
772 * and the other can go on both, OK.
773 */
774 if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
775 msk2 == (PIC_UPPER | PIC_LOWER))
776 return 0;
777 if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
778 msk1 == (PIC_UPPER | PIC_LOWER))
779 return 0;
780
781 /* If the events are fixed to different counters, OK. */
782 if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
783 (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
784 return 0;
785
786 /* Otherwise, there is a conflict. */
787 }
788
789 return -1;
790}
791
792static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
793{
794 int eu = 0, ek = 0, eh = 0;
795 struct perf_event *event;
796 int i, n, first;
797
798 n = n_prev + n_new;
799 if (n <= 1)
800 return 0;
801
802 first = 1;
803 for (i = 0; i < n; i++) {
804 event = evts[i];
805 if (first) {
806 eu = event->attr.exclude_user;
807 ek = event->attr.exclude_kernel;
808 eh = event->attr.exclude_hv;
809 first = 0;
810 } else if (event->attr.exclude_user != eu ||
811 event->attr.exclude_kernel != ek ||
812 event->attr.exclude_hv != eh) {
813 return -EAGAIN;
814 }
815 }
816
817 return 0;
818}
819
820static int collect_events(struct perf_event *group, int max_count,
821 struct perf_event *evts[], unsigned long *events)
822{
823 struct perf_event *event;
824 int n = 0;
825
826 if (!is_software_event(group)) {
827 if (n >= max_count)
828 return -1;
829 evts[n] = group;
830 events[n++] = group->hw.event_base;
831 }
832 list_for_each_entry(event, &group->sibling_list, group_entry) {
833 if (!is_software_event(event) &&
834 event->state != PERF_EVENT_STATE_OFF) {
835 if (n >= max_count)
836 return -1;
837 evts[n] = event;
838 events[n++] = event->hw.event_base;
839 }
840 }
841 return n;
842}
843
383static int __hw_perf_event_init(struct perf_event *event) 844static int __hw_perf_event_init(struct perf_event *event)
384{ 845{
385 struct perf_event_attr *attr = &event->attr; 846 struct perf_event_attr *attr = &event->attr;
847 struct perf_event *evts[MAX_HWEVENTS];
386 struct hw_perf_event *hwc = &event->hw; 848 struct hw_perf_event *hwc = &event->hw;
849 unsigned long events[MAX_HWEVENTS];
387 const struct perf_event_map *pmap; 850 const struct perf_event_map *pmap;
388 u64 enc; 851 u64 enc;
852 int n;
389 853
390 if (atomic_read(&nmi_active) < 0) 854 if (atomic_read(&nmi_active) < 0)
391 return -ENODEV; 855 return -ENODEV;
392 856
393 if (attr->type != PERF_TYPE_HARDWARE) 857 if (attr->type == PERF_TYPE_HARDWARE) {
858 if (attr->config >= sparc_pmu->max_events)
859 return -EINVAL;
860 pmap = sparc_pmu->event_map(attr->config);
861 } else if (attr->type == PERF_TYPE_HW_CACHE) {
862 pmap = sparc_map_cache_event(attr->config);
863 if (IS_ERR(pmap))
864 return PTR_ERR(pmap);
865 } else
394 return -EOPNOTSUPP; 866 return -EOPNOTSUPP;
395 867
396 if (attr->config >= sparc_pmu->max_events)
397 return -EINVAL;
398
399 perf_event_grab_pmc();
400 event->destroy = hw_perf_event_destroy;
401
402 /* We save the enable bits in the config_base. So to 868 /* We save the enable bits in the config_base. So to
403 * turn off sampling just write 'config', and to enable 869 * turn off sampling just write 'config', and to enable
404 * things write 'config | config_base'. 870 * things write 'config | config_base'.
@@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event)
411 if (!attr->exclude_hv) 877 if (!attr->exclude_hv)
412 hwc->config_base |= sparc_pmu->hv_bit; 878 hwc->config_base |= sparc_pmu->hv_bit;
413 879
880 hwc->event_base = perf_event_encode(pmap);
881
882 enc = pmap->encoding;
883
884 n = 0;
885 if (event->group_leader != event) {
886 n = collect_events(event->group_leader,
887 perf_max_events - 1,
888 evts, events);
889 if (n < 0)
890 return -EINVAL;
891 }
892 events[n] = hwc->event_base;
893 evts[n] = event;
894
895 if (check_excludes(evts, n, 1))
896 return -EINVAL;
897
898 if (sparc_check_constraints(events, n + 1))
899 return -EINVAL;
900
901 /* Try to do all error checking before this point, as unwinding
902 * state after grabbing the PMC is difficult.
903 */
904 perf_event_grab_pmc();
905 event->destroy = hw_perf_event_destroy;
906
414 if (!hwc->sample_period) { 907 if (!hwc->sample_period) {
415 hwc->sample_period = MAX_PERIOD; 908 hwc->sample_period = MAX_PERIOD;
416 hwc->last_period = hwc->sample_period; 909 hwc->last_period = hwc->sample_period;
417 atomic64_set(&hwc->period_left, hwc->sample_period); 910 atomic64_set(&hwc->period_left, hwc->sample_period);
418 } 911 }
419 912
420 pmap = sparc_pmu->event_map(attr->config);
421
422 enc = pmap->encoding;
423 if (pmap->pic_mask & PIC_UPPER) { 913 if (pmap->pic_mask & PIC_UPPER) {
424 hwc->idx = PIC_UPPER_INDEX; 914 hwc->idx = PIC_UPPER_INDEX;
425 enc <<= sparc_pmu->upper_shift; 915 enc <<= sparc_pmu->upper_shift;
@@ -472,7 +962,7 @@ void perf_event_print_debug(void)
472} 962}
473 963
474static int __kprobes perf_event_nmi_handler(struct notifier_block *self, 964static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
475 unsigned long cmd, void *__args) 965 unsigned long cmd, void *__args)
476{ 966{
477 struct die_args *args = __args; 967 struct die_args *args = __args;
478 struct perf_sample_data data; 968 struct perf_sample_data data;
@@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
513 continue; 1003 continue;
514 1004
515 if (perf_event_overflow(event, 1, &data, regs)) 1005 if (perf_event_overflow(event, 1, &data, regs))
516 sparc_pmu_disable_event(hwc, idx); 1006 sparc_pmu_disable_event(cpuc, hwc, idx);
517 } 1007 }
518 1008
519 return NOTIFY_STOP; 1009 return NOTIFY_STOP;
@@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
525 1015
526static bool __init supported_pmu(void) 1016static bool __init supported_pmu(void)
527{ 1017{
528 if (!strcmp(sparc_pmu_type, "ultra3i")) { 1018 if (!strcmp(sparc_pmu_type, "ultra3") ||
529 sparc_pmu = &ultra3i_pmu; 1019 !strcmp(sparc_pmu_type, "ultra3+") ||
1020 !strcmp(sparc_pmu_type, "ultra3i") ||
1021 !strcmp(sparc_pmu_type, "ultra4+")) {
1022 sparc_pmu = &ultra3_pmu;
1023 return true;
1024 }
1025 if (!strcmp(sparc_pmu_type, "niagara")) {
1026 sparc_pmu = &niagara1_pmu;
530 return true; 1027 return true;
531 } 1028 }
532 if (!strcmp(sparc_pmu_type, "niagara2")) { 1029 if (!strcmp(sparc_pmu_type, "niagara2")) {
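(Aside, not part of the patch: the new sparc_map_cache_event() above unpacks the generic perf cache-event encoding, with the cache type in bits 0-7 of attr.config, the operation in bits 8-15 and the result in bits 16-23. A minimal userspace sketch of the matching encoder, assuming the usual PERF_COUNT_HW_CACHE_* enums from <linux/perf_event.h>:)

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a cache event the way sparc_map_cache_event() unpacks it:
 * type in bits 0-7, op in bits 8-15, result in bits 16-23.
 */
static uint64_t pack_cache_config(unsigned type, unsigned op, unsigned result)
{
	return type | (op << 8) | (result << 16);
}

int main(void)
{
	uint64_t config = pack_cache_config(PERF_COUNT_HW_CACHE_L1D,
					    PERF_COUNT_HW_CACHE_OP_READ,
					    PERF_COUNT_HW_CACHE_RESULT_MISS);

	printf("attr.type = PERF_TYPE_HW_CACHE, attr.config = %#llx\n",
	       (unsigned long long)config);
	return 0;
}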
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index f97cb8b6ee5f..f9024bccff16 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -11,6 +11,7 @@
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/param.h> /* for HZ */
14 15
15#ifdef CONFIG_SPARC64 16#ifdef CONFIG_SPARC64
16#include <linux/notifier.h> 17#include <linux/notifier.h>
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3be000435fad..d83892226f73 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -796,6 +796,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
796#define KVM_ARCH_WANT_MMU_NOTIFIER 796#define KVM_ARCH_WANT_MMU_NOTIFIER
797int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); 797int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
798int kvm_age_hva(struct kvm *kvm, unsigned long hva); 798int kvm_age_hva(struct kvm *kvm, unsigned long hva);
799void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
799int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); 800int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
800int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); 801int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
801int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); 802int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 74656d1d4e30..391206199515 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -244,6 +244,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
244 __func__, smp_processor_id(), vector, irq); 244 __func__, smp_processor_id(), vector, irq);
245 } 245 }
246 246
247 run_local_timers();
247 irq_exit(); 248 irq_exit();
248 249
249 set_irq_regs(old_regs); 250 set_irq_regs(old_regs);
@@ -268,6 +269,7 @@ void smp_generic_interrupt(struct pt_regs *regs)
268 if (generic_interrupt_extension) 269 if (generic_interrupt_extension)
269 generic_interrupt_extension(); 270 generic_interrupt_extension();
270 271
272 run_local_timers();
271 irq_exit(); 273 irq_exit();
272 274
273 set_irq_regs(old_regs); 275 set_irq_regs(old_regs);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index ec1de97600e7..d915d956e66d 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -198,6 +198,7 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
198{ 198{
199 ack_APIC_irq(); 199 ack_APIC_irq();
200 inc_irq_stat(irq_resched_count); 200 inc_irq_stat(irq_resched_count);
201 run_local_timers();
201 /* 202 /*
202 * KVM uses this interrupt to force a cpu out of guest mode 203 * KVM uses this interrupt to force a cpu out of guest mode
203 */ 204 */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1ae5ceba7eb2..7024224f0fc8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -664,7 +664,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
664{ 664{
665 ktime_t now = apic->lapic_timer.timer.base->get_time(); 665 ktime_t now = apic->lapic_timer.timer.base->get_time();
666 666
667 apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) * 667 apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) *
668 APIC_BUS_CYCLE_NS * apic->divide_count; 668 APIC_BUS_CYCLE_NS * apic->divide_count;
669 atomic_set(&apic->lapic_timer.pending, 0); 669 atomic_set(&apic->lapic_timer.pending, 0);
670 670
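(Aside, not part of the patch: the lapic.c one-liner above casts the initial-count register to u64 before the multiplication; without the cast the product is computed in 32-bit arithmetic and can wrap before being stored in the 64-bit period field. A standalone sketch with invented register values showing the difference:)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tmict = 0x03000000;	/* hypothetical APIC_TMICT value */
	uint32_t cycle_ns = 1, divide = 128;	/* hypothetical bus cycle and divider */

	uint64_t wrapped = tmict * cycle_ns * divide;		/* 32-bit multiply, truncates */
	uint64_t correct = (uint64_t)tmict * cycle_ns * divide;	/* 64-bit multiply */

	printf("truncated=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}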
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index eca41ae9f453..685a4ffac8e6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -156,6 +156,8 @@ module_param(oos_shadow, bool, 0644);
156#define CREATE_TRACE_POINTS 156#define CREATE_TRACE_POINTS
157#include "mmutrace.h" 157#include "mmutrace.h"
158 158
159#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
160
159#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 161#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
160 162
161struct kvm_rmap_desc { 163struct kvm_rmap_desc {
@@ -634,9 +636,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
634 if (*spte & shadow_accessed_mask) 636 if (*spte & shadow_accessed_mask)
635 kvm_set_pfn_accessed(pfn); 637 kvm_set_pfn_accessed(pfn);
636 if (is_writeble_pte(*spte)) 638 if (is_writeble_pte(*spte))
637 kvm_release_pfn_dirty(pfn); 639 kvm_set_pfn_dirty(pfn);
638 else
639 kvm_release_pfn_clean(pfn);
640 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); 640 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
641 if (!*rmapp) { 641 if (!*rmapp) {
642 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 642 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
748 return write_protected; 748 return write_protected;
749} 749}
750 750
751static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) 751static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
752{ 752{
753 u64 *spte; 753 u64 *spte;
754 int need_tlb_flush = 0; 754 int need_tlb_flush = 0;
@@ -763,8 +763,45 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
763 return need_tlb_flush; 763 return need_tlb_flush;
764} 764}
765 765
766static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, 766static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
767 int (*handler)(struct kvm *kvm, unsigned long *rmapp)) 767{
768 int need_flush = 0;
769 u64 *spte, new_spte;
770 pte_t *ptep = (pte_t *)data;
771 pfn_t new_pfn;
772
773 WARN_ON(pte_huge(*ptep));
774 new_pfn = pte_pfn(*ptep);
775 spte = rmap_next(kvm, rmapp, NULL);
776 while (spte) {
777 BUG_ON(!is_shadow_present_pte(*spte));
778 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
779 need_flush = 1;
780 if (pte_write(*ptep)) {
781 rmap_remove(kvm, spte);
782 __set_spte(spte, shadow_trap_nonpresent_pte);
783 spte = rmap_next(kvm, rmapp, NULL);
784 } else {
785 new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
786 new_spte |= (u64)new_pfn << PAGE_SHIFT;
787
788 new_spte &= ~PT_WRITABLE_MASK;
789 new_spte &= ~SPTE_HOST_WRITEABLE;
790 if (is_writeble_pte(*spte))
791 kvm_set_pfn_dirty(spte_to_pfn(*spte));
792 __set_spte(spte, new_spte);
793 spte = rmap_next(kvm, rmapp, spte);
794 }
795 }
796 if (need_flush)
797 kvm_flush_remote_tlbs(kvm);
798
799 return 0;
800}
801
802static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
803 int (*handler)(struct kvm *kvm, unsigned long *rmapp,
804 u64 data))
768{ 805{
769 int i, j; 806 int i, j;
770 int retval = 0; 807 int retval = 0;
@@ -786,13 +823,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
786 if (hva >= start && hva < end) { 823 if (hva >= start && hva < end) {
787 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; 824 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
788 825
789 retval |= handler(kvm, &memslot->rmap[gfn_offset]); 826 retval |= handler(kvm, &memslot->rmap[gfn_offset],
827 data);
790 828
791 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { 829 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
792 int idx = gfn_offset; 830 int idx = gfn_offset;
793 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); 831 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
794 retval |= handler(kvm, 832 retval |= handler(kvm,
795 &memslot->lpage_info[j][idx].rmap_pde); 833 &memslot->lpage_info[j][idx].rmap_pde,
834 data);
796 } 835 }
797 } 836 }
798 } 837 }
@@ -802,10 +841,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
802 841
803int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) 842int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
804{ 843{
805 return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); 844 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
806} 845}
807 846
808static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) 847void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
848{
849 kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
850}
851
852static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
809{ 853{
810 u64 *spte; 854 u64 *spte;
811 int young = 0; 855 int young = 0;
@@ -841,13 +885,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
841 gfn = unalias_gfn(vcpu->kvm, gfn); 885 gfn = unalias_gfn(vcpu->kvm, gfn);
842 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); 886 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
843 887
844 kvm_unmap_rmapp(vcpu->kvm, rmapp); 888 kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
845 kvm_flush_remote_tlbs(vcpu->kvm); 889 kvm_flush_remote_tlbs(vcpu->kvm);
846} 890}
847 891
848int kvm_age_hva(struct kvm *kvm, unsigned long hva) 892int kvm_age_hva(struct kvm *kvm, unsigned long hva)
849{ 893{
850 return kvm_handle_hva(kvm, hva, kvm_age_rmapp); 894 return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
851} 895}
852 896
853#ifdef MMU_DEBUG 897#ifdef MMU_DEBUG
@@ -1756,7 +1800,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1756 unsigned pte_access, int user_fault, 1800 unsigned pte_access, int user_fault,
1757 int write_fault, int dirty, int level, 1801 int write_fault, int dirty, int level,
1758 gfn_t gfn, pfn_t pfn, bool speculative, 1802 gfn_t gfn, pfn_t pfn, bool speculative,
1759 bool can_unsync) 1803 bool can_unsync, bool reset_host_protection)
1760{ 1804{
1761 u64 spte; 1805 u64 spte;
1762 int ret = 0; 1806 int ret = 0;
@@ -1783,6 +1827,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1783 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 1827 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1784 kvm_is_mmio_pfn(pfn)); 1828 kvm_is_mmio_pfn(pfn));
1785 1829
1830 if (reset_host_protection)
1831 spte |= SPTE_HOST_WRITEABLE;
1832
1786 spte |= (u64)pfn << PAGE_SHIFT; 1833 spte |= (u64)pfn << PAGE_SHIFT;
1787 1834
1788 if ((pte_access & ACC_WRITE_MASK) 1835 if ((pte_access & ACC_WRITE_MASK)
@@ -1828,7 +1875,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1828 unsigned pt_access, unsigned pte_access, 1875 unsigned pt_access, unsigned pte_access,
1829 int user_fault, int write_fault, int dirty, 1876 int user_fault, int write_fault, int dirty,
1830 int *ptwrite, int level, gfn_t gfn, 1877 int *ptwrite, int level, gfn_t gfn,
1831 pfn_t pfn, bool speculative) 1878 pfn_t pfn, bool speculative,
1879 bool reset_host_protection)
1832{ 1880{
1833 int was_rmapped = 0; 1881 int was_rmapped = 0;
1834 int was_writeble = is_writeble_pte(*sptep); 1882 int was_writeble = is_writeble_pte(*sptep);
@@ -1860,7 +1908,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1860 } 1908 }
1861 1909
1862 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, 1910 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1863 dirty, level, gfn, pfn, speculative, true)) { 1911 dirty, level, gfn, pfn, speculative, true,
1912 reset_host_protection)) {
1864 if (write_fault) 1913 if (write_fault)
1865 *ptwrite = 1; 1914 *ptwrite = 1;
1866 kvm_x86_ops->tlb_flush(vcpu); 1915 kvm_x86_ops->tlb_flush(vcpu);
@@ -1877,8 +1926,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1877 page_header_update_slot(vcpu->kvm, sptep, gfn); 1926 page_header_update_slot(vcpu->kvm, sptep, gfn);
1878 if (!was_rmapped) { 1927 if (!was_rmapped) {
1879 rmap_count = rmap_add(vcpu, sptep, gfn); 1928 rmap_count = rmap_add(vcpu, sptep, gfn);
1880 if (!is_rmap_spte(*sptep)) 1929 kvm_release_pfn_clean(pfn);
1881 kvm_release_pfn_clean(pfn);
1882 if (rmap_count > RMAP_RECYCLE_THRESHOLD) 1930 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1883 rmap_recycle(vcpu, sptep, gfn); 1931 rmap_recycle(vcpu, sptep, gfn);
1884 } else { 1932 } else {
@@ -1909,7 +1957,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1909 if (iterator.level == level) { 1957 if (iterator.level == level) {
1910 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, 1958 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1911 0, write, 1, &pt_write, 1959 0, write, 1, &pt_write,
1912 level, gfn, pfn, false); 1960 level, gfn, pfn, false, true);
1913 ++vcpu->stat.pf_fixed; 1961 ++vcpu->stat.pf_fixed;
1914 break; 1962 break;
1915 } 1963 }
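(Aside, not part of the patch: kvm_handle_hva() now threads an opaque u64 through to the per-rmap handler so that kvm_set_spte_hva() can hand the new pte to kvm_set_pte_rmapp(). The rmap index arithmetic is unchanged: the offset of the hva within the memslot selects the 4K rmap slot, and dividing by the pages-per-huge-page count selects the rmap_pde slot. A small sketch with hypothetical addresses, assuming 512 4K pages per 2M page as on x86:)

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x7f0000000000UL;	/* hypothetical slot userspace_addr */
	unsigned long hva   = 0x7f0000405000UL;	/* hypothetical address from the notifier */

	unsigned long gfn_offset = (hva - start) >> 12;	/* 4K rmap index */
	unsigned long pde_idx    = gfn_offset / 512;	/* 2M rmap_pde index */

	printf("rmap index %lu, lpage rmap_pde index %lu\n", gfn_offset, pde_idx);
	return 0;
}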
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d2fec9c12d22..72558f8ff3f5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -273,9 +273,13 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
273 if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) 273 if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
274 return; 274 return;
275 kvm_get_pfn(pfn); 275 kvm_get_pfn(pfn);
276 /*
 277 * we call mmu_set_spte() with reset_host_protection = true because
 277 * we call mmu_set_spte() with reset_host_protection = true because
278 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
279 */
276 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, 280 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
277 gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL, 281 gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
278 gpte_to_gfn(gpte), pfn, true); 282 gpte_to_gfn(gpte), pfn, true, true);
279} 283}
280 284
281/* 285/*
@@ -308,7 +312,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
308 user_fault, write_fault, 312 user_fault, write_fault,
309 gw->ptes[gw->level-1] & PT_DIRTY_MASK, 313 gw->ptes[gw->level-1] & PT_DIRTY_MASK,
310 ptwrite, level, 314 ptwrite, level,
311 gw->gfn, pfn, false); 315 gw->gfn, pfn, false, true);
312 break; 316 break;
313 } 317 }
314 318
@@ -558,6 +562,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
558static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 562static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
559{ 563{
560 int i, offset, nr_present; 564 int i, offset, nr_present;
565 bool reset_host_protection;
561 566
562 offset = nr_present = 0; 567 offset = nr_present = 0;
563 568
@@ -595,9 +600,16 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
595 600
596 nr_present++; 601 nr_present++;
597 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); 602 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
603 if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
604 pte_access &= ~ACC_WRITE_MASK;
605 reset_host_protection = 0;
606 } else {
607 reset_host_protection = 1;
608 }
598 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, 609 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
599 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, 610 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
600 spte_to_pfn(sp->spt[i]), true, false); 611 spte_to_pfn(sp->spt[i]), true, false,
612 reset_host_protection);
601 } 613 }
602 614
603 return !nr_present; 615 return !nr_present;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 944cc9c04b3c..c17404add91f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -767,6 +767,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
767 rdtscll(tsc_this); 767 rdtscll(tsc_this);
768 delta = vcpu->arch.host_tsc - tsc_this; 768 delta = vcpu->arch.host_tsc - tsc_this;
769 svm->vmcb->control.tsc_offset += delta; 769 svm->vmcb->control.tsc_offset += delta;
770 if (is_nested(svm))
771 svm->nested.hsave->control.tsc_offset += delta;
770 vcpu->cpu = cpu; 772 vcpu->cpu = cpu;
771 kvm_migrate_timers(vcpu); 773 kvm_migrate_timers(vcpu);
772 svm->asid_generation = 0; 774 svm->asid_generation = 0;
@@ -2057,10 +2059,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2057 2059
2058 switch (ecx) { 2060 switch (ecx) {
2059 case MSR_IA32_TSC: { 2061 case MSR_IA32_TSC: {
2060 u64 tsc; 2062 u64 tsc_offset;
2063
2064 if (is_nested(svm))
2065 tsc_offset = svm->nested.hsave->control.tsc_offset;
2066 else
2067 tsc_offset = svm->vmcb->control.tsc_offset;
2061 2068
2062 rdtscll(tsc); 2069 *data = tsc_offset + native_read_tsc();
2063 *data = svm->vmcb->control.tsc_offset + tsc;
2064 break; 2070 break;
2065 } 2071 }
2066 case MSR_K6_STAR: 2072 case MSR_K6_STAR:
@@ -2146,10 +2152,17 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2146 2152
2147 switch (ecx) { 2153 switch (ecx) {
2148 case MSR_IA32_TSC: { 2154 case MSR_IA32_TSC: {
2149 u64 tsc; 2155 u64 tsc_offset = data - native_read_tsc();
2156 u64 g_tsc_offset = 0;
2157
2158 if (is_nested(svm)) {
2159 g_tsc_offset = svm->vmcb->control.tsc_offset -
2160 svm->nested.hsave->control.tsc_offset;
2161 svm->nested.hsave->control.tsc_offset = tsc_offset;
2162 }
2163
2164 svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
2150 2165
2151 rdtscll(tsc);
2152 svm->vmcb->control.tsc_offset = data - tsc;
2153 break; 2166 break;
2154 } 2167 }
2155 case MSR_K6_STAR: 2168 case MSR_K6_STAR:
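(Aside, not part of the patch: with nested SVM, hsave carries L1's TSC offset while vmcb->control.tsc_offset is whatever is in effect for the currently running guest. The rewritten MSR_IA32_TSC write computes L1's new offset from the host TSC and re-applies L2's delta on top, so a write by L1 does not disturb what L2 observes. A worked example with invented numbers:)

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* all values hypothetical */
	int64_t host_tsc     = 1000;
	int64_t hsave_offset = -200;	/* L1 guest's offset */
	int64_t vmcb_offset  = -150;	/* offset in effect while L2 runs */
	int64_t data         = 900;	/* TSC value L1 writes via MSR_IA32_TSC */

	int64_t tsc_offset   = data - host_tsc;		 /* new L1 offset */
	int64_t g_tsc_offset = vmcb_offset - hsave_offset; /* L2's delta over L1 */

	hsave_offset = tsc_offset;
	vmcb_offset  = tsc_offset + g_tsc_offset;

	printf("L1 now reads %lld; L2 keeps its %+lld delta (vmcb offset %lld)\n",
	       (long long)(host_tsc + hsave_offset),
	       (long long)g_tsc_offset,
	       (long long)vmcb_offset);
	return 0;
}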
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f3812014bd0b..ed53b42caba1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -709,7 +709,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
709 if (vcpu->cpu != cpu) { 709 if (vcpu->cpu != cpu) {
710 vcpu_clear(vmx); 710 vcpu_clear(vmx);
711 kvm_migrate_timers(vcpu); 711 kvm_migrate_timers(vcpu);
712 vpid_sync_vcpu_all(vmx); 712 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
713 local_irq_disable(); 713 local_irq_disable();
714 list_add(&vmx->local_vcpus_link, 714 list_add(&vmx->local_vcpus_link,
715 &per_cpu(vcpus_on_cpu, cpu)); 715 &per_cpu(vcpus_on_cpu, cpu));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index be451ee44249..9b9695322f56 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1591,6 +1591,8 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1591 1591
1592 if (cpuid->nent < 1) 1592 if (cpuid->nent < 1)
1593 goto out; 1593 goto out;
1594 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1595 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1594 r = -ENOMEM; 1596 r = -ENOMEM;
1595 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); 1597 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
1596 if (!cpuid_entries) 1598 if (!cpuid_entries)
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index b53225d2cac3..e133ce25e290 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -100,7 +100,7 @@ static int xen_array_release(struct inode *inode, struct file *file)
100 return 0; 100 return 0;
101} 101}
102 102
103static struct file_operations u32_array_fops = { 103static const struct file_operations u32_array_fops = {
104 .owner = THIS_MODULE, 104 .owner = THIS_MODULE,
105 .open = u32_array_open, 105 .open = u32_array_open,
106 .release= xen_array_release, 106 .release= xen_array_release,
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6593ab39cfe9..8873b9b439ff 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -350,6 +350,7 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
350 350
351 if (bio->bi_private) 351 if (bio->bi_private)
352 complete(bio->bi_private); 352 complete(bio->bi_private);
353 __free_page(bio_page(bio));
353 354
354 bio_put(bio); 355 bio_put(bio);
355} 356}
@@ -372,30 +373,50 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
372 struct request_queue *q = bdev_get_queue(bdev); 373 struct request_queue *q = bdev_get_queue(bdev);
373 int type = flags & DISCARD_FL_BARRIER ? 374 int type = flags & DISCARD_FL_BARRIER ?
374 DISCARD_BARRIER : DISCARD_NOBARRIER; 375 DISCARD_BARRIER : DISCARD_NOBARRIER;
376 struct bio *bio;
377 struct page *page;
375 int ret = 0; 378 int ret = 0;
376 379
377 if (!q) 380 if (!q)
378 return -ENXIO; 381 return -ENXIO;
379 382
380 if (!q->prepare_discard_fn) 383 if (!blk_queue_discard(q))
381 return -EOPNOTSUPP; 384 return -EOPNOTSUPP;
382 385
383 while (nr_sects && !ret) { 386 while (nr_sects && !ret) {
384 struct bio *bio = bio_alloc(gfp_mask, 0); 387 unsigned int sector_size = q->limits.logical_block_size;
385 if (!bio) 388 unsigned int max_discard_sectors =
386 return -ENOMEM; 389 min(q->limits.max_discard_sectors, UINT_MAX >> 9);
387 390
391 bio = bio_alloc(gfp_mask, 1);
392 if (!bio)
393 goto out;
394 bio->bi_sector = sector;
388 bio->bi_end_io = blkdev_discard_end_io; 395 bio->bi_end_io = blkdev_discard_end_io;
389 bio->bi_bdev = bdev; 396 bio->bi_bdev = bdev;
390 if (flags & DISCARD_FL_WAIT) 397 if (flags & DISCARD_FL_WAIT)
391 bio->bi_private = &wait; 398 bio->bi_private = &wait;
392 399
393 bio->bi_sector = sector; 400 /*
401 * Add a zeroed one-sector payload as that's what
402 * our current implementations need. If we'll ever need
403 * more the interface will need revisiting.
404 */
405 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
406 if (!page)
407 goto out_free_bio;
408 if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
409 goto out_free_page;
394 410
395 if (nr_sects > queue_max_hw_sectors(q)) { 411 /*
396 bio->bi_size = queue_max_hw_sectors(q) << 9; 412 * And override the bio size - the way discard works we
397 nr_sects -= queue_max_hw_sectors(q); 413 * touch many more blocks on disk than the actual payload
398 sector += queue_max_hw_sectors(q); 414 * length.
415 */
416 if (nr_sects > max_discard_sectors) {
417 bio->bi_size = max_discard_sectors << 9;
418 nr_sects -= max_discard_sectors;
419 sector += max_discard_sectors;
399 } else { 420 } else {
400 bio->bi_size = nr_sects << 9; 421 bio->bi_size = nr_sects << 9;
401 nr_sects = 0; 422 nr_sects = 0;
@@ -414,5 +435,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
414 bio_put(bio); 435 bio_put(bio);
415 } 436 }
416 return ret; 437 return ret;
438out_free_page:
439 __free_page(page);
440out_free_bio:
441 bio_put(bio);
442out:
443 return -ENOMEM;
417} 444}
418EXPORT_SYMBOL(blkdev_issue_discard); 445EXPORT_SYMBOL(blkdev_issue_discard);
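(Aside, not part of the patch: the rewritten blkdev_issue_discard() now carries a single zeroed sector as payload and overrides bi_size, splitting the range into bios of at most min(q->limits.max_discard_sectors, UINT_MAX >> 9) sectors each. Roughly, the chunking walks the range like this; standalone sketch, device limit invented:)

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 0, nr_sects = 10000000ULL;	/* hypothetical range */
	unsigned int max_discard_sectors = 4194240;	/* hypothetical device limit */

	while (nr_sects) {
		unsigned long long this_chunk = nr_sects;

		if (this_chunk > max_discard_sectors)
			this_chunk = max_discard_sectors;

		printf("discard bio: sector %llu, %llu sectors\n", sector, this_chunk);
		sector   += this_chunk;
		nr_sects -= this_chunk;
	}
	return 0;
}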
diff --git a/block/blk-core.c b/block/blk-core.c
index 8135228e4b29..81f34311659a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -34,6 +34,7 @@
34#include "blk.h" 34#include "blk.h"
35 35
36EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); 36EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);
37EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
37EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 38EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
38 39
39static int __make_request(struct request_queue *q, struct bio *bio); 40static int __make_request(struct request_queue *q, struct bio *bio);
@@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
69 part_stat_inc(cpu, part, merges[rw]); 70 part_stat_inc(cpu, part, merges[rw]);
70 else { 71 else {
71 part_round_stats(cpu, part); 72 part_round_stats(cpu, part);
72 part_inc_in_flight(part, rw); 73 part_inc_in_flight(part);
73 } 74 }
74 75
75 part_stat_unlock(); 76 part_stat_unlock();
@@ -1031,7 +1032,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
1031 1032
1032 if (part->in_flight) { 1033 if (part->in_flight) {
1033 __part_stat_add(cpu, part, time_in_queue, 1034 __part_stat_add(cpu, part, time_in_queue,
1034 part_in_flight(part) * (now - part->stamp)); 1035 part->in_flight * (now - part->stamp));
1035 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1036 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1036 } 1037 }
1037 part->stamp = now; 1038 part->stamp = now;
@@ -1124,7 +1125,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1124 req->cmd_flags |= REQ_DISCARD; 1125 req->cmd_flags |= REQ_DISCARD;
1125 if (bio_rw_flagged(bio, BIO_RW_BARRIER)) 1126 if (bio_rw_flagged(bio, BIO_RW_BARRIER))
1126 req->cmd_flags |= REQ_SOFTBARRIER; 1127 req->cmd_flags |= REQ_SOFTBARRIER;
1127 req->q->prepare_discard_fn(req->q, req);
1128 } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) 1128 } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
1129 req->cmd_flags |= REQ_HARDBARRIER; 1129 req->cmd_flags |= REQ_HARDBARRIER;
1130 1130
@@ -1437,7 +1437,8 @@ static inline void __generic_make_request(struct bio *bio)
1437 goto end_io; 1437 goto end_io;
1438 } 1438 }
1439 1439
1440 if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { 1440 if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
1441 nr_sectors > queue_max_hw_sectors(q))) {
1441 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1442 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1442 bdevname(bio->bi_bdev, b), 1443 bdevname(bio->bi_bdev, b),
1443 bio_sectors(bio), 1444 bio_sectors(bio),
@@ -1470,7 +1471,7 @@ static inline void __generic_make_request(struct bio *bio)
1470 goto end_io; 1471 goto end_io;
1471 1472
1472 if (bio_rw_flagged(bio, BIO_RW_DISCARD) && 1473 if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
1473 !q->prepare_discard_fn) { 1474 !blk_queue_discard(q)) {
1474 err = -EOPNOTSUPP; 1475 err = -EOPNOTSUPP;
1475 goto end_io; 1476 goto end_io;
1476 } 1477 }
@@ -1738,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
1738 part_stat_inc(cpu, part, ios[rw]); 1739 part_stat_inc(cpu, part, ios[rw]);
1739 part_stat_add(cpu, part, ticks[rw], duration); 1740 part_stat_add(cpu, part, ticks[rw], duration);
1740 part_round_stats(cpu, part); 1741 part_round_stats(cpu, part);
1741 part_dec_in_flight(part, rw); 1742 part_dec_in_flight(part);
1742 1743
1743 part_stat_unlock(); 1744 part_stat_unlock();
1744 } 1745 }
@@ -2491,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2491} 2492}
2492EXPORT_SYMBOL(kblockd_schedule_work); 2493EXPORT_SYMBOL(kblockd_schedule_work);
2493 2494
2495int kblockd_schedule_delayed_work(struct request_queue *q,
2496 struct delayed_work *work,
2497 unsigned long delay)
2498{
2499 return queue_delayed_work(kblockd_workqueue, work, delay);
2500}
2501EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2502
2494int __init blk_dev_init(void) 2503int __init blk_dev_init(void)
2495{ 2504{
2496 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2505 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99cb5cf1f447..b0de8574fdc8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
351 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 351 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
352 352
353 part_round_stats(cpu, part); 353 part_round_stats(cpu, part);
354 part_dec_in_flight(part, rq_data_dir(req)); 354 part_dec_in_flight(part);
355 355
356 part_stat_unlock(); 356 part_stat_unlock();
357 } 357 }
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 83413ff83739..e0695bca7027 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -34,23 +34,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
34EXPORT_SYMBOL(blk_queue_prep_rq); 34EXPORT_SYMBOL(blk_queue_prep_rq);
35 35
36/** 36/**
37 * blk_queue_set_discard - set a discard_sectors function for queue
38 * @q: queue
39 * @dfn: prepare_discard function
40 *
41 * It's possible for a queue to register a discard callback which is used
42 * to transform a discard request into the appropriate type for the
43 * hardware. If none is registered, then discard requests are failed
44 * with %EOPNOTSUPP.
45 *
46 */
47void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
48{
49 q->prepare_discard_fn = dfn;
50}
51EXPORT_SYMBOL(blk_queue_set_discard);
52
53/**
54 * blk_queue_merge_bvec - set a merge_bvec function for queue 37 * blk_queue_merge_bvec - set a merge_bvec function for queue
55 * @q: queue 38 * @q: queue
56 * @mbfn: merge_bvec_fn 39 * @mbfn: merge_bvec_fn
@@ -111,7 +94,9 @@ void blk_set_default_limits(struct queue_limits *lim)
111 lim->max_hw_segments = MAX_HW_SEGMENTS; 94 lim->max_hw_segments = MAX_HW_SEGMENTS;
112 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; 95 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
113 lim->max_segment_size = MAX_SEGMENT_SIZE; 96 lim->max_segment_size = MAX_SEGMENT_SIZE;
114 lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS; 97 lim->max_sectors = BLK_DEF_MAX_SECTORS;
98 lim->max_hw_sectors = INT_MAX;
99 lim->max_discard_sectors = SAFE_MAX_SECTORS;
115 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; 100 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
116 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); 101 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
117 lim->alignment_offset = 0; 102 lim->alignment_offset = 0;
@@ -164,6 +149,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
164 q->unplug_timer.data = (unsigned long)q; 149 q->unplug_timer.data = (unsigned long)q;
165 150
166 blk_set_default_limits(&q->limits); 151 blk_set_default_limits(&q->limits);
152 blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
167 153
168 /* 154 /*
169 * If the caller didn't supply a lock, fall back to our embedded 155 * If the caller didn't supply a lock, fall back to our embedded
@@ -254,6 +240,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
254EXPORT_SYMBOL(blk_queue_max_hw_sectors); 240EXPORT_SYMBOL(blk_queue_max_hw_sectors);
255 241
256/** 242/**
243 * blk_queue_max_discard_sectors - set max sectors for a single discard
244 * @q: the request queue for the device
245 * @max_discard: maximum number of sectors to discard
246 **/
247void blk_queue_max_discard_sectors(struct request_queue *q,
248 unsigned int max_discard_sectors)
249{
250 q->limits.max_discard_sectors = max_discard_sectors;
251}
252EXPORT_SYMBOL(blk_queue_max_discard_sectors);
253
254/**
257 * blk_queue_max_phys_segments - set max phys segments for a request for this queue 255 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
258 * @q: the request queue for the device 256 * @q: the request queue for the device
259 * @max_segments: max number of segments 257 * @max_segments: max number of segments
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b78c9c3e2670..8a6d81afb284 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -452,6 +452,7 @@ int blk_register_queue(struct gendisk *disk)
452 if (ret) { 452 if (ret) {
453 kobject_uevent(&q->kobj, KOBJ_REMOVE); 453 kobject_uevent(&q->kobj, KOBJ_REMOVE);
454 kobject_del(&q->kobj); 454 kobject_del(&q->kobj);
455 blk_trace_remove_sysfs(disk_to_dev(disk));
455 return ret; 456 return ret;
456 } 457 }
457 458
@@ -465,11 +466,11 @@ void blk_unregister_queue(struct gendisk *disk)
465 if (WARN_ON(!q)) 466 if (WARN_ON(!q))
466 return; 467 return;
467 468
468 if (q->request_fn) { 469 if (q->request_fn)
469 elv_unregister_queue(q); 470 elv_unregister_queue(q);
470 471
471 kobject_uevent(&q->kobj, KOBJ_REMOVE); 472 kobject_uevent(&q->kobj, KOBJ_REMOVE);
472 kobject_del(&q->kobj); 473 kobject_del(&q->kobj);
473 kobject_put(&disk_to_dev(disk)->kobj); 474 blk_trace_remove_sysfs(disk_to_dev(disk));
474 } 475 kobject_put(&disk_to_dev(disk)->kobj);
475} 476}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1ca813b16e78..9c4b679908f4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data {
150 * idle window management 150 * idle window management
151 */ 151 */
152 struct timer_list idle_slice_timer; 152 struct timer_list idle_slice_timer;
153 struct work_struct unplug_work; 153 struct delayed_work unplug_work;
154 154
155 struct cfq_queue *active_queue; 155 struct cfq_queue *active_queue;
156 struct cfq_io_context *active_cic; 156 struct cfq_io_context *active_cic;
@@ -173,6 +173,7 @@ struct cfq_data {
173 unsigned int cfq_slice[2]; 173 unsigned int cfq_slice[2];
174 unsigned int cfq_slice_async_rq; 174 unsigned int cfq_slice_async_rq;
175 unsigned int cfq_slice_idle; 175 unsigned int cfq_slice_idle;
176 unsigned int cfq_latency;
176 177
177 struct list_head cic_list; 178 struct list_head cic_list;
178 179
@@ -180,6 +181,8 @@ struct cfq_data {
180 * Fallback dummy cfqq for extreme OOM conditions 181 * Fallback dummy cfqq for extreme OOM conditions
181 */ 182 */
182 struct cfq_queue oom_cfqq; 183 struct cfq_queue oom_cfqq;
184
185 unsigned long last_end_sync_rq;
183}; 186};
184 187
185enum cfqq_state_flags { 188enum cfqq_state_flags {
@@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio)
265 * scheduler run of queue, if there are requests pending and no one in the 268 * scheduler run of queue, if there are requests pending and no one in the
266 * driver that will restart queueing 269 * driver that will restart queueing
267 */ 270 */
268static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 271static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
272 unsigned long delay)
269{ 273{
270 if (cfqd->busy_queues) { 274 if (cfqd->busy_queues) {
271 cfq_log(cfqd, "schedule dispatch"); 275 cfq_log(cfqd, "schedule dispatch");
272 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 276 kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
277 delay);
273 } 278 }
274} 279}
275 280
@@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
1326 return 0; 1331 return 0;
1327 1332
1328 /* 1333 /*
1329 * we are the only queue, allow up to 4 times of 'quantum' 1334 * Sole queue user, allow bigger slice
1330 */ 1335 */
1331 if (cfqq->dispatched >= 4 * max_dispatch) 1336 max_dispatch *= 4;
1332 return 0; 1337 }
1338
1339 /*
1340 * Async queues must wait a bit before being allowed dispatch.
1341 * We also ramp up the dispatch depth gradually for async IO,
1342 * based on the last sync IO we serviced
1343 */
1344 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
1345 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
1346 unsigned int depth;
1347
1348 depth = last_sync / cfqd->cfq_slice[1];
1349 if (!depth && !cfqq->dispatched)
1350 depth = 1;
1351 if (depth < max_dispatch)
1352 max_dispatch = depth;
1333 } 1353 }
1334 1354
1355 if (cfqq->dispatched >= max_dispatch)
1356 return 0;
1357
1335 /* 1358 /*
1336 * Dispatch a request from this cfqq 1359 * Dispatch a request from this cfqq
1337 */ 1360 */
@@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
1376 1399
1377 if (unlikely(cfqd->active_queue == cfqq)) { 1400 if (unlikely(cfqd->active_queue == cfqq)) {
1378 __cfq_slice_expired(cfqd, cfqq, 0); 1401 __cfq_slice_expired(cfqd, cfqq, 0);
1379 cfq_schedule_dispatch(cfqd); 1402 cfq_schedule_dispatch(cfqd, 0);
1380 } 1403 }
1381 1404
1382 kmem_cache_free(cfq_pool, cfqq); 1405 kmem_cache_free(cfq_pool, cfqq);
@@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1471{ 1494{
1472 if (unlikely(cfqq == cfqd->active_queue)) { 1495 if (unlikely(cfqq == cfqd->active_queue)) {
1473 __cfq_slice_expired(cfqd, cfqq, 0); 1496 __cfq_slice_expired(cfqd, cfqq, 0);
1474 cfq_schedule_dispatch(cfqd); 1497 cfq_schedule_dispatch(cfqd, 0);
1475 } 1498 }
1476 1499
1477 cfq_put_queue(cfqq); 1500 cfq_put_queue(cfqq);
@@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1951 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); 1974 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1952 1975
1953 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || 1976 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
1954 (cfqd->hw_tag && CIC_SEEKY(cic))) 1977 (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
1955 enable_idle = 0; 1978 enable_idle = 0;
1956 else if (sample_valid(cic->ttime_samples)) { 1979 else if (sample_valid(cic->ttime_samples)) {
1957 if (cic->ttime_mean > cfqd->cfq_slice_idle) 1980 if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
2157 if (cfq_cfqq_sync(cfqq)) 2180 if (cfq_cfqq_sync(cfqq))
2158 cfqd->sync_flight--; 2181 cfqd->sync_flight--;
2159 2182
2160 if (sync) 2183 if (sync) {
2161 RQ_CIC(rq)->last_end_request = now; 2184 RQ_CIC(rq)->last_end_request = now;
2185 cfqd->last_end_sync_rq = now;
2186 }
2162 2187
2163 /* 2188 /*
2164 * If this is the active queue, check if it needs to be expired, 2189 * If this is the active queue, check if it needs to be expired,
@@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
2186 } 2211 }
2187 2212
2188 if (!rq_in_driver(cfqd)) 2213 if (!rq_in_driver(cfqd))
2189 cfq_schedule_dispatch(cfqd); 2214 cfq_schedule_dispatch(cfqd, 0);
2190} 2215}
2191 2216
2192/* 2217/*
@@ -2316,7 +2341,7 @@ queue_fail:
2316 if (cic) 2341 if (cic)
2317 put_io_context(cic->ioc); 2342 put_io_context(cic->ioc);
2318 2343
2319 cfq_schedule_dispatch(cfqd); 2344 cfq_schedule_dispatch(cfqd, 0);
2320 spin_unlock_irqrestore(q->queue_lock, flags); 2345 spin_unlock_irqrestore(q->queue_lock, flags);
2321 cfq_log(cfqd, "set_request fail"); 2346 cfq_log(cfqd, "set_request fail");
2322 return 1; 2347 return 1;
@@ -2325,7 +2350,7 @@ queue_fail:
2325static void cfq_kick_queue(struct work_struct *work) 2350static void cfq_kick_queue(struct work_struct *work)
2326{ 2351{
2327 struct cfq_data *cfqd = 2352 struct cfq_data *cfqd =
2328 container_of(work, struct cfq_data, unplug_work); 2353 container_of(work, struct cfq_data, unplug_work.work);
2329 struct request_queue *q = cfqd->queue; 2354 struct request_queue *q = cfqd->queue;
2330 2355
2331 spin_lock_irq(q->queue_lock); 2356 spin_lock_irq(q->queue_lock);
@@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data)
2379expire: 2404expire:
2380 cfq_slice_expired(cfqd, timed_out); 2405 cfq_slice_expired(cfqd, timed_out);
2381out_kick: 2406out_kick:
2382 cfq_schedule_dispatch(cfqd); 2407 cfq_schedule_dispatch(cfqd, 0);
2383out_cont: 2408out_cont:
2384 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2409 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2385} 2410}
@@ -2387,7 +2412,7 @@ out_cont:
2387static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2412static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2388{ 2413{
2389 del_timer_sync(&cfqd->idle_slice_timer); 2414 del_timer_sync(&cfqd->idle_slice_timer);
2390 cancel_work_sync(&cfqd->unplug_work); 2415 cancel_delayed_work_sync(&cfqd->unplug_work);
2391} 2416}
2392 2417
2393static void cfq_put_async_queues(struct cfq_data *cfqd) 2418static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q)
2469 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2494 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2470 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2495 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2471 2496
2472 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 2497 INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
2473 2498
2474 cfqd->cfq_quantum = cfq_quantum; 2499 cfqd->cfq_quantum = cfq_quantum;
2475 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; 2500 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q)
2480 cfqd->cfq_slice[1] = cfq_slice_sync; 2505 cfqd->cfq_slice[1] = cfq_slice_sync;
2481 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2506 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2482 cfqd->cfq_slice_idle = cfq_slice_idle; 2507 cfqd->cfq_slice_idle = cfq_slice_idle;
2508 cfqd->cfq_latency = 1;
2483 cfqd->hw_tag = 1; 2509 cfqd->hw_tag = 1;
2484 2510 cfqd->last_end_sync_rq = jiffies;
2485 return cfqd; 2511 return cfqd;
2486} 2512}
2487 2513
@@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
2549SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 2575SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
2550SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 2576SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
2551SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 2577SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
2578SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
2552#undef SHOW_FUNCTION 2579#undef SHOW_FUNCTION
2553 2580
2554#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 2581#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
2580STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2607STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
2581STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 2608STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
2582 UINT_MAX, 0); 2609 UINT_MAX, 0);
2610STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
2583#undef STORE_FUNCTION 2611#undef STORE_FUNCTION
2584 2612
2585#define CFQ_ATTR(name) \ 2613#define CFQ_ATTR(name) \
@@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = {
2595 CFQ_ATTR(slice_async), 2623 CFQ_ATTR(slice_async),
2596 CFQ_ATTR(slice_async_rq), 2624 CFQ_ATTR(slice_async_rq),
2597 CFQ_ATTR(slice_idle), 2625 CFQ_ATTR(slice_idle),
2626 CFQ_ATTR(low_latency),
2598 __ATTR_NULL 2627 __ATTR_NULL
2599}; 2628};
2600 2629
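(Aside, not part of the patch: the low_latency ramp-up above throttles async dispatch based on how recently a sync request completed. The dispatch depth is the elapsed time divided by the sync slice, with a floor of one once the async queue has nothing in flight. A standalone sketch with illustrative jiffy values, taking the sync slice to be the 100 ms default:)

#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 10000, last_end_sync_rq = 9750;	/* hypothetical */
	unsigned int slice_sync = 100;	/* assumed ~100 ms default sync slice */
	unsigned int max_dispatch = 4, dispatched = 0;

	unsigned int depth = (jiffies - last_end_sync_rq) / slice_sync;

	if (!depth && !dispatched)
		depth = 1;
	if (depth < max_dispatch)
		max_dispatch = depth;

	printf("async queue may dispatch up to %u request(s)\n", max_dispatch);
	return 0;
}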
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7865a34e0faa..9bd086c1a4d5 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -21,6 +21,11 @@ static int compat_put_int(unsigned long arg, int val)
21 return put_user(val, (compat_int_t __user *)compat_ptr(arg)); 21 return put_user(val, (compat_int_t __user *)compat_ptr(arg));
22} 22}
23 23
24static int compat_put_uint(unsigned long arg, unsigned int val)
25{
26 return put_user(val, (compat_uint_t __user *)compat_ptr(arg));
27}
28
24static int compat_put_long(unsigned long arg, long val) 29static int compat_put_long(unsigned long arg, long val)
25{ 30{
26 return put_user(val, (compat_long_t __user *)compat_ptr(arg)); 31 return put_user(val, (compat_long_t __user *)compat_ptr(arg));
@@ -734,6 +739,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
734 switch (cmd) { 739 switch (cmd) {
735 case HDIO_GETGEO: 740 case HDIO_GETGEO:
736 return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); 741 return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
742 case BLKPBSZGET:
743 return compat_put_uint(arg, bdev_physical_block_size(bdev));
744 case BLKIOMIN:
745 return compat_put_uint(arg, bdev_io_min(bdev));
746 case BLKIOOPT:
747 return compat_put_uint(arg, bdev_io_opt(bdev));
748 case BLKALIGNOFF:
749 return compat_put_int(arg, bdev_alignment_offset(bdev));
737 case BLKFLSBUF: 750 case BLKFLSBUF:
738 case BLKROSET: 751 case BLKROSET:
739 case BLKDISCARD: 752 case BLKDISCARD:
diff --git a/block/genhd.c b/block/genhd.c
index 517e4332cb37..5a0861da324d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -869,7 +869,6 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
869static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); 869static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
870static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 870static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
871static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 871static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
872static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
873#ifdef CONFIG_FAIL_MAKE_REQUEST 872#ifdef CONFIG_FAIL_MAKE_REQUEST
874static struct device_attribute dev_attr_fail = 873static struct device_attribute dev_attr_fail =
875 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); 874 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -889,7 +888,6 @@ static struct attribute *disk_attrs[] = {
889 &dev_attr_alignment_offset.attr, 888 &dev_attr_alignment_offset.attr,
890 &dev_attr_capability.attr, 889 &dev_attr_capability.attr,
891 &dev_attr_stat.attr, 890 &dev_attr_stat.attr,
892 &dev_attr_inflight.attr,
893#ifdef CONFIG_FAIL_MAKE_REQUEST 891#ifdef CONFIG_FAIL_MAKE_REQUEST
894 &dev_attr_fail.attr, 892 &dev_attr_fail.attr,
895#endif 893#endif
@@ -1055,7 +1053,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
1055 part_stat_read(hd, merges[1]), 1053 part_stat_read(hd, merges[1]),
1056 (unsigned long long)part_stat_read(hd, sectors[1]), 1054 (unsigned long long)part_stat_read(hd, sectors[1]),
1057 jiffies_to_msecs(part_stat_read(hd, ticks[1])), 1055 jiffies_to_msecs(part_stat_read(hd, ticks[1])),
1058 part_in_flight(hd), 1056 hd->in_flight,
1059 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 1057 jiffies_to_msecs(part_stat_read(hd, io_ticks)),
1060 jiffies_to_msecs(part_stat_read(hd, time_in_queue)) 1058 jiffies_to_msecs(part_stat_read(hd, time_in_queue))
1061 ); 1059 );
diff --git a/block/ioctl.c b/block/ioctl.c
index d3e6b5827a34..1f4d1de12b09 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -138,6 +138,11 @@ static int put_int(unsigned long arg, int val)
138 return put_user(val, (int __user *)arg); 138 return put_user(val, (int __user *)arg);
139} 139}
140 140
141static int put_uint(unsigned long arg, unsigned int val)
142{
143 return put_user(val, (unsigned int __user *)arg);
144}
145
141static int put_long(unsigned long arg, long val) 146static int put_long(unsigned long arg, long val)
142{ 147{
143 return put_user(val, (long __user *)arg); 148 return put_user(val, (long __user *)arg);
@@ -263,10 +268,18 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
263 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 268 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
264 case BLKROGET: 269 case BLKROGET:
265 return put_int(arg, bdev_read_only(bdev) != 0); 270 return put_int(arg, bdev_read_only(bdev) != 0);
266 case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ 271 case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
267 return put_int(arg, block_size(bdev)); 272 return put_int(arg, block_size(bdev));
268 case BLKSSZGET: /* get block device hardware sector size */ 273 case BLKSSZGET: /* get block device logical block size */
269 return put_int(arg, bdev_logical_block_size(bdev)); 274 return put_int(arg, bdev_logical_block_size(bdev));
275 case BLKPBSZGET: /* get block device physical block size */
276 return put_uint(arg, bdev_physical_block_size(bdev));
277 case BLKIOMIN:
278 return put_uint(arg, bdev_io_min(bdev));
279 case BLKIOOPT:
280 return put_uint(arg, bdev_io_opt(bdev));
281 case BLKALIGNOFF:
282 return put_int(arg, bdev_alignment_offset(bdev));
270 case BLKSECTGET: 283 case BLKSECTGET:
271 return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); 284 return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
272 case BLKRASET: 285 case BLKRASET:
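(Aside, not part of the patch: the new ioctls simply mirror the queue-limit accessors to userspace. A minimal userspace sketch reading the four new values; the device path is just an example:)

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned int pbsz, iomin, ioopt;
	int align;
	int fd = open("/dev/sda", O_RDONLY);	/* example device */

	if (fd < 0)
		return 1;
	if (!ioctl(fd, BLKPBSZGET, &pbsz) && !ioctl(fd, BLKIOMIN, &iomin) &&
	    !ioctl(fd, BLKIOOPT, &ioopt) && !ioctl(fd, BLKALIGNOFF, &align))
		printf("phys=%u min_io=%u opt_io=%u align=%d\n",
		       pbsz, iomin, ioopt, align);
	close(fd);
	return 0;
}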
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index dd8729d674e5..0ed42d8870c7 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -211,6 +211,18 @@ config ACPI_HOTPLUG_CPU
211 select ACPI_CONTAINER 211 select ACPI_CONTAINER
212 default y 212 default y
213 213
214config ACPI_PROCESSOR_AGGREGATOR
215 tristate "Processor Aggregator"
216 depends on ACPI_PROCESSOR
217 depends on EXPERIMENTAL
218 depends on X86
219 help
220 ACPI 4.0 defines processor Aggregator, which enables OS to perform
221 specfic processor configuration and control that applies to all
222 processors in the platform. Currently only logical processor idling
223 is defined, which is to reduce power consumption. This driver
224 support the new device.
225
214config ACPI_THERMAL 226config ACPI_THERMAL
215 tristate "Thermal Zone" 227 tristate "Thermal Zone"
216 depends on ACPI_PROCESSOR 228 depends on ACPI_PROCESSOR
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 82cd49dc603b..7702118509a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -62,3 +62,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
62processor-y := processor_core.o processor_throttling.o 62processor-y := processor_core.o processor_throttling.o
63processor-y += processor_idle.o processor_thermal.o 63processor-y += processor_idle.o processor_thermal.o
64processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 64processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
65
66obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
new file mode 100644
index 000000000000..0d2cdb86158b
--- /dev/null
+++ b/drivers/acpi/acpi_pad.c
@@ -0,0 +1,514 @@
1/*
2 * acpi_pad.c ACPI Processor Aggregator Driver
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20
21#include <linux/kernel.h>
22#include <linux/cpumask.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/types.h>
26#include <linux/kthread.h>
27#include <linux/freezer.h>
28#include <linux/cpu.h>
29#include <linux/clockchips.h>
30#include <acpi/acpi_bus.h>
31#include <acpi/acpi_drivers.h>
32
33#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
34#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
35#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
36static DEFINE_MUTEX(isolated_cpus_lock);
37
38#define MWAIT_SUBSTATE_MASK (0xf)
39#define MWAIT_CSTATE_MASK (0xf)
40#define MWAIT_SUBSTATE_SIZE (4)
41#define CPUID_MWAIT_LEAF (5)
42#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
43#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
44static unsigned long power_saving_mwait_eax;
45static void power_saving_mwait_init(void)
46{
47 unsigned int eax, ebx, ecx, edx;
48 unsigned int highest_cstate = 0;
49 unsigned int highest_subcstate = 0;
50 int i;
51
52 if (!boot_cpu_has(X86_FEATURE_MWAIT))
53 return;
54 if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
55 return;
56
57 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
58
59 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
60 !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
61 return;
62
63 edx >>= MWAIT_SUBSTATE_SIZE;
64 for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
65 if (edx & MWAIT_SUBSTATE_MASK) {
66 highest_cstate = i;
67 highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
68 }
69 }
70 power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
71 (highest_subcstate - 1);
72
73 for_each_online_cpu(i)
74 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
75
76#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
77 switch (boot_cpu_data.x86_vendor) {
78 case X86_VENDOR_AMD:
79 case X86_VENDOR_INTEL:
80 /*
81 * AMD Fam10h TSC will tick in all
82 * C/P/S0/S1 states when this bit is set.
83 */
84 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
85 return;
86
87 /*FALL THROUGH*/
88 default:
89 /* TSC could halt in idle, so notify users */
90 mark_tsc_unstable("TSC halts in idle");
91 }
92#endif
93}
94
95static unsigned long cpu_weight[NR_CPUS];
96static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
97static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
98static void round_robin_cpu(unsigned int tsk_index)
99{
100 struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
101 cpumask_var_t tmp;
102 int cpu;
103 unsigned long min_weight = -1, preferred_cpu;
104
105 if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
106 return;
107
108 mutex_lock(&isolated_cpus_lock);
109 cpumask_clear(tmp);
110 for_each_cpu(cpu, pad_busy_cpus)
111 cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
112 cpumask_andnot(tmp, cpu_online_mask, tmp);
113	/* avoid HT siblings if possible */
114 if (cpumask_empty(tmp))
115 cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
116 if (cpumask_empty(tmp)) {
117 mutex_unlock(&isolated_cpus_lock);
118 return;
119 }
120 for_each_cpu(cpu, tmp) {
121 if (cpu_weight[cpu] < min_weight) {
122 min_weight = cpu_weight[cpu];
123 preferred_cpu = cpu;
124 }
125 }
126
127 if (tsk_in_cpu[tsk_index] != -1)
128 cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
129 tsk_in_cpu[tsk_index] = preferred_cpu;
130 cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
131 cpu_weight[preferred_cpu]++;
132 mutex_unlock(&isolated_cpus_lock);
133
134 set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
135}
136
137static void exit_round_robin(unsigned int tsk_index)
138{
139 struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
140 cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
141 tsk_in_cpu[tsk_index] = -1;
142}
143
144static unsigned int idle_pct = 5; /* percentage */
145static unsigned int round_robin_time = 10; /* second */
146static int power_saving_thread(void *data)
147{
148 struct sched_param param = {.sched_priority = 1};
149 int do_sleep;
150 unsigned int tsk_index = (unsigned long)data;
151 u64 last_jiffies = 0;
152
153 sched_setscheduler(current, SCHED_RR, &param);
154
155 while (!kthread_should_stop()) {
156 int cpu;
157 u64 expire_time;
158
159 try_to_freeze();
160
161 /* round robin to cpus */
162 if (last_jiffies + round_robin_time * HZ < jiffies) {
163 last_jiffies = jiffies;
164 round_robin_cpu(tsk_index);
165 }
166
167 do_sleep = 0;
168
169 current_thread_info()->status &= ~TS_POLLING;
170 /*
171 * TS_POLLING-cleared state must be visible before we test
172 * NEED_RESCHED:
173 */
174 smp_mb();
175
176 expire_time = jiffies + HZ * (100 - idle_pct) / 100;
177
178 while (!need_resched()) {
179 local_irq_disable();
180 cpu = smp_processor_id();
181 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
182 &cpu);
183 stop_critical_timings();
184
185 __monitor((void *)&current_thread_info()->flags, 0, 0);
186 smp_mb();
187 if (!need_resched())
188 __mwait(power_saving_mwait_eax, 1);
189
190 start_critical_timings();
191 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
192 &cpu);
193 local_irq_enable();
194
195 if (jiffies > expire_time) {
196 do_sleep = 1;
197 break;
198 }
199 }
200
201 current_thread_info()->status |= TS_POLLING;
202
203 /*
204	 * The current sched_rt has a threshold for RT task running time.
205	 * When an RT task uses 95% of the CPU time, the RT thread will be
206	 * scheduled out for 5% of the CPU time so other tasks are not
207	 * starved. But the mechanism only works when every CPU has an RT
208	 * task running; if one CPU has no RT task, RT tasks from other CPUs
209	 * will borrow CPU time from it and push RT usage above 95%. To keep
210	 * this starvation avoidance working, take a nap here.
211 */
212 if (do_sleep)
213 schedule_timeout_killable(HZ * idle_pct / 100);
214 }
215
216 exit_round_robin(tsk_index);
217 return 0;
218}
219
220static struct task_struct *ps_tsks[NR_CPUS];
221static unsigned int ps_tsk_num;
222static int create_power_saving_task(void)
223{
224 ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
225 (void *)(unsigned long)ps_tsk_num,
226 "power_saving/%d", ps_tsk_num);
227 if (ps_tsks[ps_tsk_num]) {
228 ps_tsk_num++;
229 return 0;
230 }
231 return -EINVAL;
232}
233
234static void destroy_power_saving_task(void)
235{
236 if (ps_tsk_num > 0) {
237 ps_tsk_num--;
238 kthread_stop(ps_tsks[ps_tsk_num]);
239 }
240}
241
242static void set_power_saving_task_num(unsigned int num)
243{
244 if (num > ps_tsk_num) {
245 while (ps_tsk_num < num) {
246 if (create_power_saving_task())
247 return;
248 }
249 } else if (num < ps_tsk_num) {
250 while (ps_tsk_num > num)
251 destroy_power_saving_task();
252 }
253}
254
255static int acpi_pad_idle_cpus(unsigned int num_cpus)
256{
257 get_online_cpus();
258
259 num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
260 set_power_saving_task_num(num_cpus);
261
262 put_online_cpus();
263 return 0;
264}
265
266static uint32_t acpi_pad_idle_cpus_num(void)
267{
268 return ps_tsk_num;
269}
270
271static ssize_t acpi_pad_rrtime_store(struct device *dev,
272 struct device_attribute *attr, const char *buf, size_t count)
273{
274 unsigned long num;
275 if (strict_strtoul(buf, 0, &num))
276 return -EINVAL;
277 if (num < 1 || num >= 100)
278 return -EINVAL;
279 mutex_lock(&isolated_cpus_lock);
280 round_robin_time = num;
281 mutex_unlock(&isolated_cpus_lock);
282 return count;
283}
284
285static ssize_t acpi_pad_rrtime_show(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
289}
290static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
291 acpi_pad_rrtime_show,
292 acpi_pad_rrtime_store);
293
294static ssize_t acpi_pad_idlepct_store(struct device *dev,
295 struct device_attribute *attr, const char *buf, size_t count)
296{
297 unsigned long num;
298 if (strict_strtoul(buf, 0, &num))
299 return -EINVAL;
300 if (num < 1 || num >= 100)
301 return -EINVAL;
302 mutex_lock(&isolated_cpus_lock);
303 idle_pct = num;
304 mutex_unlock(&isolated_cpus_lock);
305 return count;
306}
307
308static ssize_t acpi_pad_idlepct_show(struct device *dev,
309 struct device_attribute *attr, char *buf)
310{
311 return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
312}
313static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
314 acpi_pad_idlepct_show,
315 acpi_pad_idlepct_store);
316
317static ssize_t acpi_pad_idlecpus_store(struct device *dev,
318 struct device_attribute *attr, const char *buf, size_t count)
319{
320 unsigned long num;
321 if (strict_strtoul(buf, 0, &num))
322 return -EINVAL;
323 mutex_lock(&isolated_cpus_lock);
324 acpi_pad_idle_cpus(num);
325 mutex_unlock(&isolated_cpus_lock);
326 return count;
327}
328
329static ssize_t acpi_pad_idlecpus_show(struct device *dev,
330 struct device_attribute *attr, char *buf)
331{
332 return cpumask_scnprintf(buf, PAGE_SIZE,
333 to_cpumask(pad_busy_cpus_bits));
334}
335static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
336 acpi_pad_idlecpus_show,
337 acpi_pad_idlecpus_store);
338
339static int acpi_pad_add_sysfs(struct acpi_device *device)
340{
341 int result;
342
343 result = device_create_file(&device->dev, &dev_attr_idlecpus);
344 if (result)
345 return -ENODEV;
346 result = device_create_file(&device->dev, &dev_attr_idlepct);
347 if (result) {
348 device_remove_file(&device->dev, &dev_attr_idlecpus);
349 return -ENODEV;
350 }
351 result = device_create_file(&device->dev, &dev_attr_rrtime);
352 if (result) {
353 device_remove_file(&device->dev, &dev_attr_idlecpus);
354 device_remove_file(&device->dev, &dev_attr_idlepct);
355 return -ENODEV;
356 }
357 return 0;
358}
359
360static void acpi_pad_remove_sysfs(struct acpi_device *device)
361{
362 device_remove_file(&device->dev, &dev_attr_idlecpus);
363 device_remove_file(&device->dev, &dev_attr_idlepct);
364 device_remove_file(&device->dev, &dev_attr_rrtime);
365}
366
367/* Query firmware how many CPUs should be idle */
368static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
369{
370 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
371 acpi_status status;
372 union acpi_object *package;
373 int rev, num, ret = -EINVAL;
374
375 status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
376 if (ACPI_FAILURE(status))
377 return -EINVAL;
378 package = buffer.pointer;
379 if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
380 goto out;
381 rev = package->package.elements[0].integer.value;
382 num = package->package.elements[1].integer.value;
383 if (rev != 1)
384 goto out;
385 *num_cpus = num;
386 ret = 0;
387out:
388 kfree(buffer.pointer);
389 return ret;
390}
391
392/* Notify firmware how many CPUs are idle */
393static void acpi_pad_ost(acpi_handle handle, int stat,
394 uint32_t idle_cpus)
395{
396 union acpi_object params[3] = {
397 {.type = ACPI_TYPE_INTEGER,},
398 {.type = ACPI_TYPE_INTEGER,},
399 {.type = ACPI_TYPE_BUFFER,},
400 };
401 struct acpi_object_list arg_list = {3, params};
402
403 params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
404 params[1].integer.value = stat;
405 params[2].buffer.length = 4;
406 params[2].buffer.pointer = (void *)&idle_cpus;
407 acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
408}
409
410static void acpi_pad_handle_notify(acpi_handle handle)
411{
412 int num_cpus, ret;
413 uint32_t idle_cpus;
414
415 mutex_lock(&isolated_cpus_lock);
416 if (acpi_pad_pur(handle, &num_cpus)) {
417 mutex_unlock(&isolated_cpus_lock);
418 return;
419 }
420 ret = acpi_pad_idle_cpus(num_cpus);
421 idle_cpus = acpi_pad_idle_cpus_num();
422 if (!ret)
423 acpi_pad_ost(handle, 0, idle_cpus);
424 else
425 acpi_pad_ost(handle, 1, 0);
426 mutex_unlock(&isolated_cpus_lock);
427}
428
429static void acpi_pad_notify(acpi_handle handle, u32 event,
430 void *data)
431{
432 struct acpi_device *device = data;
433
434 switch (event) {
435 case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
436 acpi_pad_handle_notify(handle);
437 acpi_bus_generate_proc_event(device, event, 0);
438 acpi_bus_generate_netlink_event(device->pnp.device_class,
439 dev_name(&device->dev), event, 0);
440 break;
441 default:
442 printk(KERN_WARNING"Unsupported event [0x%x]\n", event);
443 break;
444 }
445}
446
447static int acpi_pad_add(struct acpi_device *device)
448{
449 acpi_status status;
450
451 strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
452 strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
453
454 if (acpi_pad_add_sysfs(device))
455 return -ENODEV;
456
457 status = acpi_install_notify_handler(device->handle,
458 ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
459 if (ACPI_FAILURE(status)) {
460 acpi_pad_remove_sysfs(device);
461 return -ENODEV;
462 }
463
464 return 0;
465}
466
467static int acpi_pad_remove(struct acpi_device *device,
468 int type)
469{
470 mutex_lock(&isolated_cpus_lock);
471 acpi_pad_idle_cpus(0);
472 mutex_unlock(&isolated_cpus_lock);
473
474 acpi_remove_notify_handler(device->handle,
475 ACPI_DEVICE_NOTIFY, acpi_pad_notify);
476 acpi_pad_remove_sysfs(device);
477 return 0;
478}
479
480static const struct acpi_device_id pad_device_ids[] = {
481 {"ACPI000C", 0},
482 {"", 0},
483};
484MODULE_DEVICE_TABLE(acpi, pad_device_ids);
485
486static struct acpi_driver acpi_pad_driver = {
487 .name = "processor_aggregator",
488 .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
489 .ids = pad_device_ids,
490 .ops = {
491 .add = acpi_pad_add,
492 .remove = acpi_pad_remove,
493 },
494};
495
496static int __init acpi_pad_init(void)
497{
498 power_saving_mwait_init();
499 if (power_saving_mwait_eax == 0)
500 return -EINVAL;
501
502 return acpi_bus_register_driver(&acpi_pad_driver);
503}
504
505static void __exit acpi_pad_exit(void)
506{
507 acpi_bus_unregister_driver(&acpi_pad_driver);
508}
509
510module_init(acpi_pad_init);
511module_exit(acpi_pad_exit);
512MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>");
513MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
514MODULE_LICENSE("GPL");
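Editorial note: the MWAIT hint computed in power_saving_mwait_init() above packs the deepest advertised C-state and its sub-state into the EAX hint that the idle loop later passes to __mwait(). A standalone sketch of that packing, assuming the same 4-bits-per-C-state CPUID.5 EDX layout the driver uses (the sample EDX value is invented so the sketch can run in userspace):

	#include <stdio.h>

	#define MWAIT_SUBSTATE_MASK	0xf
	#define MWAIT_SUBSTATE_SIZE	4

	/* mirrors the loop in power_saving_mwait_init(): pick the deepest
	 * C-state with a non-zero sub-state count and encode it as a hint */
	static unsigned long mwait_hint_from_edx(unsigned int edx)
	{
		unsigned int highest_cstate = 0, highest_subcstate = 0;
		int i;

		edx >>= MWAIT_SUBSTATE_SIZE;	/* skip the C0 field */
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		return (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		       (highest_subcstate - 1);
	}

	int main(void)
	{
		/* e.g. 2 sub-states in field 1, 4 in field 2, 1 in field 3 */
		printf("hint = 0x%lx\n", mwait_hint_from_edx(0x00001420));
		return 0;
	}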
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 3a2cfefc71ab..7338b6a3e049 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -67,7 +67,7 @@ struct dock_station {
67 struct list_head dependent_devices; 67 struct list_head dependent_devices;
68 struct list_head hotplug_devices; 68 struct list_head hotplug_devices;
69 69
70 struct list_head sibiling; 70 struct list_head sibling;
71 struct platform_device *dock_device; 71 struct platform_device *dock_device;
72}; 72};
73static LIST_HEAD(dock_stations); 73static LIST_HEAD(dock_stations);
@@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle)
275 275
276 if (is_dock(handle)) 276 if (is_dock(handle))
277 return 1; 277 return 1;
278 list_for_each_entry(dock_station, &dock_stations, sibiling) { 278 list_for_each_entry(dock_station, &dock_stations, sibling) {
279 if (find_dock_dependent_device(dock_station, handle)) 279 if (find_dock_dependent_device(dock_station, handle))
280 return 1; 280 return 1;
281 } 281 }
@@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
619 * make sure this handle is for a device dependent on the dock, 619 * make sure this handle is for a device dependent on the dock,
620 * this would include the dock station itself 620 * this would include the dock station itself
621 */ 621 */
622 list_for_each_entry(dock_station, &dock_stations, sibiling) { 622 list_for_each_entry(dock_station, &dock_stations, sibling) {
623 /* 623 /*
624 * An ATA bay can be in a dock and itself can be ejected 624 * An ATA bay can be in a dock and itself can be ejected
625 * seperately, so there are two 'dock stations' which need the 625 * seperately, so there are two 'dock stations' which need the
@@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
651 if (!dock_station_count) 651 if (!dock_station_count)
652 return; 652 return;
653 653
654 list_for_each_entry(dock_station, &dock_stations, sibiling) { 654 list_for_each_entry(dock_station, &dock_stations, sibling) {
655 dd = find_dock_dependent_device(dock_station, handle); 655 dd = find_dock_dependent_device(dock_station, handle);
656 if (dd) 656 if (dd)
657 dock_del_hotplug_device(dock_station, dd); 657 dock_del_hotplug_device(dock_station, dd);
@@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
787 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK 787 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
788 && event != ACPI_NOTIFY_EJECT_REQUEST) 788 && event != ACPI_NOTIFY_EJECT_REQUEST)
789 return 0; 789 return 0;
790 list_for_each_entry(dock_station, &dock_stations, sibiling) { 790 list_for_each_entry(dock_station, &dock_stations, sibling) {
791 if (dock_station->handle == handle) { 791 if (dock_station->handle == handle) {
792 struct dock_data *dock_data; 792 struct dock_data *dock_data;
793 793
@@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle)
958 dock_station->last_dock_time = jiffies - HZ; 958 dock_station->last_dock_time = jiffies - HZ;
959 INIT_LIST_HEAD(&dock_station->dependent_devices); 959 INIT_LIST_HEAD(&dock_station->dependent_devices);
960 INIT_LIST_HEAD(&dock_station->hotplug_devices); 960 INIT_LIST_HEAD(&dock_station->hotplug_devices);
961 INIT_LIST_HEAD(&dock_station->sibiling); 961 INIT_LIST_HEAD(&dock_station->sibling);
962 spin_lock_init(&dock_station->dd_lock); 962 spin_lock_init(&dock_station->dd_lock);
963 mutex_init(&dock_station->hp_lock); 963 mutex_init(&dock_station->hp_lock);
964 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); 964 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
@@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle)
1044 add_dock_dependent_device(dock_station, dd); 1044 add_dock_dependent_device(dock_station, dd);
1045 1045
1046 dock_station_count++; 1046 dock_station_count++;
1047 list_add(&dock_station->sibiling, &dock_stations); 1047 list_add(&dock_station->sibling, &dock_stations);
1048 return 0; 1048 return 0;
1049 1049
1050dock_add_err_unregister: 1050dock_add_err_unregister:
@@ -1149,7 +1149,7 @@ static void __exit dock_exit(void)
1149 struct dock_station *tmp; 1149 struct dock_station *tmp;
1150 1150
1151 unregister_acpi_bus_notifier(&dock_acpi_notifier); 1151 unregister_acpi_bus_notifier(&dock_acpi_notifier);
1152 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) 1152 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
1153 dock_remove(dock_station); 1153 dock_remove(dock_station);
1154} 1154}
1155 1155
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f70796081c4c..baef28c1e630 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -119,6 +119,8 @@ static struct acpi_ec {
119} *boot_ec, *first_ec; 119} *boot_ec, *first_ec;
120 120
121static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ 121static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 122static int EC_FLAGS_VALIDATE_ECDT; /* ASUSTek ECDTs need to be validated */
123static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
122 124
123/* -------------------------------------------------------------------------- 125/* --------------------------------------------------------------------------
124 Transaction Management 126 Transaction Management
@@ -232,10 +234,8 @@ static int ec_poll(struct acpi_ec *ec)
232 } 234 }
233 advance_transaction(ec, acpi_ec_read_status(ec)); 235 advance_transaction(ec, acpi_ec_read_status(ec));
234 } while (time_before(jiffies, delay)); 236 } while (time_before(jiffies, delay));
235 if (!ec->curr->irq_count || 237 if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
236 (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
237 break; 238 break;
238 /* try restart command if we get any false interrupts */
239 pr_debug(PREFIX "controller reset, restart transaction\n"); 239 pr_debug(PREFIX "controller reset, restart transaction\n");
240 spin_lock_irqsave(&ec->curr_lock, flags); 240 spin_lock_irqsave(&ec->curr_lock, flags);
241 start_transaction(ec); 241 start_transaction(ec);
@@ -899,6 +899,44 @@ static const struct acpi_device_id ec_device_ids[] = {
899 {"", 0}, 899 {"", 0},
900}; 900};
901 901
902/* Some BIOS do not survive early DSDT scan, skip it */
903static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
904{
905 EC_FLAGS_SKIP_DSDT_SCAN = 1;
906 return 0;
907}
908
909/* ASUStek often supplies us with broken ECDT, validate it */
910static int ec_validate_ecdt(const struct dmi_system_id *id)
911{
912 EC_FLAGS_VALIDATE_ECDT = 1;
913 return 0;
914}
915
916/* MSI EC needs special treatment, enable it */
917static int ec_flag_msi(const struct dmi_system_id *id)
918{
919 EC_FLAGS_MSI = 1;
920 EC_FLAGS_VALIDATE_ECDT = 1;
921 return 0;
922}
923
924static struct dmi_system_id __initdata ec_dmi_table[] = {
925 {
926 ec_skip_dsdt_scan, "Compal JFL92", {
927 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
928 DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
929 {
930 ec_flag_msi, "MSI hardware", {
931 DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
932 DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
933 {
934 ec_validate_ecdt, "ASUS hardware", {
935 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
936 {},
937};
938
939
902int __init acpi_ec_ecdt_probe(void) 940int __init acpi_ec_ecdt_probe(void)
903{ 941{
904 acpi_status status; 942 acpi_status status;
@@ -911,11 +949,7 @@ int __init acpi_ec_ecdt_probe(void)
911 /* 949 /*
912 * Generate a boot ec context 950 * Generate a boot ec context
913 */ 951 */
914 if (dmi_name_in_vendors("Micro-Star") || 952 dmi_check_system(ec_dmi_table);
915 dmi_name_in_vendors("Notebook")) {
916 pr_info(PREFIX "Enabling special treatment for EC from MSI.\n");
917 EC_FLAGS_MSI = 1;
918 }
919 status = acpi_get_table(ACPI_SIG_ECDT, 1, 953 status = acpi_get_table(ACPI_SIG_ECDT, 1,
920 (struct acpi_table_header **)&ecdt_ptr); 954 (struct acpi_table_header **)&ecdt_ptr);
921 if (ACPI_SUCCESS(status)) { 955 if (ACPI_SUCCESS(status)) {
@@ -926,7 +960,7 @@ int __init acpi_ec_ecdt_probe(void)
926 boot_ec->handle = ACPI_ROOT_OBJECT; 960 boot_ec->handle = ACPI_ROOT_OBJECT;
927 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); 961 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
928 /* Don't trust ECDT, which comes from ASUSTek */ 962 /* Don't trust ECDT, which comes from ASUSTek */
929 if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0) 963 if (!EC_FLAGS_VALIDATE_ECDT)
930 goto install; 964 goto install;
931 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 965 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
932 if (!saved_ec) 966 if (!saved_ec)
@@ -934,6 +968,10 @@ int __init acpi_ec_ecdt_probe(void)
934 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); 968 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
935 /* fall through */ 969 /* fall through */
936 } 970 }
971
972 if (EC_FLAGS_SKIP_DSDT_SCAN)
973 return -ENODEV;
974
937 /* This workaround is needed only on some broken machines, 975 /* This workaround is needed only on some broken machines,
938 * which require early EC, but fail to provide ECDT */ 976 * which require early EC, but fail to provide ECDT */
939 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); 977 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
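Editorial note: the EC hunk above replaces open-coded dmi_name_in_vendors() checks with a DMI quirk table whose callbacks set flags that acpi_ec_ecdt_probe() consults later. A minimal sketch of that pattern, assuming only the standard <linux/dmi.h> interface (the vendor and board strings below are invented):

	#include <linux/dmi.h>

	static int example_quirk;

	static int enable_example_quirk(const struct dmi_system_id *id)
	{
		example_quirk = 1;	/* remember that this platform needs the quirk */
		return 0;
	}

	static const struct dmi_system_id example_dmi_table[] = {
		{
			.callback = enable_example_quirk,
			.ident = "Example vendor board",
			.matches = {
				DMI_MATCH(DMI_BIOS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE-1"),
			},
		},
		{}
	};

	/* called once during driver init, before the quirk flag is used */
	static void example_apply_quirks(void)
	{
		dmi_check_system(example_dmi_table);
	}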
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index d0d550d22a6d..f8b6f555ba52 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -398,6 +398,8 @@ acpi_system_write_wakeup_device(struct file *file,
398 398
399 if (len > 4) 399 if (len > 4)
400 len = 4; 400 len = 4;
401 if (len < 0)
402 return -EFAULT;
401 403
402 if (copy_from_user(strbuf, buffer, len)) 404 if (copy_from_user(strbuf, buffer, len))
403 return -EFAULT; 405 return -EFAULT;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c2d4d6e09364..c567b46dfa0f 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -863,13 +863,6 @@ static int acpi_processor_add(struct acpi_device *device)
863 goto err_remove_sysfs; 863 goto err_remove_sysfs;
864 } 864 }
865 865
866 if (pr->flags.throttling) {
867 printk(KERN_INFO PREFIX "%s [%s] (supports",
868 acpi_device_name(device), acpi_device_bid(device));
869 printk(" %d throttling states", pr->throttling.state_count);
870 printk(")\n");
871 }
872
873 return 0; 866 return 0;
874 867
875err_remove_sysfs: 868err_remove_sysfs:
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 468921bed22f..14a7481c97d7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1052,6 +1052,8 @@ static void acpi_device_set_id(struct acpi_device *device)
1052 device->flags.bus_address = 1; 1052 device->flags.bus_address = 1;
1053 } 1053 }
1054 1054
1055 kfree(info);
1056
1055 /* 1057 /*
1056 * Some devices don't reliably have _HIDs & _CIDs, so add 1058 * Some devices don't reliably have _HIDs & _CIDs, so add
1057 * synthetic HIDs to make sure drivers can find them. 1059 * synthetic HIDs to make sure drivers can find them.
@@ -1325,13 +1327,8 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
1325 struct acpi_device **child) 1327 struct acpi_device **child)
1326{ 1328{
1327 acpi_status status; 1329 acpi_status status;
1328 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1329 void *device = NULL; 1330 void *device = NULL;
1330 1331
1331 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1332 printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n",
1333 (char *) buffer.pointer);
1334
1335 status = acpi_bus_check_add(handle, 0, ops, &device); 1332 status = acpi_bus_check_add(handle, 0, ops, &device);
1336 if (ACPI_SUCCESS(status)) 1333 if (ACPI_SUCCESS(status))
1337 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, 1334 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a4fddb24476f..f6e54bf8dd96 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode,
285 struct file *file); 285 struct file *file);
286static ssize_t acpi_video_device_write_brightness(struct file *file, 286static ssize_t acpi_video_device_write_brightness(struct file *file,
287 const char __user *buffer, size_t count, loff_t *data); 287 const char __user *buffer, size_t count, loff_t *data);
288static struct file_operations acpi_video_device_brightness_fops = { 288static const struct file_operations acpi_video_device_brightness_fops = {
289 .owner = THIS_MODULE, 289 .owner = THIS_MODULE,
290 .open = acpi_video_device_brightness_open_fs, 290 .open = acpi_video_device_brightness_open_fs,
291 .read = seq_read, 291 .read = seq_read,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 703364b52170..66e181345b3a 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1306,14 +1306,6 @@ static void amb_close (struct atm_vcc * atm_vcc) {
1306 return; 1306 return;
1307} 1307}
1308 1308
1309/********** Set socket options for a VC **********/
1310
1311// int amb_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
1312
1313/********** Set socket options for a VC **********/
1314
1315// int amb_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
1316
1317/********** Send **********/ 1309/********** Send **********/
1318 1310
1319static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) { 1311static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 5503bfc8e132..0c3026145443 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2031,7 +2031,7 @@ static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname,
2031 2031
2032 2032
2033static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname, 2033static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname,
2034 void __user *optval,int optlen) 2034 void __user *optval,unsigned int optlen)
2035{ 2035{
2036 return -EINVAL; 2036 return -EINVAL;
2037} 2037}
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b119640e1ee9..cd5049af47a9 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1244,7 +1244,7 @@ static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname,
1244 1244
1245 1245
1246static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname, 1246static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname,
1247 void __user *optval,int optlen) 1247 void __user *optval,unsigned int optlen)
1248{ 1248{
1249 func_enter (); 1249 func_enter ();
1250 func_exit (); 1250 func_exit ();
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 10f000dbe448..f766cc46b4c4 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1795,7 +1795,7 @@ fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *op
1795 1795
1796 1796
1797static int 1797static int
1798fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1798fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
1799{ 1799{
1800 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1800 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1801 1801
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 01ce241dbeae..4e49021e67ee 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2590,7 +2590,7 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2590} 2590}
2591 2591
2592static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, 2592static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2593 void *optval, int optlen) { 2593 void *optval, unsigned int optlen) {
2594 hrz_dev * dev = HRZ_DEV(atm_vcc->dev); 2594 hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2595 PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt"); 2595 PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt");
2596 switch (level) { 2596 switch (level) {
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 78c9736c3579..b2c1b37ab2e4 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2862,7 +2862,7 @@ static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2862} 2862}
2863 2863
2864static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname, 2864static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2865 void __user *optval, int optlen) 2865 void __user *optval, unsigned int optlen)
2866{ 2866{
2867 IF_EVENT(printk(">ia_setsockopt\n");) 2867 IF_EVENT(printk(">ia_setsockopt\n");)
2868 return -EINVAL; 2868 return -EINVAL;
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 752b1ba81f7e..2e9635be048c 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1517,7 +1517,7 @@ static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
1517 1517
1518 1518
1519static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, 1519static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
1520 void __user *optval,int optlen) 1520 void __user *optval,unsigned int optlen)
1521{ 1521{
1522 return -EINVAL; 1522 return -EINVAL;
1523} 1523}
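Editorial note: the ATM hunks above all make the same mechanical change — the setsockopt handler in struct atmdev_ops now takes an unsigned int optlen instead of int. For reference, a stub conforming to the new prototype might look like this (the driver name is hypothetical):

	#include <linux/atmdev.h>
	#include <linux/errno.h>

	static int example_atm_setsockopt(struct atm_vcc *vcc, int level, int optname,
					  void __user *optval, unsigned int optlen)
	{
		return -EINVAL;	/* no driver-specific socket options */
	}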
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 6fa7b0fdbdfd..eb4fa1943944 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/smp_lock.h> 39#include <linux/smp_lock.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
41#include <linux/reboot.h> 42#include <linux/reboot.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/timer.h> 44#include <linux/timer.h>
@@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
6422 return true; 6423 return true;
6423} 6424}
6424 6425
6425 6426static int dac960_proc_show(struct seq_file *m, void *v)
6426/*
6427 DAC960_ProcReadStatus implements reading /proc/rd/status.
6428*/
6429
6430static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
6431 int Count, int *EOF, void *Data)
6432{ 6427{
6433 unsigned char *StatusMessage = "OK\n"; 6428 unsigned char *StatusMessage = "OK\n";
6434 int ControllerNumber, BytesAvailable; 6429 int ControllerNumber;
6435 for (ControllerNumber = 0; 6430 for (ControllerNumber = 0;
6436 ControllerNumber < DAC960_ControllerCount; 6431 ControllerNumber < DAC960_ControllerCount;
6437 ControllerNumber++) 6432 ControllerNumber++)
@@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
6444 break; 6439 break;
6445 } 6440 }
6446 } 6441 }
6447 BytesAvailable = strlen(StatusMessage) - Offset; 6442 seq_puts(m, StatusMessage);
6448 if (Count >= BytesAvailable) 6443 return 0;
6449 {
6450 Count = BytesAvailable;
6451 *EOF = true;
6452 }
6453 if (Count <= 0) return 0;
6454 *Start = Page;
6455 memcpy(Page, &StatusMessage[Offset], Count);
6456 return Count;
6457} 6444}
6458 6445
6446static int dac960_proc_open(struct inode *inode, struct file *file)
6447{
6448 return single_open(file, dac960_proc_show, NULL);
6449}
6459 6450
6460/* 6451static const struct file_operations dac960_proc_fops = {
6461 DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. 6452 .owner = THIS_MODULE,
6462*/ 6453 .open = dac960_proc_open,
6454 .read = seq_read,
6455 .llseek = seq_lseek,
6456 .release = single_release,
6457};
6463 6458
6464static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, 6459static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
6465 int Count, int *EOF, void *Data)
6466{ 6460{
6467 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6461 DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
6468 int BytesAvailable = Controller->InitialStatusLength - Offset; 6462 seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
6469 if (Count >= BytesAvailable) 6463 return 0;
6470 {
6471 Count = BytesAvailable;
6472 *EOF = true;
6473 }
6474 if (Count <= 0) return 0;
6475 *Start = Page;
6476 memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count);
6477 return Count;
6478} 6464}
6479 6465
6466static int dac960_initial_status_proc_open(struct inode *inode, struct file *file)
6467{
6468 return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data);
6469}
6480 6470
6481/* 6471static const struct file_operations dac960_initial_status_proc_fops = {
6482 DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. 6472 .owner = THIS_MODULE,
6483*/ 6473 .open = dac960_initial_status_proc_open,
6474 .read = seq_read,
6475 .llseek = seq_lseek,
6476 .release = single_release,
6477};
6484 6478
6485static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, 6479static int dac960_current_status_proc_show(struct seq_file *m, void *v)
6486 int Count, int *EOF, void *Data)
6487{ 6480{
6488 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6481 DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
6489 unsigned char *StatusMessage = 6482 unsigned char *StatusMessage =
6490 "No Rebuild or Consistency Check in Progress\n"; 6483 "No Rebuild or Consistency Check in Progress\n";
6491 int ProgressMessageLength = strlen(StatusMessage); 6484 int ProgressMessageLength = strlen(StatusMessage);
6492 int BytesAvailable;
6493 if (jiffies != Controller->LastCurrentStatusTime) 6485 if (jiffies != Controller->LastCurrentStatusTime)
6494 { 6486 {
6495 Controller->CurrentStatusLength = 0; 6487 Controller->CurrentStatusLength = 0;
@@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
6513 } 6505 }
6514 Controller->LastCurrentStatusTime = jiffies; 6506 Controller->LastCurrentStatusTime = jiffies;
6515 } 6507 }
6516 BytesAvailable = Controller->CurrentStatusLength - Offset; 6508 seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer);
6517 if (Count >= BytesAvailable) 6509 return 0;
6518 {
6519 Count = BytesAvailable;
6520 *EOF = true;
6521 }
6522 if (Count <= 0) return 0;
6523 *Start = Page;
6524 memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
6525 return Count;
6526} 6510}
6527 6511
6512static int dac960_current_status_proc_open(struct inode *inode, struct file *file)
6513{
6514 return single_open(file, dac960_current_status_proc_show, PDE(inode)->data);
6515}
6528 6516
6529/* 6517static const struct file_operations dac960_current_status_proc_fops = {
6530 DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. 6518 .owner = THIS_MODULE,
6531*/ 6519 .open = dac960_current_status_proc_open,
6520 .read = seq_read,
6521 .llseek = seq_lseek,
6522 .release = single_release,
6523};
6532 6524
6533static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, 6525static int dac960_user_command_proc_show(struct seq_file *m, void *v)
6534 int Count, int *EOF, void *Data)
6535{ 6526{
6536 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6527 DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
6537 int BytesAvailable = Controller->UserStatusLength - Offset;
6538 if (Count >= BytesAvailable)
6539 {
6540 Count = BytesAvailable;
6541 *EOF = true;
6542 }
6543 if (Count <= 0) return 0;
6544 *Start = Page;
6545 memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
6546 return Count;
6547}
6548 6528
6529 seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer);
6530 return 0;
6531}
6549 6532
6550/* 6533static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
6551 DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. 6534{
6552*/ 6535 return single_open(file, dac960_user_command_proc_show, PDE(inode)->data);
6536}
6553 6537
6554static int DAC960_ProcWriteUserCommand(struct file *file, 6538static ssize_t dac960_user_command_proc_write(struct file *file,
6555 const char __user *Buffer, 6539 const char __user *Buffer,
6556 unsigned long Count, void *Data) 6540 size_t Count, loff_t *pos)
6557{ 6541{
6558 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6542 DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
6559 unsigned char CommandBuffer[80]; 6543 unsigned char CommandBuffer[80];
6560 int Length; 6544 int Length;
6561 if (Count > sizeof(CommandBuffer)-1) return -EINVAL; 6545 if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -6572,6 +6556,14 @@ static int DAC960_ProcWriteUserCommand(struct file *file,
6572 ? Count : -EBUSY); 6556 ? Count : -EBUSY);
6573} 6557}
6574 6558
6559static const struct file_operations dac960_user_command_proc_fops = {
6560 .owner = THIS_MODULE,
6561 .open = dac960_user_command_proc_open,
6562 .read = seq_read,
6563 .llseek = seq_lseek,
6564 .release = single_release,
6565 .write = dac960_user_command_proc_write,
6566};
6575 6567
6576/* 6568/*
6577 DAC960_CreateProcEntries creates the /proc/rd/... entries for the 6569 DAC960_CreateProcEntries creates the /proc/rd/... entries for the
@@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
6586 6578
6587 if (DAC960_ProcDirectoryEntry == NULL) { 6579 if (DAC960_ProcDirectoryEntry == NULL) {
6588 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); 6580 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
6589 StatusProcEntry = create_proc_read_entry("status", 0, 6581 StatusProcEntry = proc_create("status", 0,
6590 DAC960_ProcDirectoryEntry, 6582 DAC960_ProcDirectoryEntry,
6591 DAC960_ProcReadStatus, NULL); 6583 &dac960_proc_fops);
6592 } 6584 }
6593 6585
6594 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); 6586 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
6595 ControllerProcEntry = proc_mkdir(Controller->ControllerName, 6587 ControllerProcEntry = proc_mkdir(Controller->ControllerName,
6596 DAC960_ProcDirectoryEntry); 6588 DAC960_ProcDirectoryEntry);
6597 create_proc_read_entry("initial_status", 0, ControllerProcEntry, 6589 proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
6598 DAC960_ProcReadInitialStatus, Controller); 6590 proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
6599 create_proc_read_entry("current_status", 0, ControllerProcEntry, 6591 UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
6600 DAC960_ProcReadCurrentStatus, Controller);
6601 UserCommandProcEntry =
6602 create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
6603 ControllerProcEntry, DAC960_ProcReadUserCommand,
6604 Controller);
6605 UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
6606 Controller->ControllerProcEntry = ControllerProcEntry; 6592 Controller->ControllerProcEntry = ControllerProcEntry;
6607} 6593}
6608 6594
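Editorial note: the DAC960 hunks convert legacy read_proc/write_proc handlers to the seq_file interface via single_open() and proc_create_data(). A minimal sketch of the same pattern for a hypothetical status file (all names here are made up):

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int example_status_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "OK\n");	/* emit the whole status in one go */
		return 0;
	}

	static int example_status_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_status_show, NULL);
	}

	static const struct file_operations example_status_fops = {
		.owner		= THIS_MODULE,
		.open		= example_status_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* registration, e.g. from module init:
	 * proc_create("example_status", 0, NULL, &example_status_fops);
	 */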
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 24c3e21ab263..fb5be2d95d52 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -36,9 +36,11 @@
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/jiffies.h>
39#include <linux/hdreg.h> 40#include <linux/hdreg.h>
40#include <linux/spinlock.h> 41#include <linux/spinlock.h>
41#include <linux/compat.h> 42#include <linux/compat.h>
43#include <linux/mutex.h>
42#include <asm/uaccess.h> 44#include <asm/uaccess.h>
43#include <asm/io.h> 45#include <asm/io.h>
44 46
@@ -155,6 +157,10 @@ static struct board_type products[] = {
155 157
156static ctlr_info_t *hba[MAX_CTLR]; 158static ctlr_info_t *hba[MAX_CTLR];
157 159
160static struct task_struct *cciss_scan_thread;
161static DEFINE_MUTEX(scan_mutex);
162static LIST_HEAD(scan_q);
163
158static void do_cciss_request(struct request_queue *q); 164static void do_cciss_request(struct request_queue *q);
159static irqreturn_t do_cciss_intr(int irq, void *dev_id); 165static irqreturn_t do_cciss_intr(int irq, void *dev_id);
160static int cciss_open(struct block_device *bdev, fmode_t mode); 166static int cciss_open(struct block_device *bdev, fmode_t mode);
@@ -164,9 +170,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
164static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 170static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
165 171
166static int cciss_revalidate(struct gendisk *disk); 172static int cciss_revalidate(struct gendisk *disk);
167static int rebuild_lun_table(ctlr_info_t *h, int first_time); 173static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
168static int deregister_disk(ctlr_info_t *h, int drv_index, 174static int deregister_disk(ctlr_info_t *h, int drv_index,
169 int clear_all); 175 int clear_all, int via_ioctl);
170 176
171static void cciss_read_capacity(int ctlr, int logvol, int withirq, 177static void cciss_read_capacity(int ctlr, int logvol, int withirq,
172 sector_t *total_size, unsigned int *block_size); 178 sector_t *total_size, unsigned int *block_size);
@@ -189,8 +195,13 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
189static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); 195static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
190 196
191static void fail_all_cmds(unsigned long ctlr); 197static void fail_all_cmds(unsigned long ctlr);
198static int add_to_scan_list(struct ctlr_info *h);
192static int scan_thread(void *data); 199static int scan_thread(void *data);
193static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); 200static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
201static void cciss_hba_release(struct device *dev);
202static void cciss_device_release(struct device *dev);
203static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
204static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
194 205
195#ifdef CONFIG_PROC_FS 206#ifdef CONFIG_PROC_FS
196static void cciss_procinit(int i); 207static void cciss_procinit(int i);
@@ -245,7 +256,10 @@ static inline void removeQ(CommandList_struct *c)
245 256
246#include "cciss_scsi.c" /* For SCSI tape support */ 257#include "cciss_scsi.c" /* For SCSI tape support */
247 258
248#define RAID_UNKNOWN 6 259static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
260 "UNKNOWN"
261};
262#define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
249 263
250#ifdef CONFIG_PROC_FS 264#ifdef CONFIG_PROC_FS
251 265
@@ -255,9 +269,6 @@ static inline void removeQ(CommandList_struct *c)
255#define ENG_GIG 1000000000 269#define ENG_GIG 1000000000
256#define ENG_GIG_FACTOR (ENG_GIG/512) 270#define ENG_GIG_FACTOR (ENG_GIG/512)
257#define ENGAGE_SCSI "engage scsi" 271#define ENGAGE_SCSI "engage scsi"
258static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
259 "UNKNOWN"
260};
261 272
262static struct proc_dir_entry *proc_cciss; 273static struct proc_dir_entry *proc_cciss;
263 274
@@ -318,7 +329,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
318 ctlr_info_t *h = seq->private; 329 ctlr_info_t *h = seq->private;
319 unsigned ctlr = h->ctlr; 330 unsigned ctlr = h->ctlr;
320 loff_t *pos = v; 331 loff_t *pos = v;
321 drive_info_struct *drv = &h->drv[*pos]; 332 drive_info_struct *drv = h->drv[*pos];
322 333
323 if (*pos > h->highest_lun) 334 if (*pos > h->highest_lun)
324 return 0; 335 return 0;
@@ -331,7 +342,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
331 vol_sz_frac *= 100; 342 vol_sz_frac *= 100;
332 sector_div(vol_sz_frac, ENG_GIG_FACTOR); 343 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
333 344
334 if (drv->raid_level > 5) 345 if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
335 drv->raid_level = RAID_UNKNOWN; 346 drv->raid_level = RAID_UNKNOWN;
336 seq_printf(seq, "cciss/c%dd%d:" 347 seq_printf(seq, "cciss/c%dd%d:"
337 "\t%4u.%02uGB\tRAID %s\n", 348 "\t%4u.%02uGB\tRAID %s\n",
@@ -426,7 +437,7 @@ out:
426 return err; 437 return err;
427} 438}
428 439
429static struct file_operations cciss_proc_fops = { 440static const struct file_operations cciss_proc_fops = {
430 .owner = THIS_MODULE, 441 .owner = THIS_MODULE,
431 .open = cciss_seq_open, 442 .open = cciss_seq_open,
432 .read = seq_read, 443 .read = seq_read,
@@ -454,9 +465,19 @@ static void __devinit cciss_procinit(int i)
454#define to_hba(n) container_of(n, struct ctlr_info, dev) 465#define to_hba(n) container_of(n, struct ctlr_info, dev)
455#define to_drv(n) container_of(n, drive_info_struct, dev) 466#define to_drv(n) container_of(n, drive_info_struct, dev)
456 467
457static struct device_type cciss_host_type = { 468static ssize_t host_store_rescan(struct device *dev,
458 .name = "cciss_host", 469 struct device_attribute *attr,
459}; 470 const char *buf, size_t count)
471{
472 struct ctlr_info *h = to_hba(dev);
473
474 add_to_scan_list(h);
475 wake_up_process(cciss_scan_thread);
476 wait_for_completion_interruptible(&h->scan_wait);
477
478 return count;
479}
480DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
460 481
461static ssize_t dev_show_unique_id(struct device *dev, 482static ssize_t dev_show_unique_id(struct device *dev,
462 struct device_attribute *attr, 483 struct device_attribute *attr,
@@ -560,11 +581,101 @@ static ssize_t dev_show_rev(struct device *dev,
560} 581}
561DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); 582DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
562 583
584static ssize_t cciss_show_lunid(struct device *dev,
585 struct device_attribute *attr, char *buf)
586{
587 drive_info_struct *drv = to_drv(dev);
588 struct ctlr_info *h = to_hba(drv->dev.parent);
589 unsigned long flags;
590 unsigned char lunid[8];
591
592 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
593 if (h->busy_configuring) {
594 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
595 return -EBUSY;
596 }
597 if (!drv->heads) {
598 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
599 return -ENOTTY;
600 }
601 memcpy(lunid, drv->LunID, sizeof(lunid));
602 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
603 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
604 lunid[0], lunid[1], lunid[2], lunid[3],
605 lunid[4], lunid[5], lunid[6], lunid[7]);
606}
607DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
608
609static ssize_t cciss_show_raid_level(struct device *dev,
610 struct device_attribute *attr, char *buf)
611{
612 drive_info_struct *drv = to_drv(dev);
613 struct ctlr_info *h = to_hba(drv->dev.parent);
614 int raid;
615 unsigned long flags;
616
617 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
618 if (h->busy_configuring) {
619 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
620 return -EBUSY;
621 }
622 raid = drv->raid_level;
623 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
624 if (raid < 0 || raid > RAID_UNKNOWN)
625 raid = RAID_UNKNOWN;
626
627 return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
628 raid_label[raid]);
629}
630DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
631
632static ssize_t cciss_show_usage_count(struct device *dev,
633 struct device_attribute *attr, char *buf)
634{
635 drive_info_struct *drv = to_drv(dev);
636 struct ctlr_info *h = to_hba(drv->dev.parent);
637 unsigned long flags;
638 int count;
639
640 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
641 if (h->busy_configuring) {
642 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
643 return -EBUSY;
644 }
645 count = drv->usage_count;
646 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
647 return snprintf(buf, 20, "%d\n", count);
648}
649DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
650
651static struct attribute *cciss_host_attrs[] = {
652 &dev_attr_rescan.attr,
653 NULL
654};
655
656static struct attribute_group cciss_host_attr_group = {
657 .attrs = cciss_host_attrs,
658};
659
660static const struct attribute_group *cciss_host_attr_groups[] = {
661 &cciss_host_attr_group,
662 NULL
663};
664
665static struct device_type cciss_host_type = {
666 .name = "cciss_host",
667 .groups = cciss_host_attr_groups,
668 .release = cciss_hba_release,
669};
670
563static struct attribute *cciss_dev_attrs[] = { 671static struct attribute *cciss_dev_attrs[] = {
564 &dev_attr_unique_id.attr, 672 &dev_attr_unique_id.attr,
565 &dev_attr_model.attr, 673 &dev_attr_model.attr,
566 &dev_attr_vendor.attr, 674 &dev_attr_vendor.attr,
567 &dev_attr_rev.attr, 675 &dev_attr_rev.attr,
676 &dev_attr_lunid.attr,
677 &dev_attr_raid_level.attr,
678 &dev_attr_usage_count.attr,
568 NULL 679 NULL
569}; 680};
570 681
@@ -580,12 +691,24 @@ static const struct attribute_group *cciss_dev_attr_groups[] = {
580static struct device_type cciss_dev_type = { 691static struct device_type cciss_dev_type = {
581 .name = "cciss_device", 692 .name = "cciss_device",
582 .groups = cciss_dev_attr_groups, 693 .groups = cciss_dev_attr_groups,
694 .release = cciss_device_release,
583}; 695};
584 696
585static struct bus_type cciss_bus_type = { 697static struct bus_type cciss_bus_type = {
586 .name = "cciss", 698 .name = "cciss",
587}; 699};
588 700
701/*
702 * cciss_hba_release is called when the reference count
703 * of h->dev goes to zero.
704 */
705static void cciss_hba_release(struct device *dev)
706{
707 /*
708 * nothing to do, but need this to avoid a warning
709 * about not having a release handler from lib/kref.c.
710 */
711}
589 712
590/* 713/*
591 * Initialize sysfs entry for each controller. This sets up and registers 714 * Initialize sysfs entry for each controller. This sets up and registers
@@ -609,6 +732,16 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
609static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) 732static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
610{ 733{
611 device_del(&h->dev); 734 device_del(&h->dev);
735 put_device(&h->dev); /* final put. */
736}
737
738/* cciss_device_release is called when the reference count
739 * of h->drv[x]dev goes to zero.
740 */
741static void cciss_device_release(struct device *dev)
742{
743 drive_info_struct *drv = to_drv(dev);
744 kfree(drv);
612} 745}
613 746
614/* 747/*
@@ -617,24 +750,39 @@ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
617 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from 750 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from
618 * /sys/block/cciss!c#d# to this entry. 751 * /sys/block/cciss!c#d# to this entry.
619 */ 752 */
620static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, 753static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
621 drive_info_struct *drv,
622 int drv_index) 754 int drv_index)
623{ 755{
624 device_initialize(&drv->dev); 756 struct device *dev;
625 drv->dev.type = &cciss_dev_type; 757
626 drv->dev.bus = &cciss_bus_type; 758 if (h->drv[drv_index]->device_initialized)
627 dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); 759 return 0;
628 drv->dev.parent = &h->dev; 760
629 return device_add(&drv->dev); 761 dev = &h->drv[drv_index]->dev;
762 device_initialize(dev);
763 dev->type = &cciss_dev_type;
764 dev->bus = &cciss_bus_type;
765 dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
766 dev->parent = &h->dev;
767 h->drv[drv_index]->device_initialized = 1;
768 return device_add(dev);
630} 769}
631 770
632/* 771/*
633 * Remove sysfs entries for a logical drive. 772 * Remove sysfs entries for a logical drive.
634 */ 773 */
635static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) 774static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
775 int ctlr_exiting)
636{ 776{
637 device_del(&drv->dev); 777 struct device *dev = &h->drv[drv_index]->dev;
778
779 /* special case for c*d0, we only destroy it on controller exit */
780 if (drv_index == 0 && !ctlr_exiting)
781 return;
782
783 device_del(dev);
784 put_device(dev); /* the "final" put. */
785 h->drv[drv_index] = NULL;
638} 786}
639 787
640/* 788/*
@@ -751,7 +899,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
751 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); 899 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
752#endif /* CCISS_DEBUG */ 900#endif /* CCISS_DEBUG */
753 901
754 if (host->busy_initializing || drv->busy_configuring) 902 if (drv->busy_configuring)
755 return -EBUSY; 903 return -EBUSY;
756 /* 904 /*
757 * Root is allowed to open raw volume zero even if it's not configured 905 * Root is allowed to open raw volume zero even if it's not configured
@@ -767,7 +915,8 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
767 if (MINOR(bdev->bd_dev) & 0x0f) { 915 if (MINOR(bdev->bd_dev) & 0x0f) {
768 return -ENXIO; 916 return -ENXIO;
769 /* if it is, make sure we have a LUN ID */ 917 /* if it is, make sure we have a LUN ID */
770 } else if (drv->LunID == 0) { 918 } else if (memcmp(drv->LunID, CTLR_LUNID,
919 sizeof(drv->LunID))) {
771 return -ENXIO; 920 return -ENXIO;
772 } 921 }
773 } 922 }
@@ -1132,12 +1281,13 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1132 case CCISS_DEREGDISK: 1281 case CCISS_DEREGDISK:
1133 case CCISS_REGNEWD: 1282 case CCISS_REGNEWD:
1134 case CCISS_REVALIDVOLS: 1283 case CCISS_REVALIDVOLS:
1135 return rebuild_lun_table(host, 0); 1284 return rebuild_lun_table(host, 0, 1);
1136 1285
1137 case CCISS_GETLUNINFO:{ 1286 case CCISS_GETLUNINFO:{
1138 LogvolInfo_struct luninfo; 1287 LogvolInfo_struct luninfo;
1139 1288
1140 luninfo.LunID = drv->LunID; 1289 memcpy(&luninfo.LunID, drv->LunID,
1290 sizeof(luninfo.LunID));
1141 luninfo.num_opens = drv->usage_count; 1291 luninfo.num_opens = drv->usage_count;
1142 luninfo.num_parts = 0; 1292 luninfo.num_parts = 0;
1143 if (copy_to_user(argp, &luninfo, 1293 if (copy_to_user(argp, &luninfo,
@@ -1475,7 +1625,10 @@ static void cciss_check_queues(ctlr_info_t *h)
1475 /* make sure the disk has been added and the drive is real 1625 /* make sure the disk has been added and the drive is real
1476 * because this can be called from the middle of init_one. 1626 * because this can be called from the middle of init_one.
1477 */ 1627 */
1478 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) 1628 if (!h->drv[curr_queue])
1629 continue;
1630 if (!(h->drv[curr_queue]->queue) ||
1631 !(h->drv[curr_queue]->heads))
1479 continue; 1632 continue;
1480 blk_start_queue(h->gendisk[curr_queue]->queue); 1633 blk_start_queue(h->gendisk[curr_queue]->queue);
1481 1634
@@ -1532,13 +1685,11 @@ static void cciss_softirq_done(struct request *rq)
1532 spin_unlock_irqrestore(&h->lock, flags); 1685 spin_unlock_irqrestore(&h->lock, flags);
1533} 1686}
1534 1687
1535static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], 1688static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
1536 uint32_t log_unit) 1689 unsigned char scsi3addr[], uint32_t log_unit)
1537{ 1690{
1538 log_unit = h->drv[log_unit].LunID & 0x03fff; 1691 memcpy(scsi3addr, h->drv[log_unit]->LunID,
1539 memset(&scsi3addr[4], 0, 4); 1692 sizeof(h->drv[log_unit]->LunID));
1540 memcpy(&scsi3addr[0], &log_unit, 4);
1541 scsi3addr[3] |= 0x40;
1542} 1693}
1543 1694
1544/* This function gets the SCSI vendor, model, and revision of a logical drive 1695/* This function gets the SCSI vendor, model, and revision of a logical drive
@@ -1615,16 +1766,23 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1615 return; 1766 return;
1616} 1767}
1617 1768
1618static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, 1769/*
1770 * cciss_add_disk sets up the block device queue for a logical drive
1771 */
1772static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1619 int drv_index) 1773 int drv_index)
1620{ 1774{
1621 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1775 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1776 if (!disk->queue)
1777 goto init_queue_failure;
1622 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); 1778 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1623 disk->major = h->major; 1779 disk->major = h->major;
1624 disk->first_minor = drv_index << NWD_SHIFT; 1780 disk->first_minor = drv_index << NWD_SHIFT;
1625 disk->fops = &cciss_fops; 1781 disk->fops = &cciss_fops;
1626 disk->private_data = &h->drv[drv_index]; 1782 if (cciss_create_ld_sysfs_entry(h, drv_index))
1627 disk->driverfs_dev = &h->drv[drv_index].dev; 1783 goto cleanup_queue;
1784 disk->private_data = h->drv[drv_index];
1785 disk->driverfs_dev = &h->drv[drv_index]->dev;
1628 1786
1629 /* Set up queue information */ 1787 /* Set up queue information */
1630 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1788 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1642,14 +1800,21 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1642 disk->queue->queuedata = h; 1800 disk->queue->queuedata = h;
1643 1801
1644 blk_queue_logical_block_size(disk->queue, 1802 blk_queue_logical_block_size(disk->queue,
1645 h->drv[drv_index].block_size); 1803 h->drv[drv_index]->block_size);
1646 1804
1647 /* Make sure all queue data is written out before */ 1805 /* Make sure all queue data is written out before */
1648 /* setting h->drv[drv_index].queue, as setting this */ 1806 /* setting h->drv[drv_index]->queue, as setting this */
1649 /* allows the interrupt handler to start the queue */ 1807 /* allows the interrupt handler to start the queue */
1650 wmb(); 1808 wmb();
1651 h->drv[drv_index].queue = disk->queue; 1809 h->drv[drv_index]->queue = disk->queue;
1652 add_disk(disk); 1810 add_disk(disk);
1811 return 0;
1812
1813cleanup_queue:
1814 blk_cleanup_queue(disk->queue);
1815 disk->queue = NULL;
1816init_queue_failure:
1817 return -1;
1653} 1818}
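The hunk above makes cciss_add_disk() report failure instead of assuming blk_init_queue() always succeeds. For reference, the underlying 2.6-era gendisk/queue registration sequence looks roughly like the sketch below; every "my_"-prefixed identifier is a placeholder, not cciss code.

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);
    static int my_major;
    static const struct block_device_operations my_fops;

    static void my_request_fn(struct request_queue *q)
    {
            /* fetch and complete requests here */
    }

    static int example_register_disk(int index, sector_t nr_sectors)
    {
            struct gendisk *disk = alloc_disk(16);          /* 16 minors per disk */
            struct request_queue *q;

            if (!disk)
                    return -ENOMEM;
            q = blk_init_queue(my_request_fn, &my_lock);    /* can fail */
            if (!q) {
                    put_disk(disk);                         /* undo alloc_disk() */
                    return -ENOMEM;
            }
            disk->major = my_major;
            disk->first_minor = index << 4;
            disk->fops = &my_fops;
            disk->queue = q;
            sprintf(disk->disk_name, "myblk%d", index);
            set_capacity(disk, nr_sectors);
            add_disk(disk);                  /* device becomes visible here */
            return 0;
    }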
1654 1819
1655/* This function will check the usage_count of the drive to be updated/added. 1820/* This function will check the usage_count of the drive to be updated/added.
@@ -1662,7 +1827,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1662 * is also the controller node. Any changes to disk 0 will show up on 1827 * is also the controller node. Any changes to disk 0 will show up on
1663 * the next reboot. 1828 * the next reboot.
1664 */ 1829 */
1665static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) 1830static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
1831 int via_ioctl)
1666{ 1832{
1667 ctlr_info_t *h = hba[ctlr]; 1833 ctlr_info_t *h = hba[ctlr];
1668 struct gendisk *disk; 1834 struct gendisk *disk;
@@ -1672,21 +1838,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1672 unsigned long flags = 0; 1838 unsigned long flags = 0;
1673 int ret = 0; 1839 int ret = 0;
1674 drive_info_struct *drvinfo; 1840 drive_info_struct *drvinfo;
1675 int was_only_controller_node;
1676 1841
1677 /* Get information about the disk and modify the driver structure */ 1842 /* Get information about the disk and modify the driver structure */
1678 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1843 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1679 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); 1844 drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
1680 if (inq_buff == NULL || drvinfo == NULL) 1845 if (inq_buff == NULL || drvinfo == NULL)
1681 goto mem_msg; 1846 goto mem_msg;
1682 1847
1683 /* See if we're trying to update the "controller node"
1684 * this will happen the when the first logical drive gets
1685 * created by ACU.
1686 */
1687 was_only_controller_node = (drv_index == 0 &&
1688 h->drv[0].raid_level == -1);
1689
1690 /* testing to see if 16-byte CDBs are already being used */ 1848 /* testing to see if 16-byte CDBs are already being used */
1691 if (h->cciss_read == CCISS_READ_16) { 1849 if (h->cciss_read == CCISS_READ_16) {
1692 cciss_read_capacity_16(h->ctlr, drv_index, 1, 1850 cciss_read_capacity_16(h->ctlr, drv_index, 1,
@@ -1719,16 +1877,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1719 drvinfo->model, drvinfo->rev); 1877 drvinfo->model, drvinfo->rev);
1720 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, 1878 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1721 sizeof(drvinfo->serial_no)); 1879 sizeof(drvinfo->serial_no));
1880 /* Save the lunid in case we deregister the disk, below. */
1881 memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
1882 sizeof(drvinfo->LunID));
1722 1883
1723 /* Is it the same disk we already know, and nothing's changed? */ 1884 /* Is it the same disk we already know, and nothing's changed? */
1724 if (h->drv[drv_index].raid_level != -1 && 1885 if (h->drv[drv_index]->raid_level != -1 &&
1725 ((memcmp(drvinfo->serial_no, 1886 ((memcmp(drvinfo->serial_no,
1726 h->drv[drv_index].serial_no, 16) == 0) && 1887 h->drv[drv_index]->serial_no, 16) == 0) &&
1727 drvinfo->block_size == h->drv[drv_index].block_size && 1888 drvinfo->block_size == h->drv[drv_index]->block_size &&
1728 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && 1889 drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
1729 drvinfo->heads == h->drv[drv_index].heads && 1890 drvinfo->heads == h->drv[drv_index]->heads &&
1730 drvinfo->sectors == h->drv[drv_index].sectors && 1891 drvinfo->sectors == h->drv[drv_index]->sectors &&
1731 drvinfo->cylinders == h->drv[drv_index].cylinders)) 1892 drvinfo->cylinders == h->drv[drv_index]->cylinders))
1732 /* The disk is unchanged, nothing to update */ 1893 /* The disk is unchanged, nothing to update */
1733 goto freeret; 1894 goto freeret;
1734 1895
@@ -1738,18 +1899,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1738 * If the disk already exists then deregister it before proceeding 1899 * If the disk already exists then deregister it before proceeding
1739 * (unless it's the first disk (for the controller node). 1900 * (unless it's the first disk (for the controller node).
1740 */ 1901 */
1741 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { 1902 if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
1742 printk(KERN_WARNING "disk %d has changed.\n", drv_index); 1903 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
1743 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1904 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1744 h->drv[drv_index].busy_configuring = 1; 1905 h->drv[drv_index]->busy_configuring = 1;
1745 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1906 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1746 1907
1747 /* deregister_disk sets h->drv[drv_index].queue = NULL 1908 /* deregister_disk sets h->drv[drv_index]->queue = NULL
1748 * which keeps the interrupt handler from starting 1909 * which keeps the interrupt handler from starting
1749 * the queue. 1910 * the queue.
1750 */ 1911 */
1751 ret = deregister_disk(h, drv_index, 0); 1912 ret = deregister_disk(h, drv_index, 0, via_ioctl);
1752 h->drv[drv_index].busy_configuring = 0;
1753 } 1913 }
1754 1914
1755 /* If the disk is in use return */ 1915 /* If the disk is in use return */
@@ -1757,22 +1917,31 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1757 goto freeret; 1917 goto freeret;
1758 1918
1759 /* Save the new information from cciss_geometry_inquiry 1919 /* Save the new information from cciss_geometry_inquiry
1760 * and serial number inquiry. 1920 * and serial number inquiry. If the disk was deregistered
1921 * above, then h->drv[drv_index] will be NULL.
1761 */ 1922 */
1762 h->drv[drv_index].block_size = drvinfo->block_size; 1923 if (h->drv[drv_index] == NULL) {
1763 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; 1924 drvinfo->device_initialized = 0;
1764 h->drv[drv_index].heads = drvinfo->heads; 1925 h->drv[drv_index] = drvinfo;
1765 h->drv[drv_index].sectors = drvinfo->sectors; 1926 drvinfo = NULL; /* so it won't be freed below. */
1766 h->drv[drv_index].cylinders = drvinfo->cylinders; 1927 } else {
1767 h->drv[drv_index].raid_level = drvinfo->raid_level; 1928 /* special case for cxd0 */
1768 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); 1929 h->drv[drv_index]->block_size = drvinfo->block_size;
1769 memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); 1930 h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
1770 memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); 1931 h->drv[drv_index]->heads = drvinfo->heads;
1771 memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); 1932 h->drv[drv_index]->sectors = drvinfo->sectors;
1933 h->drv[drv_index]->cylinders = drvinfo->cylinders;
1934 h->drv[drv_index]->raid_level = drvinfo->raid_level;
1935 memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
1936 memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
1937 VENDOR_LEN + 1);
1938 memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
1939 memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
1940 }
1772 1941
1773 ++h->num_luns; 1942 ++h->num_luns;
1774 disk = h->gendisk[drv_index]; 1943 disk = h->gendisk[drv_index];
1775 set_capacity(disk, h->drv[drv_index].nr_blocks); 1944 set_capacity(disk, h->drv[drv_index]->nr_blocks);
1776 1945
1777 /* If it's not disk 0 (drv_index != 0) 1946 /* If it's not disk 0 (drv_index != 0)
1778 * or if it was disk 0, but there was previously 1947 * or if it was disk 0, but there was previously
@@ -1780,8 +1949,15 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1780 * (raid_leve == -1) then we want to update the 1949 * (raid_leve == -1) then we want to update the
1781 * logical drive's information. 1950 * logical drive's information.
1782 */ 1951 */
1783 if (drv_index || first_time) 1952 if (drv_index || first_time) {
1784 cciss_add_disk(h, disk, drv_index); 1953 if (cciss_add_disk(h, disk, drv_index) != 0) {
1954 cciss_free_gendisk(h, drv_index);
1955 cciss_free_drive_info(h, drv_index);
1956 printk(KERN_WARNING "cciss:%d could not update "
1957 "disk %d\n", h->ctlr, drv_index);
1958 --h->num_luns;
1959 }
1960 }
1785 1961
1786freeret: 1962freeret:
1787 kfree(inq_buff); 1963 kfree(inq_buff);
@@ -1793,28 +1969,70 @@ mem_msg:
1793} 1969}
1794 1970
1795/* This function will find the first index of the controllers drive array 1971/* This function will find the first index of the controllers drive array
1796 * that has a -1 for the raid_level and will return that index. This is 1972 * that has a null drv pointer and allocate the drive info struct and
1797 * where new drives will be added. If the index to be returned is greater 1973 * will return that index. This is where new drives will be added.
1798 * than the highest_lun index for the controller then highest_lun is set 1974 * If the index to be returned is greater than the highest_lun index for
1799 * to this new index. If there are no available indexes then -1 is returned. 1975 * the controller then highest_lun is set to this new index.
1800 * "controller_node" is used to know if this is a real logical drive, or just 1976 * If there are no available indexes or if the allocation fails, then -1
1801 * the controller node, which determines if this counts towards highest_lun. 1977 * is returned. "controller_node" is used to know if this is a real
1978 * logical drive, or just the controller node, which determines if this
1979 * counts towards highest_lun.
1802 */ 1980 */
1803static int cciss_find_free_drive_index(int ctlr, int controller_node) 1981static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
1804{ 1982{
1805 int i; 1983 int i;
1984 drive_info_struct *drv;
1806 1985
1986 /* Search for an empty slot for our drive info */
1807 for (i = 0; i < CISS_MAX_LUN; i++) { 1987 for (i = 0; i < CISS_MAX_LUN; i++) {
1808 if (hba[ctlr]->drv[i].raid_level == -1) { 1988
1809 if (i > hba[ctlr]->highest_lun) 1989 /* if not cxd0 case, and it's occupied, skip it. */
1810 if (!controller_node) 1990 if (h->drv[i] && i != 0)
1811 hba[ctlr]->highest_lun = i; 1991 continue;
1992 /*
1993 * If it's cxd0 case, and drv is alloc'ed already, and a
1994 * disk is configured there, skip it.
1995 */
1996 if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
1997 continue;
1998
1999 /*
2000 * We've found an empty slot. Update highest_lun
2001 * provided this isn't just the fake cxd0 controller node.
2002 */
2003 if (i > h->highest_lun && !controller_node)
2004 h->highest_lun = i;
2005
2006 /* If adding a real disk at cxd0, and it's already alloc'ed */
2007 if (i == 0 && h->drv[i] != NULL)
1812 return i; 2008 return i;
1813 } 2009
2010 /*
2011 * Found an empty slot, not already alloc'ed. Allocate it.
2012 * Mark it with raid_level == -1, so we know it's new later on.
2013 */
2014 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
2015 if (!drv)
2016 return -1;
2017 drv->raid_level = -1; /* so we know it's new */
2018 h->drv[i] = drv;
2019 return i;
1814 } 2020 }
1815 return -1; 2021 return -1;
1816} 2022}
1817 2023
2024static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
2025{
2026 kfree(h->drv[drv_index]);
2027 h->drv[drv_index] = NULL;
2028}
2029
2030static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
2031{
2032 put_disk(h->gendisk[drv_index]);
2033 h->gendisk[drv_index] = NULL;
2034}
2035
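These two helpers exist because h->drv[] is now an array of pointers: a NULL entry marks a free slot, and raid_level == -1 marks a freshly allocated but still unconfigured one. A self-contained sketch of that slot lifecycle, using shortened stand-in types rather than the driver's own:

    #include <linux/slab.h>

    struct example_drive { int raid_level; };
    struct example_ctlr  { struct example_drive *drv[16]; };

    /* NULL in drv[] means "free slot"; raid_level == -1 means "new, unconfigured" */
    static int example_alloc_slot(struct example_ctlr *h, int i)
    {
            struct example_drive *d = kzalloc(sizeof(*d), GFP_KERNEL);

            if (!d)
                    return -1;
            d->raid_level = -1;
            h->drv[i] = d;
            return i;
    }

    static void example_free_slot(struct example_ctlr *h, int i)
    {
            kfree(h->drv[i]);
            h->drv[i] = NULL;       /* later scans simply skip NULL holes */
    }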
1818/* cciss_add_gendisk finds a free hba[]->drv structure 2036/* cciss_add_gendisk finds a free hba[]->drv structure
1819 * and allocates a gendisk if needed, and sets the lunid 2037 * and allocates a gendisk if needed, and sets the lunid
1820 * in the drvinfo structure. It returns the index into 2038 * in the drvinfo structure. It returns the index into
@@ -1824,13 +2042,15 @@ static int cciss_find_free_drive_index(int ctlr, int controller_node)
1824 * a means to talk to the controller in case no logical 2042 * a means to talk to the controller in case no logical
1825 * drives have yet been configured. 2043 * drives have yet been configured.
1826 */ 2044 */
1827static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) 2045static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
2046 int controller_node)
1828{ 2047{
1829 int drv_index; 2048 int drv_index;
1830 2049
1831 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); 2050 drv_index = cciss_alloc_drive_info(h, controller_node);
1832 if (drv_index == -1) 2051 if (drv_index == -1)
1833 return -1; 2052 return -1;
2053
1834 /*Check if the gendisk needs to be allocated */ 2054 /*Check if the gendisk needs to be allocated */
1835 if (!h->gendisk[drv_index]) { 2055 if (!h->gendisk[drv_index]) {
1836 h->gendisk[drv_index] = 2056 h->gendisk[drv_index] =
@@ -1839,23 +2059,24 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1839 printk(KERN_ERR "cciss%d: could not " 2059 printk(KERN_ERR "cciss%d: could not "
1840 "allocate a new disk %d\n", 2060 "allocate a new disk %d\n",
1841 h->ctlr, drv_index); 2061 h->ctlr, drv_index);
1842 return -1; 2062 goto err_free_drive_info;
1843 } 2063 }
1844 } 2064 }
1845 h->drv[drv_index].LunID = lunid; 2065 memcpy(h->drv[drv_index]->LunID, lunid,
1846 if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) 2066 sizeof(h->drv[drv_index]->LunID));
2067 if (cciss_create_ld_sysfs_entry(h, drv_index))
1847 goto err_free_disk; 2068 goto err_free_disk;
1848
1849 /* Don't need to mark this busy because nobody */ 2069 /* Don't need to mark this busy because nobody */
1850 /* else knows about this disk yet to contend */ 2070 /* else knows about this disk yet to contend */
1851 /* for access to it. */ 2071 /* for access to it. */
1852 h->drv[drv_index].busy_configuring = 0; 2072 h->drv[drv_index]->busy_configuring = 0;
1853 wmb(); 2073 wmb();
1854 return drv_index; 2074 return drv_index;
1855 2075
1856err_free_disk: 2076err_free_disk:
1857 put_disk(h->gendisk[drv_index]); 2077 cciss_free_gendisk(h, drv_index);
1858 h->gendisk[drv_index] = NULL; 2078err_free_drive_info:
2079 cciss_free_drive_info(h, drv_index);
1859 return -1; 2080 return -1;
1860} 2081}
1861 2082
@@ -1872,21 +2093,25 @@ static void cciss_add_controller_node(ctlr_info_t *h)
1872 if (h->gendisk[0] != NULL) /* already did this? Then bail. */ 2093 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
1873 return; 2094 return;
1874 2095
1875 drv_index = cciss_add_gendisk(h, 0, 1); 2096 drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
1876 if (drv_index == -1) { 2097 if (drv_index == -1)
1877 printk(KERN_WARNING "cciss%d: could not " 2098 goto error;
1878 "add disk 0.\n", h->ctlr); 2099 h->drv[drv_index]->block_size = 512;
1879 return; 2100 h->drv[drv_index]->nr_blocks = 0;
1880 } 2101 h->drv[drv_index]->heads = 0;
1881 h->drv[drv_index].block_size = 512; 2102 h->drv[drv_index]->sectors = 0;
1882 h->drv[drv_index].nr_blocks = 0; 2103 h->drv[drv_index]->cylinders = 0;
1883 h->drv[drv_index].heads = 0; 2104 h->drv[drv_index]->raid_level = -1;
1884 h->drv[drv_index].sectors = 0; 2105 memset(h->drv[drv_index]->serial_no, 0, 16);
1885 h->drv[drv_index].cylinders = 0;
1886 h->drv[drv_index].raid_level = -1;
1887 memset(h->drv[drv_index].serial_no, 0, 16);
1888 disk = h->gendisk[drv_index]; 2106 disk = h->gendisk[drv_index];
1889 cciss_add_disk(h, disk, drv_index); 2107 if (cciss_add_disk(h, disk, drv_index) == 0)
2108 return;
2109 cciss_free_gendisk(h, drv_index);
2110 cciss_free_drive_info(h, drv_index);
2111error:
2112 printk(KERN_WARNING "cciss%d: could not "
2113 "add disk 0.\n", h->ctlr);
2114 return;
1890} 2115}
1891 2116
1892/* This function will add and remove logical drives from the Logical 2117/* This function will add and remove logical drives from the Logical
@@ -1897,7 +2122,8 @@ static void cciss_add_controller_node(ctlr_info_t *h)
1897 * INPUT 2122 * INPUT
1898 * h = The controller to perform the operations on 2123 * h = The controller to perform the operations on
1899 */ 2124 */
1900static int rebuild_lun_table(ctlr_info_t *h, int first_time) 2125static int rebuild_lun_table(ctlr_info_t *h, int first_time,
2126 int via_ioctl)
1901{ 2127{
1902 int ctlr = h->ctlr; 2128 int ctlr = h->ctlr;
1903 int num_luns; 2129 int num_luns;
@@ -1907,7 +2133,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1907 int i; 2133 int i;
1908 int drv_found; 2134 int drv_found;
1909 int drv_index = 0; 2135 int drv_index = 0;
1910 __u32 lunid = 0; 2136 unsigned char lunid[8] = CTLR_LUNID;
1911 unsigned long flags; 2137 unsigned long flags;
1912 2138
1913 if (!capable(CAP_SYS_RAWIO)) 2139 if (!capable(CAP_SYS_RAWIO))
@@ -1960,13 +2186,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1960 drv_found = 0; 2186 drv_found = 0;
1961 2187
1962 /* skip holes in the array from already deleted drives */ 2188 /* skip holes in the array from already deleted drives */
1963 if (h->drv[i].raid_level == -1) 2189 if (h->drv[i] == NULL)
1964 continue; 2190 continue;
1965 2191
1966 for (j = 0; j < num_luns; j++) { 2192 for (j = 0; j < num_luns; j++) {
1967 memcpy(&lunid, &ld_buff->LUN[j][0], 4); 2193 memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
1968 lunid = le32_to_cpu(lunid); 2194 if (memcmp(h->drv[i]->LunID, lunid,
1969 if (h->drv[i].LunID == lunid) { 2195 sizeof(lunid)) == 0) {
1970 drv_found = 1; 2196 drv_found = 1;
1971 break; 2197 break;
1972 } 2198 }
@@ -1974,11 +2200,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1974 if (!drv_found) { 2200 if (!drv_found) {
1975 /* Deregister it from the OS, it's gone. */ 2201 /* Deregister it from the OS, it's gone. */
1976 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2202 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1977 h->drv[i].busy_configuring = 1; 2203 h->drv[i]->busy_configuring = 1;
1978 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2204 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1979 return_code = deregister_disk(h, i, 1); 2205 return_code = deregister_disk(h, i, 1, via_ioctl);
1980 cciss_destroy_ld_sysfs_entry(&h->drv[i]); 2206 if (h->drv[i] != NULL)
1981 h->drv[i].busy_configuring = 0; 2207 h->drv[i]->busy_configuring = 0;
1982 } 2208 }
1983 } 2209 }
1984 2210
@@ -1992,17 +2218,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1992 2218
1993 drv_found = 0; 2219 drv_found = 0;
1994 2220
1995 memcpy(&lunid, &ld_buff->LUN[i][0], 4); 2221 memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
1996 lunid = le32_to_cpu(lunid);
1997
1998 /* Find if the LUN is already in the drive array 2222 /* Find if the LUN is already in the drive array
1999 * of the driver. If so then update its info 2223 * of the driver. If so then update its info
2000 * if not in use. If it does not exist then find 2224 * if not in use. If it does not exist then find
2001 * the first free index and add it. 2225 * the first free index and add it.
2002 */ 2226 */
2003 for (j = 0; j <= h->highest_lun; j++) { 2227 for (j = 0; j <= h->highest_lun; j++) {
2004 if (h->drv[j].raid_level != -1 && 2228 if (h->drv[j] != NULL &&
2005 h->drv[j].LunID == lunid) { 2229 memcmp(h->drv[j]->LunID, lunid,
2230 sizeof(h->drv[j]->LunID)) == 0) {
2006 drv_index = j; 2231 drv_index = j;
2007 drv_found = 1; 2232 drv_found = 1;
2008 break; 2233 break;
@@ -2015,7 +2240,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
2015 if (drv_index == -1) 2240 if (drv_index == -1)
2016 goto freeret; 2241 goto freeret;
2017 } 2242 }
2018 cciss_update_drive_info(ctlr, drv_index, first_time); 2243 cciss_update_drive_info(ctlr, drv_index, first_time,
2244 via_ioctl);
2019 } /* end for */ 2245 } /* end for */
2020 2246
2021freeret: 2247freeret:
@@ -2032,6 +2258,25 @@ mem_msg:
2032 goto freeret; 2258 goto freeret;
2033} 2259}
2034 2260
2261static void cciss_clear_drive_info(drive_info_struct *drive_info)
2262{
2263 /* zero out the disk size info */
2264 drive_info->nr_blocks = 0;
2265 drive_info->block_size = 0;
2266 drive_info->heads = 0;
2267 drive_info->sectors = 0;
2268 drive_info->cylinders = 0;
2269 drive_info->raid_level = -1;
2270 memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
2271 memset(drive_info->model, 0, sizeof(drive_info->model));
2272 memset(drive_info->rev, 0, sizeof(drive_info->rev));
2273 memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
2274 /*
2275 * don't clear the LUNID though, we need to remember which
2276 * one this one is.
2277 */
2278}
2279
2035/* This function will deregister the disk and it's queue from the 2280/* This function will deregister the disk and it's queue from the
2036 * kernel. It must be called with the controller lock held and the 2281 * kernel. It must be called with the controller lock held and the
2037 * drv structures busy_configuring flag set. It's parameters are: 2282 * drv structures busy_configuring flag set. It's parameters are:
@@ -2046,43 +2291,48 @@ mem_msg:
2046 * the disk in preparation for re-adding it. In this case 2291 * the disk in preparation for re-adding it. In this case
2047 * the highest_lun should be left unchanged and the LunID 2292 * the highest_lun should be left unchanged and the LunID
2048 * should not be cleared. 2293 * should not be cleared.
2294 * via_ioctl
2295 * This indicates whether we've reached this path via ioctl.
2296 * This affects the maximum usage count allowed for c0d0 to be messed with.
2297 * If this path is reached via ioctl(), then the max_usage_count will
2298 * be 1, as the process calling ioctl() has got to have the device open.
2299 * If we get here via sysfs, then the max usage count will be zero.
2049*/ 2300*/
2050static int deregister_disk(ctlr_info_t *h, int drv_index, 2301static int deregister_disk(ctlr_info_t *h, int drv_index,
2051 int clear_all) 2302 int clear_all, int via_ioctl)
2052{ 2303{
2053 int i; 2304 int i;
2054 struct gendisk *disk; 2305 struct gendisk *disk;
2055 drive_info_struct *drv; 2306 drive_info_struct *drv;
2307 int recalculate_highest_lun;
2056 2308
2057 if (!capable(CAP_SYS_RAWIO)) 2309 if (!capable(CAP_SYS_RAWIO))
2058 return -EPERM; 2310 return -EPERM;
2059 2311
2060 drv = &h->drv[drv_index]; 2312 drv = h->drv[drv_index];
2061 disk = h->gendisk[drv_index]; 2313 disk = h->gendisk[drv_index];
2062 2314
2063 /* make sure logical volume is NOT is use */ 2315 /* make sure logical volume is NOT is use */
2064 if (clear_all || (h->gendisk[0] == disk)) { 2316 if (clear_all || (h->gendisk[0] == disk)) {
2065 if (drv->usage_count > 1) 2317 if (drv->usage_count > via_ioctl)
2066 return -EBUSY; 2318 return -EBUSY;
2067 } else if (drv->usage_count > 0) 2319 } else if (drv->usage_count > 0)
2068 return -EBUSY; 2320 return -EBUSY;
2069 2321
2322 recalculate_highest_lun = (drv == h->drv[h->highest_lun]);
2323
2070 /* invalidate the devices and deregister the disk. If it is disk 2324 /* invalidate the devices and deregister the disk. If it is disk
2071 * zero do not deregister it but just zero out it's values. This 2325 * zero do not deregister it but just zero out it's values. This
2072 * allows us to delete disk zero but keep the controller registered. 2326 * allows us to delete disk zero but keep the controller registered.
2073 */ 2327 */
2074 if (h->gendisk[0] != disk) { 2328 if (h->gendisk[0] != disk) {
2075 struct request_queue *q = disk->queue; 2329 struct request_queue *q = disk->queue;
2076 if (disk->flags & GENHD_FL_UP) 2330 if (disk->flags & GENHD_FL_UP) {
2331 cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
2077 del_gendisk(disk); 2332 del_gendisk(disk);
2078 if (q) {
2079 blk_cleanup_queue(q);
2080 /* Set drv->queue to NULL so that we do not try
2081 * to call blk_start_queue on this queue in the
2082 * interrupt handler
2083 */
2084 drv->queue = NULL;
2085 } 2333 }
2334 if (q)
2335 blk_cleanup_queue(q);
2086 /* If clear_all is set then we are deleting the logical 2336 /* If clear_all is set then we are deleting the logical
2087 * drive, not just refreshing its info. For drives 2337 * drive, not just refreshing its info. For drives
2088 * other than disk 0 we will call put_disk. We do not 2338 * other than disk 0 we will call put_disk. We do not
@@ -2105,34 +2355,20 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
2105 } 2355 }
2106 } else { 2356 } else {
2107 set_capacity(disk, 0); 2357 set_capacity(disk, 0);
2358 cciss_clear_drive_info(drv);
2108 } 2359 }
2109 2360
2110 --h->num_luns; 2361 --h->num_luns;
2111 /* zero out the disk size info */
2112 drv->nr_blocks = 0;
2113 drv->block_size = 0;
2114 drv->heads = 0;
2115 drv->sectors = 0;
2116 drv->cylinders = 0;
2117 drv->raid_level = -1; /* This can be used as a flag variable to
2118 * indicate that this element of the drive
2119 * array is free.
2120 */
2121
2122 if (clear_all) {
2123 /* check to see if it was the last disk */
2124 if (drv == h->drv + h->highest_lun) {
2125 /* if so, find the new hightest lun */
2126 int i, newhighest = -1;
2127 for (i = 0; i <= h->highest_lun; i++) {
2128 /* if the disk has size > 0, it is available */
2129 if (h->drv[i].heads)
2130 newhighest = i;
2131 }
2132 h->highest_lun = newhighest;
2133 }
2134 2362
2135 drv->LunID = 0; 2363 /* if it was the last disk, find the new highest lun */
2364 if (clear_all && recalculate_highest_lun) {
2365 int i, newhighest = -1;
2366 for (i = 0; i <= h->highest_lun; i++) {
2367 /* if the disk has size > 0, it is available */
2368 if (h->drv[i] && h->drv[i]->heads)
2369 newhighest = i;
2370 }
2371 h->highest_lun = newhighest;
2136 } 2372 }
2137 return 0; 2373 return 0;
2138} 2374}
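The via_ioctl rule described in the comment above reduces to a threshold on usage_count. A condensed sketch of the check with simplified names; is_disk0 folds together the clear_all and disk-zero cases handled by the real code, and a non-zero return means "busy":

    /* usage_count counts opens; via_ioctl is 1 when reached from ioctl() */
    static int example_busy_check(int usage_count, int is_disk0, int via_ioctl)
    {
            if (is_disk0)
                    return usage_count > via_ioctl; /* tolerate the caller's own open */
            return usage_count > 0;                 /* other disks must be fully closed */
    }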
@@ -2479,8 +2715,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
2479 } else { /* Get geometry failed */ 2715 } else { /* Get geometry failed */
2480 printk(KERN_WARNING "cciss: reading geometry failed\n"); 2716 printk(KERN_WARNING "cciss: reading geometry failed\n");
2481 } 2717 }
2482 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2483 drv->heads, drv->sectors, drv->cylinders);
2484} 2718}
2485 2719
2486static void 2720static void
@@ -2514,9 +2748,6 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2514 *total_size = 0; 2748 *total_size = 0;
2515 *block_size = BLOCK_SIZE; 2749 *block_size = BLOCK_SIZE;
2516 } 2750 }
2517 if (*total_size != 0)
2518 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2519 (unsigned long long)*total_size+1, *block_size);
2520 kfree(buf); 2751 kfree(buf);
2521} 2752}
2522 2753
@@ -2568,7 +2799,8 @@ static int cciss_revalidate(struct gendisk *disk)
2568 InquiryData_struct *inq_buff = NULL; 2799 InquiryData_struct *inq_buff = NULL;
2569 2800
2570 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { 2801 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2571 if (h->drv[logvol].LunID == drv->LunID) { 2802 if (memcmp(h->drv[logvol]->LunID, drv->LunID,
2803 sizeof(drv->LunID)) == 0) {
2572 FOUND = 1; 2804 FOUND = 1;
2573 break; 2805 break;
2574 } 2806 }
@@ -3053,8 +3285,7 @@ static void do_cciss_request(struct request_queue *q)
3053 /* The first 2 bits are reserved for controller error reporting. */ 3285 /* The first 2 bits are reserved for controller error reporting. */
3054 c->Header.Tag.lower = (c->cmdindex << 3); 3286 c->Header.Tag.lower = (c->cmdindex << 3);
3055 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ 3287 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
3056 c->Header.LUN.LogDev.VolId = drv->LunID; 3288 memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
3057 c->Header.LUN.LogDev.Mode = 1;
3058 c->Request.CDBLen = 10; // 12 byte commands not in FW yet; 3289 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
3059 c->Request.Type.Type = TYPE_CMD; // It is a command. 3290 c->Request.Type.Type = TYPE_CMD; // It is a command.
3060 c->Request.Type.Attribute = ATTR_SIMPLE; 3291 c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3232,20 +3463,121 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
3232 return IRQ_HANDLED; 3463 return IRQ_HANDLED;
3233} 3464}
3234 3465
3466/**
3467 * add_to_scan_list() - add controller to rescan queue
3468 * @h: Pointer to the controller.
3469 *
3470 * Adds the controller to the rescan queue if not already on the queue.
3471 *
3472 * returns 1 if added to the queue, 0 if skipped (could be on the
3473 * queue already, or the controller could be initializing or shutting
3474 * down).
3475 **/
3476static int add_to_scan_list(struct ctlr_info *h)
3477{
3478 struct ctlr_info *test_h;
3479 int found = 0;
3480 int ret = 0;
3481
3482 if (h->busy_initializing)
3483 return 0;
3484
3485 if (!mutex_trylock(&h->busy_shutting_down))
3486 return 0;
3487
3488 mutex_lock(&scan_mutex);
3489 list_for_each_entry(test_h, &scan_q, scan_list) {
3490 if (test_h == h) {
3491 found = 1;
3492 break;
3493 }
3494 }
3495 if (!found && !h->busy_scanning) {
3496 INIT_COMPLETION(h->scan_wait);
3497 list_add_tail(&h->scan_list, &scan_q);
3498 ret = 1;
3499 }
3500 mutex_unlock(&scan_mutex);
3501 mutex_unlock(&h->busy_shutting_down);
3502
3503 return ret;
3504}
3505
3506/**
3507 * remove_from_scan_list() - remove controller from rescan queue
3508 * @h: Pointer to the controller.
3509 *
3510 * Removes the controller from the rescan queue if present. Blocks if
3511 * the controller is currently conducting a rescan.
3512 **/
3513static void remove_from_scan_list(struct ctlr_info *h)
3514{
3515 struct ctlr_info *test_h, *tmp_h;
3516 int scanning = 0;
3517
3518 mutex_lock(&scan_mutex);
3519 list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
3520 if (test_h == h) {
3521 list_del(&h->scan_list);
3522 complete_all(&h->scan_wait);
3523 mutex_unlock(&scan_mutex);
3524 return;
3525 }
3526 }
3527 	if (h->busy_scanning)
3528 		scanning = 1;
3529 mutex_unlock(&scan_mutex);
3530
3531 if (scanning)
3532 wait_for_completion(&h->scan_wait);
3533}
3534
3535/**
3536 * scan_thread() - kernel thread used to rescan controllers
3537 * @data: Ignored.
3538 *
3539 * A kernel thread used to scan for drive topology changes on
3540 * controllers. The thread processes only one controller at a time
3541 * using a queue. Controllers are added to the queue using
3542 * add_to_scan_list() and removed from the queue either after being
3543 * processed or by using remove_from_scan_list().
3544 *
3545 * returns 0.
3546 **/
3235static int scan_thread(void *data) 3547static int scan_thread(void *data)
3236{ 3548{
3237 ctlr_info_t *h = data; 3549 struct ctlr_info *h;
3238 int rc;
3239 DECLARE_COMPLETION_ONSTACK(wait);
3240 h->rescan_wait = &wait;
3241 3550
3242 for (;;) { 3551 while (1) {
3243 rc = wait_for_completion_interruptible(&wait); 3552 set_current_state(TASK_INTERRUPTIBLE);
3553 schedule();
3244 if (kthread_should_stop()) 3554 if (kthread_should_stop())
3245 break; 3555 break;
3246 if (!rc) 3556
3247 rebuild_lun_table(h, 0); 3557 while (1) {
3558 mutex_lock(&scan_mutex);
3559 if (list_empty(&scan_q)) {
3560 mutex_unlock(&scan_mutex);
3561 break;
3562 }
3563
3564 h = list_entry(scan_q.next,
3565 struct ctlr_info,
3566 scan_list);
3567 list_del(&h->scan_list);
3568 h->busy_scanning = 1;
3569 mutex_unlock(&scan_mutex);
3570
3571 if (h) {
3572 rebuild_lun_table(h, 0, 0);
3573 complete_all(&h->scan_wait);
3574 mutex_lock(&scan_mutex);
3575 h->busy_scanning = 0;
3576 mutex_unlock(&scan_mutex);
3577 }
3578 }
3248 } 3579 }
3580
3249 return 0; 3581 return 0;
3250} 3582}
3251 3583
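Taken together, add_to_scan_list(), remove_from_scan_list() and scan_thread() replace the per-controller rescan threads with one global worker draining a mutex-protected queue, plus a per-controller completion for anyone who must wait out an in-progress rescan. A generic, self-contained sketch of that pattern (not the driver's exact code):

    #include <linux/kthread.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/completion.h>
    #include <linux/sched.h>

    static LIST_HEAD(work_q);
    static DEFINE_MUTEX(work_mutex);

    struct work_item {
            struct list_head  node;
            struct completion done;
    };

    static void example_queue_work(struct work_item *w, struct task_struct *worker)
    {
            init_completion(&w->done);
            mutex_lock(&work_mutex);
            list_add_tail(&w->node, &work_q);
            mutex_unlock(&work_mutex);
            wake_up_process(worker);        /* mirrors wake_up_process(cciss_scan_thread) */
    }

    static int example_worker(void *unused)
    {
            while (!kthread_should_stop()) {
                    struct work_item *w = NULL;

                    mutex_lock(&work_mutex);
                    if (!list_empty(&work_q)) {
                            w = list_first_entry(&work_q, struct work_item, node);
                            list_del(&w->node);
                    }
                    mutex_unlock(&work_mutex);

                    if (w) {
                            /* do the rescan-like work here */
                            complete_all(&w->done); /* unblock any waiters */
                            continue;
                    }
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule();             /* sleep until a producer wakes us */
            }
            return 0;
    }

One thread shared by all controllers keeps the thread count constant and lets module init/exit manage a single kthread, which is the design choice the cciss_init()/cciss_cleanup() hunks later in this patch implement.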
@@ -3268,8 +3600,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
3268 case REPORT_LUNS_CHANGED: 3600 case REPORT_LUNS_CHANGED:
3269 printk(KERN_WARNING "cciss%d: report LUN data " 3601 printk(KERN_WARNING "cciss%d: report LUN data "
3270 "changed\n", h->ctlr); 3602 "changed\n", h->ctlr);
3271 if (h->rescan_wait) 3603 add_to_scan_list(h);
3272 complete(h->rescan_wait); 3604 wake_up_process(cciss_scan_thread);
3273 return 1; 3605 return 1;
3274 break; 3606 break;
3275 case POWER_OR_RESET: 3607 case POWER_OR_RESET:
@@ -3489,7 +3821,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3489 if (scratchpad == CCISS_FIRMWARE_READY) 3821 if (scratchpad == CCISS_FIRMWARE_READY)
3490 break; 3822 break;
3491 set_current_state(TASK_INTERRUPTIBLE); 3823 set_current_state(TASK_INTERRUPTIBLE);
3492 schedule_timeout(HZ / 10); /* wait 100ms */ 3824 schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */
3493 } 3825 }
3494 if (scratchpad != CCISS_FIRMWARE_READY) { 3826 if (scratchpad != CCISS_FIRMWARE_READY) {
3495 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); 3827 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
@@ -3615,7 +3947,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3615 break; 3947 break;
3616 /* delay and try again */ 3948 /* delay and try again */
3617 set_current_state(TASK_INTERRUPTIBLE); 3949 set_current_state(TASK_INTERRUPTIBLE);
3618 schedule_timeout(10); 3950 schedule_timeout(msecs_to_jiffies(1));
3619 } 3951 }
3620 3952
3621#ifdef CCISS_DEBUG 3953#ifdef CCISS_DEBUG
@@ -3669,15 +4001,16 @@ Enomem:
3669 return -1; 4001 return -1;
3670} 4002}
3671 4003
3672static void free_hba(int i) 4004static void free_hba(int n)
3673{ 4005{
3674 ctlr_info_t *p = hba[i]; 4006 ctlr_info_t *h = hba[n];
3675 int n; 4007 int i;
3676 4008
3677 hba[i] = NULL; 4009 hba[n] = NULL;
3678 for (n = 0; n < CISS_MAX_LUN; n++) 4010 for (i = 0; i < h->highest_lun + 1; i++)
3679 put_disk(p->gendisk[n]); 4011 if (h->gendisk[i] != NULL)
3680 kfree(p); 4012 put_disk(h->gendisk[i]);
4013 kfree(h);
3681} 4014}
3682 4015
3683/* Send a message CDB to the firmware. */ 4016/* Send a message CDB to the firmware. */
@@ -3918,6 +4251,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3918 hba[i]->busy_initializing = 1; 4251 hba[i]->busy_initializing = 1;
3919 INIT_HLIST_HEAD(&hba[i]->cmpQ); 4252 INIT_HLIST_HEAD(&hba[i]->cmpQ);
3920 INIT_HLIST_HEAD(&hba[i]->reqQ); 4253 INIT_HLIST_HEAD(&hba[i]->reqQ);
4254 mutex_init(&hba[i]->busy_shutting_down);
3921 4255
3922 if (cciss_pci_init(hba[i], pdev) != 0) 4256 if (cciss_pci_init(hba[i], pdev) != 0)
3923 goto clean0; 4257 goto clean0;
@@ -3926,6 +4260,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3926 hba[i]->ctlr = i; 4260 hba[i]->ctlr = i;
3927 hba[i]->pdev = pdev; 4261 hba[i]->pdev = pdev;
3928 4262
4263 init_completion(&hba[i]->scan_wait);
4264
3929 if (cciss_create_hba_sysfs_entry(hba[i])) 4265 if (cciss_create_hba_sysfs_entry(hba[i]))
3930 goto clean0; 4266 goto clean0;
3931 4267
@@ -4001,8 +4337,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4001 hba[i]->num_luns = 0; 4337 hba[i]->num_luns = 0;
4002 hba[i]->highest_lun = -1; 4338 hba[i]->highest_lun = -1;
4003 for (j = 0; j < CISS_MAX_LUN; j++) { 4339 for (j = 0; j < CISS_MAX_LUN; j++) {
4004 hba[i]->drv[j].raid_level = -1; 4340 hba[i]->drv[j] = NULL;
4005 hba[i]->drv[j].queue = NULL;
4006 hba[i]->gendisk[j] = NULL; 4341 hba[i]->gendisk[j] = NULL;
4007 } 4342 }
4008 4343
@@ -4035,14 +4370,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4035 4370
4036 hba[i]->cciss_max_sectors = 2048; 4371 hba[i]->cciss_max_sectors = 2048;
4037 4372
4373 rebuild_lun_table(hba[i], 1, 0);
4038 hba[i]->busy_initializing = 0; 4374 hba[i]->busy_initializing = 0;
4039
4040 rebuild_lun_table(hba[i], 1);
4041 hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i],
4042 "cciss_scan%02d", i);
4043 if (IS_ERR(hba[i]->cciss_scan_thread))
4044 return PTR_ERR(hba[i]->cciss_scan_thread);
4045
4046 return 1; 4375 return 1;
4047 4376
4048clean4: 4377clean4:
@@ -4063,12 +4392,7 @@ clean1:
4063 cciss_destroy_hba_sysfs_entry(hba[i]); 4392 cciss_destroy_hba_sysfs_entry(hba[i]);
4064clean0: 4393clean0:
4065 hba[i]->busy_initializing = 0; 4394 hba[i]->busy_initializing = 0;
4066 /* cleanup any queues that may have been initialized */ 4395
4067 for (j=0; j <= hba[i]->highest_lun; j++){
4068 drive_info_struct *drv = &(hba[i]->drv[j]);
4069 if (drv->queue)
4070 blk_cleanup_queue(drv->queue);
4071 }
4072 /* 4396 /*
4073 * Deliberately omit pci_disable_device(): it does something nasty to 4397 * Deliberately omit pci_disable_device(): it does something nasty to
4074 * Smart Array controllers that pci_enable_device does not undo 4398 * Smart Array controllers that pci_enable_device does not undo
@@ -4125,8 +4449,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4125 return; 4449 return;
4126 } 4450 }
4127 4451
4128 kthread_stop(hba[i]->cciss_scan_thread); 4452 mutex_lock(&hba[i]->busy_shutting_down);
4129 4453
4454 remove_from_scan_list(hba[i]);
4130 remove_proc_entry(hba[i]->devname, proc_cciss); 4455 remove_proc_entry(hba[i]->devname, proc_cciss);
4131 unregister_blkdev(hba[i]->major, hba[i]->devname); 4456 unregister_blkdev(hba[i]->major, hba[i]->devname);
4132 4457
@@ -4136,8 +4461,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4136 if (disk) { 4461 if (disk) {
4137 struct request_queue *q = disk->queue; 4462 struct request_queue *q = disk->queue;
4138 4463
4139 if (disk->flags & GENHD_FL_UP) 4464 if (disk->flags & GENHD_FL_UP) {
4465 cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
4140 del_gendisk(disk); 4466 del_gendisk(disk);
4467 }
4141 if (q) 4468 if (q)
4142 blk_cleanup_queue(q); 4469 blk_cleanup_queue(q);
4143 } 4470 }
@@ -4170,6 +4497,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4170 pci_release_regions(pdev); 4497 pci_release_regions(pdev);
4171 pci_set_drvdata(pdev, NULL); 4498 pci_set_drvdata(pdev, NULL);
4172 cciss_destroy_hba_sysfs_entry(hba[i]); 4499 cciss_destroy_hba_sysfs_entry(hba[i]);
4500 mutex_unlock(&hba[i]->busy_shutting_down);
4173 free_hba(i); 4501 free_hba(i);
4174} 4502}
4175 4503
@@ -4202,15 +4530,25 @@ static int __init cciss_init(void)
4202 if (err) 4530 if (err)
4203 return err; 4531 return err;
4204 4532
4533 /* Start the scan thread */
4534 cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
4535 if (IS_ERR(cciss_scan_thread)) {
4536 err = PTR_ERR(cciss_scan_thread);
4537 goto err_bus_unregister;
4538 }
4539
4205 /* Register for our PCI devices */ 4540 /* Register for our PCI devices */
4206 err = pci_register_driver(&cciss_pci_driver); 4541 err = pci_register_driver(&cciss_pci_driver);
4207 if (err) 4542 if (err)
4208 goto err_bus_register; 4543 goto err_thread_stop;
4209 4544
4210 return 0; 4545 return err;
4211 4546
4212err_bus_register: 4547err_thread_stop:
4548 kthread_stop(cciss_scan_thread);
4549err_bus_unregister:
4213 bus_unregister(&cciss_bus_type); 4550 bus_unregister(&cciss_bus_type);
4551
4214 return err; 4552 return err;
4215} 4553}
4216 4554
@@ -4227,6 +4565,7 @@ static void __exit cciss_cleanup(void)
4227 cciss_remove_one(hba[i]->pdev); 4565 cciss_remove_one(hba[i]->pdev);
4228 } 4566 }
4229 } 4567 }
4568 kthread_stop(cciss_scan_thread);
4230 remove_proc_entry("driver/cciss", NULL); 4569 remove_proc_entry("driver/cciss", NULL);
4231 bus_unregister(&cciss_bus_type); 4570 bus_unregister(&cciss_bus_type);
4232} 4571}
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 06a5db25b298..31524cf42c77 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -2,6 +2,7 @@
2#define CCISS_H 2#define CCISS_H
3 3
4#include <linux/genhd.h> 4#include <linux/genhd.h>
5#include <linux/mutex.h>
5 6
6#include "cciss_cmd.h" 7#include "cciss_cmd.h"
7 8
@@ -29,7 +30,7 @@ struct access_method {
29}; 30};
30typedef struct _drive_info_struct 31typedef struct _drive_info_struct
31{ 32{
32 __u32 LunID; 33 unsigned char LunID[8];
33 int usage_count; 34 int usage_count;
34 struct request_queue *queue; 35 struct request_queue *queue;
35 sector_t nr_blocks; 36 sector_t nr_blocks;
@@ -51,6 +52,7 @@ typedef struct _drive_info_struct
51 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ 52 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
52 char model[MODEL_LEN + 1]; /* SCSI model string */ 53 char model[MODEL_LEN + 1]; /* SCSI model string */
53 char rev[REV_LEN + 1]; /* SCSI revision string */ 54 char rev[REV_LEN + 1]; /* SCSI revision string */
55 char device_initialized; /* indicates whether dev is initialized */
54} drive_info_struct; 56} drive_info_struct;
55 57
56struct ctlr_info 58struct ctlr_info
@@ -86,7 +88,7 @@ struct ctlr_info
86 BYTE cciss_read_capacity; 88 BYTE cciss_read_capacity;
87 89
88 // information about each logical volume 90 // information about each logical volume
89 drive_info_struct drv[CISS_MAX_LUN]; 91 drive_info_struct *drv[CISS_MAX_LUN];
90 92
91 struct access_method access; 93 struct access_method access;
92 94
@@ -108,6 +110,8 @@ struct ctlr_info
108 int nr_frees; 110 int nr_frees;
109 int busy_configuring; 111 int busy_configuring;
110 int busy_initializing; 112 int busy_initializing;
113 int busy_scanning;
114 struct mutex busy_shutting_down;
111 115
112 /* This element holds the zero based queue number of the last 116 /* This element holds the zero based queue number of the last
113 * queue to be started. It is used for fairness. 117 * queue to be started. It is used for fairness.
@@ -122,8 +126,8 @@ struct ctlr_info
122 /* and saved for later processing */ 126 /* and saved for later processing */
123#endif 127#endif
124 unsigned char alive; 128 unsigned char alive;
125 struct completion *rescan_wait; 129 struct list_head scan_list;
126 struct task_struct *cciss_scan_thread; 130 struct completion scan_wait;
127 struct device dev; 131 struct device dev;
128}; 132};
129 133
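With drv[] now an array of pointers (see the cciss.h hunk just above), every walk over the logical drives has to skip the NULL holes left by deleted drives, exactly as the earlier cciss.c hunks do. A minimal sketch of the consumer side, with trimmed stand-in types:

    struct example_drive { int heads; };
    struct example_ctlr  { struct example_drive *drv[16]; int highest_lun; };

    static int example_count_real_drives(struct example_ctlr *h)
    {
            int i, n = 0;

            for (i = 0; i <= h->highest_lun; i++) {
                    if (!h->drv[i])         /* hole left by a deleted drive */
                            continue;
                    if (h->drv[i]->heads)   /* size > 0 means a configured drive */
                            n++;
            }
            return n;
    }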
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b82d438e2607..6422651ec364 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -32,6 +32,7 @@
32#include <linux/blkpg.h> 32#include <linux/blkpg.h>
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/proc_fs.h> 34#include <linux/proc_fs.h>
35#include <linux/seq_file.h>
35#include <linux/init.h> 36#include <linux/init.h>
36#include <linux/hdreg.h> 37#include <linux/hdreg.h>
37#include <linux/spinlock.h> 38#include <linux/spinlock.h>
@@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
177 178
178#ifdef CONFIG_PROC_FS 179#ifdef CONFIG_PROC_FS
179static void ida_procinit(int i); 180static void ida_procinit(int i);
180static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
181#else 181#else
182static void ida_procinit(int i) {} 182static void ida_procinit(int i) {}
183#endif 183#endif
@@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops = {
206#ifdef CONFIG_PROC_FS 206#ifdef CONFIG_PROC_FS
207 207
208static struct proc_dir_entry *proc_array; 208static struct proc_dir_entry *proc_array;
209static const struct file_operations ida_proc_fops;
209 210
210/* 211/*
211 * Get us a file in /proc/array that says something about each controller. 212 * Get us a file in /proc/array that says something about each controller.
@@ -218,19 +219,16 @@ static void __init ida_procinit(int i)
218 if (!proc_array) return; 219 if (!proc_array) return;
219 } 220 }
220 221
221 create_proc_read_entry(hba[i]->devname, 0, proc_array, 222 proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
222 ida_proc_get_info, hba[i]);
223} 223}
224 224
225/* 225/*
226 * Report information about this controller. 226 * Report information about this controller.
227 */ 227 */
228static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 228static int ida_proc_show(struct seq_file *m, void *v)
229{ 229{
230 off_t pos = 0; 230 int i, ctlr;
231 off_t len = 0; 231 ctlr_info_t *h = (ctlr_info_t*)m->private;
232 int size, i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)data;
234 drv_info_t *drv; 232 drv_info_t *drv;
235#ifdef CPQ_PROC_PRINT_QUEUES 233#ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c; 234 cmdlist_t *c;
@@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
238#endif 236#endif
239 237
240 ctlr = h->ctlr; 238 ctlr = h->ctlr;
241 size = sprintf(buffer, "%s: Compaq %s Controller\n" 239 seq_printf(m, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n" 240 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n" 241 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n" 242 " Controller Sig: 0x%08lx\n"
@@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
258 h->log_drives, h->phys_drives, 256 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit); 257 h->Qdepth, h->maxQsinceinit);
260 258
261 pos += size; len += size; 259 seq_puts(m, "Logical Drive Info:\n");
262
263 size = sprintf(buffer+len, "Logical Drive Info:\n");
264 pos += size; len += size;
265 260
266 for(i=0; i<h->log_drives; i++) { 261 for(i=0; i<h->log_drives; i++) {
267 drv = &h->drv[i]; 262 drv = &h->drv[i];
268 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", 263 seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
269 ctlr, i, drv->blk_size, drv->nr_blks); 264 ctlr, i, drv->blk_size, drv->nr_blks);
270 pos += size; len += size;
271 } 265 }
272 266
273#ifdef CPQ_PROC_PRINT_QUEUES 267#ifdef CPQ_PROC_PRINT_QUEUES
274 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 268 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
275 size = sprintf(buffer+len, "\nCurrent Queues:\n"); 269 seq_puts(m, "\nCurrent Queues:\n");
276 pos += size; len += size;
277 270
278 c = h->reqQ; 271 c = h->reqQ;
279 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; 272 seq_printf(m, "reqQ = %p", c);
280 if (c) c=c->next; 273 if (c) c=c->next;
281 while(c && c != h->reqQ) { 274 while(c && c != h->reqQ) {
282 size = sprintf(buffer+len, "->%p", c); 275 seq_printf(m, "->%p", c);
283 pos += size; len += size;
284 c=c->next; 276 c=c->next;
285 } 277 }
286 278
287 c = h->cmpQ; 279 c = h->cmpQ;
288 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; 280 seq_printf(m, "\ncmpQ = %p", c);
289 if (c) c=c->next; 281 if (c) c=c->next;
290 while(c && c != h->cmpQ) { 282 while(c && c != h->cmpQ) {
291 size = sprintf(buffer+len, "->%p", c); 283 seq_printf(m, "->%p", c);
292 pos += size; len += size;
293 c=c->next; 284 c=c->next;
294 } 285 }
295 286
296 size = sprintf(buffer+len, "\n"); pos += size; len += size; 287 seq_putc(m, '\n');
297 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 288 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
298#endif 289#endif
299 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", 290 seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
300 h->nr_allocs, h->nr_frees); 291 h->nr_allocs, h->nr_frees);
301 pos += size; len += size; 292 return 0;
302 293}
303 *eof = 1; 294
304 *start = buffer+offset; 295static int ida_proc_open(struct inode *inode, struct file *file)
305 len -= offset; 296{
306 if (len>length) 297 return single_open(file, ida_proc_show, PDE(inode)->data);
307 len = length;
308 return len;
309} 298}
299
300static const struct file_operations ida_proc_fops = {
301 .owner = THIS_MODULE,
302 .open = ida_proc_open,
303 .read = seq_read,
304 .llseek = seq_lseek,
305 .release = single_release,
306};
310#endif /* CONFIG_PROC_FS */ 307#endif /* CONFIG_PROC_FS */
311 308
312module_param_array(eisa, int, NULL, 0); 309module_param_array(eisa, int, NULL, 0);
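The cpqarray change above is the stock conversion from create_proc_read_entry() to proc_create_data() plus the single_open() seq_file helpers. A stripped-down sketch of that recipe, with all device-specific output omitted and "example_" names standing in for the real ones:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>
    #include <linux/module.h>

    static int example_proc_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "device data at %p\n", m->private);
            return 0;
    }

    static int example_proc_open(struct inode *inode, struct file *file)
    {
            /* PDE(inode)->data is whatever was passed to proc_create_data() */
            return single_open(file, example_proc_show, PDE(inode)->data);
    }

    static const struct file_operations example_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = example_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* registration, e.g. from probe():
     *      proc_create_data("example", 0, parent_dir, &example_proc_fops, dev);
     */

The seq_file core handles buffering and partial reads, which is why all the hand-rolled pos/len/offset bookkeeping disappears from ida_proc_get_info().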
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index 60ab75104da9..1c129211302d 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = {
217 .configure = parisc_agp_configure, 217 .configure = parisc_agp_configure,
218 .fetch_size = parisc_agp_fetch_size, 218 .fetch_size = parisc_agp_fetch_size,
219 .tlb_flush = parisc_agp_tlbflush, 219 .tlb_flush = parisc_agp_tlbflush,
220 .mask_memory = parisc_agp_page_mask_memory, 220 .mask_memory = parisc_agp_mask_memory,
221 .masks = parisc_agp_masks, 221 .masks = parisc_agp_masks,
222 .agp_enable = parisc_agp_enable, 222 .agp_enable = parisc_agp_enable,
223 .cache_flush = global_cache_flush, 223 .cache_flush = global_cache_flush,
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index aaca40283be9..4f568cb9af3f 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -393,7 +393,7 @@ static int apm_open(struct inode * inode, struct file * filp)
393 return as ? 0 : -ENOMEM; 393 return as ? 0 : -ENOMEM;
394} 394}
395 395
396static struct file_operations apm_bios_fops = { 396static const struct file_operations apm_bios_fops = {
397 .owner = THIS_MODULE, 397 .owner = THIS_MODULE,
398 .read = apm_read, 398 .read = apm_read,
399 .poll = apm_poll, 399 .poll = apm_poll,
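The apm-emulation change above, like the bfin-otp and xilinx_hwicap hunks later in this series, simply constifies a file_operations table so it can live in read-only data. The shape of such a declaration, for a trivial example driver:

    #include <linux/fs.h>
    #include <linux/module.h>

    static ssize_t example_read(struct file *f, char __user *buf,
                                size_t len, loff_t *off)
    {
            return 0;       /* nothing to read in this sketch */
    }

    /* const: the table is fixed at compile time and ends up in .rodata */
    static const struct file_operations example_fops = {
            .owner = THIS_MODULE,
            .read  = example_read,
    };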
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index e3dd24bff514..836d4f0a876f 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -217,7 +217,7 @@ static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
217# define bfin_otp_ioctl NULL 217# define bfin_otp_ioctl NULL
218#endif 218#endif
219 219
220static struct file_operations bfin_otp_fops = { 220static const struct file_operations bfin_otp_fops = {
221 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
222 .unlocked_ioctl = bfin_otp_ioctl, 222 .unlocked_ioctl = bfin_otp_ioctl,
223 .read = bfin_otp_read, 223 .read = bfin_otp_read,
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index df5038bbcbc2..4254457d3911 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -3354,7 +3354,7 @@ static int __init cy_detect_isa(void)
3354 continue; 3354 continue;
3355 } 3355 }
3356#ifdef MODULE 3356#ifdef MODULE
3357 if (isparam && irq[i]) 3357 if (isparam && i < NR_CARDS && irq[i])
3358 cy_isa_irq = irq[i]; 3358 cy_isa_irq = irq[i];
3359 else 3359 else
3360#endif 3360#endif
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 52e06589821d..045c930e6320 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -56,6 +56,7 @@
56#include <linux/errno.h> /* for -EBUSY */ 56#include <linux/errno.h> /* for -EBUSY */
57#include <linux/ioport.h> /* for request_region */ 57#include <linux/ioport.h> /* for request_region */
58#include <linux/delay.h> /* for loops_per_jiffy */ 58#include <linux/delay.h> /* for loops_per_jiffy */
59#include <linux/sched.h>
59#include <linux/smp_lock.h> /* cycle_kernel_lock() */ 60#include <linux/smp_lock.h> /* cycle_kernel_lock() */
60#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ 61#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */
61#include <asm/uaccess.h> /* for get_user, etc. */ 62#include <asm/uaccess.h> /* for get_user, etc. */
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 41fc11dc921c..65545de3dbf4 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -36,6 +36,7 @@
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <asm/system.h> 37#include <asm/system.h>
38#include <linux/poll.h> 38#include <linux/poll.h>
39#include <linux/sched.h>
39#include <linux/spinlock.h> 40#include <linux/spinlock.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include <linux/ipmi.h> 42#include <linux/ipmi.h>
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 09050797c76a..ec5e3f8df648 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -35,6 +35,7 @@
35#include <linux/errno.h> 35#include <linux/errno.h>
36#include <asm/system.h> 36#include <asm/system.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/sched.h>
38#include <linux/spinlock.h> 39#include <linux/spinlock.h>
39#include <linux/mutex.h> 40#include <linux/mutex.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 5942a9d674c0..452370af95de 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -220,8 +220,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info, char *name,
220 return 1; 220 return 1;
221 } 221 }
222 222
223 if ((long)info < (long)(&cy_port[0]) 223 if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) {
224 || (long)(&cy_port[NR_PORTS]) < (long)info) {
225 printk("Warning: cyclades_port out of range for (%s) in %s\n", 224 printk("Warning: cyclades_port out of range for (%s) in %s\n",
226 name, routine); 225 name, routine);
227 return 1; 226 return 1;
@@ -520,15 +519,13 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
520 panic("TxInt on debug port!!!"); 519 panic("TxInt on debug port!!!");
521 } 520 }
522#endif 521#endif
523
524 info = &cy_port[channel];
525
526 /* validate the port number (as configured and open) */ 522 /* validate the port number (as configured and open) */
527 if ((channel < 0) || (NR_PORTS <= channel)) { 523 if ((channel < 0) || (NR_PORTS <= channel)) {
528 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 524 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
529 base_addr[CyTEOIR] = CyNOTRANS; 525 base_addr[CyTEOIR] = CyNOTRANS;
530 return IRQ_HANDLED; 526 return IRQ_HANDLED;
531 } 527 }
528 info = &cy_port[channel];
532 info->last_active = jiffies; 529 info->last_active = jiffies;
533 if (info->tty == 0) { 530 if (info->tty == 0) {
534 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 531 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
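Both serial167 hunks above are ordering and bounds fixes: validate the channel number before forming &cy_port[channel], and bound-check the pointer against the array directly rather than through casts to long. A small sketch of the safer ordering; NR_PORTS and cy_port here are local stand-ins, not the driver's definitions:

    #define NR_PORTS 4
    static struct { unsigned long last_active; } cy_port[NR_PORTS];

    static int example_touch_port(int channel, unsigned long now)
    {
            if (channel < 0 || channel >= NR_PORTS) /* validate the index first */
                    return -1;
            cy_port[channel].last_active = now;     /* only then touch the slot */
            return 0;
    }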
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index aafdbaebc16a..feb55075819b 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -518,7 +518,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
518static int tty_ldisc_halt(struct tty_struct *tty) 518static int tty_ldisc_halt(struct tty_struct *tty)
519{ 519{
520 clear_bit(TTY_LDISC, &tty->flags); 520 clear_bit(TTY_LDISC, &tty->flags);
521 return cancel_delayed_work(&tty->buf.work); 521 return cancel_delayed_work_sync(&tty->buf.work);
522} 522}
523 523
524/** 524/**
@@ -756,12 +756,9 @@ void tty_ldisc_hangup(struct tty_struct *tty)
756 * N_TTY. 756 * N_TTY.
757 */ 757 */
758 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { 758 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
759 /* Make sure the old ldisc is quiescent */
760 tty_ldisc_halt(tty);
761 flush_scheduled_work();
762
763 /* Avoid racing set_ldisc or tty_ldisc_release */ 759 /* Avoid racing set_ldisc or tty_ldisc_release */
764 mutex_lock(&tty->ldisc_mutex); 760 mutex_lock(&tty->ldisc_mutex);
761 tty_ldisc_halt(tty);
765 if (tty->ldisc) { /* Not yet closed */ 762 if (tty->ldisc) { /* Not yet closed */
766 /* Switch back to N_TTY */ 763 /* Switch back to N_TTY */
767 tty_ldisc_reinit(tty); 764 tty_ldisc_reinit(tty);
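
The tty_ldisc.c hunk switches tty_ldisc_halt() to cancel_delayed_work_sync() and performs the halt under ldisc_mutex, so the buffer work is guaranteed to have finished running, not merely been dequeued, before the line discipline is torn away. A hedged, module-style sketch of the same shutdown pattern for an arbitrary driver-owned delayed work item follows; the module and all names in it are illustrative, not tty code.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Illustrative only: one delayed work item that must be quiescent on exit. */
static struct delayed_work demo_work;

static void demo_fn(struct work_struct *w)
{
	pr_info("demo work ran\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_fn);
	schedule_delayed_work(&demo_work, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * cancel_delayed_work() only drops a still-pending item; if the
	 * handler is already executing it keeps running.
	 * cancel_delayed_work_sync() additionally waits for a running
	 * handler to complete -- the guarantee tty_ldisc_halt() now needs.
	 */
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
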
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 29c651ab0d78..6b36ee56e6fe 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -981,8 +981,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
981 goto eperm; 981 goto eperm;
982 982
983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, 983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg,
984 sizeof(struct vt_setactivate))) 984 sizeof(struct vt_setactivate))) {
985 return -EFAULT; 985 ret = -EFAULT;
986 goto out;
987 }
986 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) 988 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
987 ret = -ENXIO; 989 ret = -ENXIO;
988 else { 990 else {
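
The vt_ioctl.c hunk converts a bare "return -EFAULT" on a failed copy_from_user() into "ret = -EFAULT; goto out;", so the function leaves through its common exit path instead of skipping whatever cleanup that path performs (presumably dropping a lock taken earlier in the ioctl; the body of the out: label is not shown in this hunk). A generic sketch of that error-path shape follows; every name in it is hypothetical.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

struct demo_args { int console; };      /* hypothetical ioctl argument */
static DEFINE_MUTEX(demo_lock);         /* hypothetical lock the exit path drops */

static long demo_ioctl(void __user *arg)
{
	struct demo_args a;
	long ret = 0;

	mutex_lock(&demo_lock);

	if (copy_from_user(&a, arg, sizeof(a))) {
		ret = -EFAULT;
		goto out;               /* never return with the lock held */
	}

	/* ... act on 'a' ... */
out:
	mutex_unlock(&demo_lock);
	return ret;
}
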
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f40ab699860f..4846d50199f3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -559,7 +559,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
559 return status; 559 return status;
560} 560}
561 561
562static struct file_operations hwicap_fops = { 562static const struct file_operations hwicap_fops = {
563 .owner = THIS_MODULE, 563 .owner = THIS_MODULE,
564 .write = hwicap_write, 564 .write = hwicap_write,
565 .read = hwicap_read, 565 .read = hwicap_read,
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index abf4a2529f80..60697909ebdb 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -227,7 +227,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
227 * cn_proc_mcast_ctl 227 * cn_proc_mcast_ctl
228 * @data: message sent from userspace via the connector 228 * @data: message sent from userspace via the connector
229 */ 229 */
230static void cn_proc_mcast_ctl(struct cn_msg *msg) 230static void cn_proc_mcast_ctl(struct cn_msg *msg,
231 struct netlink_skb_parms *nsp)
231{ 232{
232 enum proc_cn_mcast_op *mc_op = NULL; 233 enum proc_cn_mcast_op *mc_op = NULL;
233 int err = 0; 234 int err = 0;
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 4a1dfe1f4ba9..210338ea222f 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -78,18 +78,20 @@ void cn_queue_wrapper(struct work_struct *work)
78 struct cn_callback_entry *cbq = 78 struct cn_callback_entry *cbq =
79 container_of(work, struct cn_callback_entry, work); 79 container_of(work, struct cn_callback_entry, work);
80 struct cn_callback_data *d = &cbq->data; 80 struct cn_callback_data *d = &cbq->data;
81 struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
82 struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
81 83
82 d->callback(d->callback_priv); 84 d->callback(msg, nsp);
83 85
84 d->destruct_data(d->ddata); 86 kfree_skb(d->skb);
85 d->ddata = NULL; 87 d->skb = NULL;
86 88
87 kfree(d->free); 89 kfree(d->free);
88} 90}
89 91
90static struct cn_callback_entry * 92static struct cn_callback_entry *
91cn_queue_alloc_callback_entry(char *name, struct cb_id *id, 93cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
92 void (*callback)(struct cn_msg *)) 94 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
93{ 95{
94 struct cn_callback_entry *cbq; 96 struct cn_callback_entry *cbq;
95 97
@@ -123,7 +125,7 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
123} 125}
124 126
125int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, 127int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
126 void (*callback)(struct cn_msg *)) 128 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
127{ 129{
128 struct cn_callback_entry *cbq, *__cbq; 130 struct cn_callback_entry *cbq, *__cbq;
129 int found = 0; 131 int found = 0;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 74f52af79563..f06024668f99 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -129,21 +129,19 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
129/* 129/*
130 * Callback helper - queues work and setup destructor for given data. 130 * Callback helper - queues work and setup destructor for given data.
131 */ 131 */
132static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data) 132static int cn_call_callback(struct sk_buff *skb)
133{ 133{
134 struct cn_callback_entry *__cbq, *__new_cbq; 134 struct cn_callback_entry *__cbq, *__new_cbq;
135 struct cn_dev *dev = &cdev; 135 struct cn_dev *dev = &cdev;
136 struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
136 int err = -ENODEV; 137 int err = -ENODEV;
137 138
138 spin_lock_bh(&dev->cbdev->queue_lock); 139 spin_lock_bh(&dev->cbdev->queue_lock);
139 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { 140 list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
140 if (cn_cb_equal(&__cbq->id.id, &msg->id)) { 141 if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
141 if (likely(!work_pending(&__cbq->work) && 142 if (likely(!work_pending(&__cbq->work) &&
142 __cbq->data.ddata == NULL)) { 143 __cbq->data.skb == NULL)) {
143 __cbq->data.callback_priv = msg; 144 __cbq->data.skb = skb;
144
145 __cbq->data.ddata = data;
146 __cbq->data.destruct_data = destruct_data;
147 145
148 if (queue_cn_work(__cbq, &__cbq->work)) 146 if (queue_cn_work(__cbq, &__cbq->work))
149 err = 0; 147 err = 0;
@@ -156,10 +154,8 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
156 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC); 154 __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
157 if (__new_cbq) { 155 if (__new_cbq) {
158 d = &__new_cbq->data; 156 d = &__new_cbq->data;
159 d->callback_priv = msg; 157 d->skb = skb;
160 d->callback = __cbq->data.callback; 158 d->callback = __cbq->data.callback;
161 d->ddata = data;
162 d->destruct_data = destruct_data;
163 d->free = __new_cbq; 159 d->free = __new_cbq;
164 160
165 __new_cbq->pdev = __cbq->pdev; 161 __new_cbq->pdev = __cbq->pdev;
@@ -191,7 +187,6 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
191 */ 187 */
192static void cn_rx_skb(struct sk_buff *__skb) 188static void cn_rx_skb(struct sk_buff *__skb)
193{ 189{
194 struct cn_msg *msg;
195 struct nlmsghdr *nlh; 190 struct nlmsghdr *nlh;
196 int err; 191 int err;
197 struct sk_buff *skb; 192 struct sk_buff *skb;
@@ -208,8 +203,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
208 return; 203 return;
209 } 204 }
210 205
211 msg = NLMSG_DATA(nlh); 206 err = cn_call_callback(skb);
212 err = cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
213 if (err < 0) 207 if (err < 0)
214 kfree_skb(skb); 208 kfree_skb(skb);
215 } 209 }
@@ -270,7 +264,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
270 * May sleep. 264 * May sleep.
271 */ 265 */
272int cn_add_callback(struct cb_id *id, char *name, 266int cn_add_callback(struct cb_id *id, char *name,
273 void (*callback)(struct cn_msg *)) 267 void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
274{ 268{
275 int err; 269 int err;
276 struct cn_dev *dev = &cdev; 270 struct cn_dev *dev = &cdev;
@@ -352,7 +346,7 @@ static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
352 * 346 *
353 * Used for notification of a request's processing. 347 * Used for notification of a request's processing.
354 */ 348 */
355static void cn_callback(struct cn_msg *msg) 349static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
356{ 350{
357 struct cn_ctl_msg *ctl; 351 struct cn_ctl_msg *ctl;
358 struct cn_ctl_entry *ent; 352 struct cn_ctl_entry *ent;
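
The connector changes replace the private-data/destructor plumbing with the skb itself: callbacks now take the cn_msg plus the sender's netlink_skb_parms, and cn_queue_wrapper() frees the skb after the callback returns. A hedged sketch of a connector user written against the new cn_add_callback() prototype shown above follows; the idx/val pair and the module itself are made up for illustration.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/connector.h>

/* Hypothetical idx/val pair; real users register their own IDs. */
static struct cb_id demo_id = { .idx = 0x123, .val = 0x456 };

/* New-style callback: the sender's netlink_skb_parms arrive with the
 * message, and the connector core frees the underlying skb afterwards,
 * so the callback must not keep pointers into it. */
static void demo_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("cn msg %u.%u, seq %u, %u bytes of payload\n",
		msg->id.idx, msg->id.val, msg->seq, msg->len);
}

static int __init demo_init(void)
{
	return cn_add_callback(&demo_id, "demo", demo_callback);
}

static void __exit demo_exit(void)
{
	cn_del_callback(&demo_id);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
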
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4e551e63b6dc..4f4ac82382f7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644);
15 15
16/* Lookup table for all possible MC control instances */ 16/* Lookup table for all possible MC control instances */
17struct amd64_pvt; 17struct amd64_pvt;
18static struct mem_ctl_info *mci_lookup[MAX_NUMNODES]; 18static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
19static struct amd64_pvt *pvt_lookup[MAX_NUMNODES]; 19static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
20 20
21/* 21/*
22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only 22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
@@ -189,7 +189,10 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
189/* Map from a CSROW entry to the mask entry that operates on it */ 189/* Map from a CSROW entry to the mask entry that operates on it */
190static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) 190static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
191{ 191{
192 return csrow >> (pvt->num_dcsm >> 3); 192 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
193 return csrow;
194 else
195 return csrow >> 1;
193} 196}
194 197
195/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ 198/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
@@ -279,29 +282,26 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
279 intlv_en = pvt->dram_IntlvEn[0]; 282 intlv_en = pvt->dram_IntlvEn[0];
280 283
281 if (intlv_en == 0) { 284 if (intlv_en == 0) {
282 for (node_id = 0; ; ) { 285 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
283 if (amd64_base_limit_match(pvt, sys_addr, node_id)) 286 if (amd64_base_limit_match(pvt, sys_addr, node_id))
284 break; 287 goto found;
285
286 if (++node_id >= DRAM_REG_COUNT)
287 goto err_no_match;
288 } 288 }
289 goto found; 289 goto err_no_match;
290 } 290 }
291 291
292 if (unlikely((intlv_en != (0x01 << 8)) && 292 if (unlikely((intlv_en != 0x01) &&
293 (intlv_en != (0x03 << 8)) && 293 (intlv_en != 0x03) &&
294 (intlv_en != (0x07 << 8)))) { 294 (intlv_en != 0x07))) {
295 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " 295 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
296 "IntlvEn field of DRAM Base Register for node 0: " 296 "IntlvEn field of DRAM Base Register for node 0: "
297 "This probably indicates a BIOS bug.\n", intlv_en); 297 "this probably indicates a BIOS bug.\n", intlv_en);
298 return NULL; 298 return NULL;
299 } 299 }
300 300
301 bits = (((u32) sys_addr) >> 12) & intlv_en; 301 bits = (((u32) sys_addr) >> 12) & intlv_en;
302 302
303 for (node_id = 0; ; ) { 303 for (node_id = 0; ; ) {
304 if ((pvt->dram_limit[node_id] & intlv_en) == bits) 304 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
305 break; /* intlv_sel field matches */ 305 break; /* intlv_sel field matches */
306 306
307 if (++node_id >= DRAM_REG_COUNT) 307 if (++node_id >= DRAM_REG_COUNT)
@@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
311 /* sanity test for sys_addr */ 311 /* sanity test for sys_addr */
312 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { 312 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
313 amd64_printk(KERN_WARNING, 313 amd64_printk(KERN_WARNING,
314 "%s(): sys_addr 0x%lx falls outside base/limit " 314 "%s(): sys_addr 0x%llx falls outside base/limit "
315 "address range for node %d with node interleaving " 315 "address range for node %d with node interleaving "
316 "enabled.\n", __func__, (unsigned long)sys_addr, 316 "enabled.\n",
317 node_id); 317 __func__, sys_addr, node_id);
318 return NULL; 318 return NULL;
319 } 319 }
320 320
@@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
377 * base/mask register pair, test the condition shown near the start of 377 * base/mask register pair, test the condition shown near the start of
378 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). 378 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
379 */ 379 */
380 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { 380 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
381 381
382 /* This DRAM chip select is disabled on this node */ 382 /* This DRAM chip select is disabled on this node */
383 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) 383 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
@@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
734 u64 base, mask; 734 u64 base, mask;
735 735
736 pvt = mci->pvt_info; 736 pvt = mci->pvt_info;
737 BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT)); 737 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
738 738
739 base = base_from_dct_base(pvt, csrow); 739 base = base_from_dct_base(pvt, csrow);
740 mask = mask_from_dct_mask(pvt, csrow); 740 mask = mask_from_dct_mask(pvt, csrow);
@@ -962,35 +962,27 @@ err_reg:
962 */ 962 */
963static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) 963static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
964{ 964{
965 if (pvt->ext_model >= OPTERON_CPU_REV_F) { 965
966 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
967 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
968 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
969 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
970 pvt->dcs_shift = REV_E_DCS_SHIFT;
971 pvt->cs_count = 8;
972 pvt->num_dcsm = 8;
973 } else {
966 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; 974 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
967 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; 975 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
968 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; 976 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
969 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; 977 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
970 978
971 switch (boot_cpu_data.x86) { 979 if (boot_cpu_data.x86 == 0x11) {
972 case 0xf: 980 pvt->cs_count = 4;
973 pvt->num_dcsm = REV_F_DCSM_COUNT; 981 pvt->num_dcsm = 2;
974 break; 982 } else {
975 983 pvt->cs_count = 8;
976 case 0x10: 984 pvt->num_dcsm = 4;
977 pvt->num_dcsm = F10_DCSM_COUNT;
978 break;
979
980 case 0x11:
981 pvt->num_dcsm = F11_DCSM_COUNT;
982 break;
983
984 default:
985 amd64_printk(KERN_ERR, "Unsupported family!\n");
986 break;
987 } 985 }
988 } else {
989 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
990 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
991 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
992 pvt->dcs_shift = REV_E_DCS_SHIFT;
993 pvt->num_dcsm = REV_E_DCSM_COUNT;
994 } 986 }
995} 987}
996 988
@@ -1003,7 +995,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
1003 995
1004 amd64_set_dct_base_and_mask(pvt); 996 amd64_set_dct_base_and_mask(pvt);
1005 997
1006 for (cs = 0; cs < CHIPSELECT_COUNT; cs++) { 998 for (cs = 0; cs < pvt->cs_count; cs++) {
1007 reg = K8_DCSB0 + (cs * 4); 999 reg = K8_DCSB0 + (cs * 4);
1008 err = pci_read_config_dword(pvt->dram_f2_ctl, reg, 1000 err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
1009 &pvt->dcsb0[cs]); 1001 &pvt->dcsb0[cs]);
@@ -1130,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1130 debugf0("Reading K8_DRAM_BASE_LOW failed\n"); 1122 debugf0("Reading K8_DRAM_BASE_LOW failed\n");
1131 1123
1132 /* Extract parts into separate data entries */ 1124 /* Extract parts into separate data entries */
1133 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; 1125 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24;
1134 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; 1126 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1135 pvt->dram_rw_en[dram] = (low & 0x3); 1127 pvt->dram_rw_en[dram] = (low & 0x3);
1136 1128
@@ -1143,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1143 * Extract parts into separate data entries. Limit is the HIGHEST memory 1135 * Extract parts into separate data entries. Limit is the HIGHEST memory
1144 * location of the region, so lower 24 bits need to be all ones 1136 * location of the region, so lower 24 bits need to be all ones
1145 */ 1137 */
1146 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; 1138 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF;
1147 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; 1139 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1148 pvt->dram_DstNode[dram] = (low & 0x7); 1140 pvt->dram_DstNode[dram] = (low & 0x7);
1149} 1141}
@@ -1193,7 +1185,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1193 * different from the node that detected the error. 1185 * different from the node that detected the error.
1194 */ 1186 */
1195 src_mci = find_mc_by_sys_addr(mci, SystemAddress); 1187 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1196 if (src_mci) { 1188 if (!src_mci) {
1197 amd64_mc_printk(mci, KERN_ERR, 1189 amd64_mc_printk(mci, KERN_ERR,
1198 "failed to map error address 0x%lx to a node\n", 1190 "failed to map error address 0x%lx to a node\n",
1199 (unsigned long)SystemAddress); 1191 (unsigned long)SystemAddress);
@@ -1376,8 +1368,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1376 1368
1377 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; 1369 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1378 1370
1379 pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) | 1371 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1380 ((u64) low_base & 0xFFFF0000))) << 8; 1372 (((u64)low_base & 0xFFFF0000) << 24);
1381 1373
1382 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); 1374 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1383 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); 1375 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
@@ -1398,9 +1390,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1398 * Extract address values and form a LIMIT address. Limit is the HIGHEST 1390 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1399 * memory location of the region, so low 24 bits need to be all ones. 1391 * memory location of the region, so low 24 bits need to be all ones.
1400 */ 1392 */
1401 low_limit |= 0x0000FFFF; 1393 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1402 pvt->dram_limit[dram] = 1394 (((u64) low_limit & 0xFFFF0000) << 24) |
1403 ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF); 1395 0x00FFFFFF;
1404} 1396}
1405 1397
1406static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 1398static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1566,7 +1558,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1566 1558
1567 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); 1559 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1568 1560
1569 for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { 1561 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1570 1562
1571 cs_base = amd64_get_dct_base(pvt, cs, csrow); 1563 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1572 if (!(cs_base & K8_DCSB_CS_ENABLE)) 1564 if (!(cs_base & K8_DCSB_CS_ENABLE))
@@ -2497,7 +2489,7 @@ err_reg:
2497 * NOTE: CPU Revision Dependent code 2489 * NOTE: CPU Revision Dependent code
2498 * 2490 *
2499 * Input: 2491 * Input:
2500 * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1) 2492 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2501 * k8 private pointer to --> 2493 * k8 private pointer to -->
2502 * DRAM Bank Address mapping register 2494 * DRAM Bank Address mapping register
2503 * node_id 2495 * node_id
@@ -2577,7 +2569,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
2577 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" 2569 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2578 ); 2570 );
2579 2571
2580 for (i = 0; i < CHIPSELECT_COUNT; i++) { 2572 for (i = 0; i < pvt->cs_count; i++) {
2581 csrow = &mci->csrows[i]; 2573 csrow = &mci->csrows[i];
2582 2574
2583 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { 2575 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
@@ -2988,7 +2980,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
2988 goto err_exit; 2980 goto err_exit;
2989 2981
2990 ret = -ENOMEM; 2982 ret = -ENOMEM;
2991 mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id); 2983 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
2992 if (!mci) 2984 if (!mci)
2993 goto err_exit; 2985 goto err_exit;
2994 2986
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 8ea07e2715dc..c6f359a85207 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -132,6 +132,8 @@
132#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ 132#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
133#define EDAC_MOD_STR "amd64_edac" 133#define EDAC_MOD_STR "amd64_edac"
134 134
135#define EDAC_MAX_NUMNODES 8
136
135/* Extended Model from CPUID, for CPU Revision numbers */ 137/* Extended Model from CPUID, for CPU Revision numbers */
136#define OPTERON_CPU_LE_REV_C 0 138#define OPTERON_CPU_LE_REV_C 0
137#define OPTERON_CPU_REV_D 1 139#define OPTERON_CPU_REV_D 1
@@ -142,7 +144,7 @@
142#define OPTERON_CPU_REV_FA 5 144#define OPTERON_CPU_REV_FA 5
143 145
144/* Hardware limit on ChipSelect rows per MC and processors per system */ 146/* Hardware limit on ChipSelect rows per MC and processors per system */
145#define CHIPSELECT_COUNT 8 147#define MAX_CS_COUNT 8
146#define DRAM_REG_COUNT 8 148#define DRAM_REG_COUNT 8
147 149
148 150
@@ -193,7 +195,6 @@
193 */ 195 */
194#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) 196#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
195#define REV_E_DCS_SHIFT 4 197#define REV_E_DCS_SHIFT 4
196#define REV_E_DCSM_COUNT 8
197 198
198#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) 199#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
199#define REV_F_F1Xh_DCS_SHIFT 8 200#define REV_F_F1Xh_DCS_SHIFT 8
@@ -204,9 +205,6 @@
204 */ 205 */
205#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) 206#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
206#define REV_F_DCS_SHIFT 8 207#define REV_F_DCS_SHIFT 8
207#define REV_F_DCSM_COUNT 4
208#define F10_DCSM_COUNT 4
209#define F11_DCSM_COUNT 2
210 208
211/* DRAM CS Mask Registers */ 209/* DRAM CS Mask Registers */
212#define K8_DCSM0 0x60 210#define K8_DCSM0 0x60
@@ -374,13 +372,11 @@ enum {
374 372
375#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ 373#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
376 (BIT(((word) & 0xF) + 20) | \ 374 (BIT(((word) & 0xF) + 20) | \
377 BIT(17) | \ 375 BIT(17) | bits)
378 ((bits) & 0xF))
379 376
380#define SET_NB_DRAM_INJECTION_READ(word, bits) \ 377#define SET_NB_DRAM_INJECTION_READ(word, bits) \
381 (BIT(((word) & 0xF) + 20) | \ 378 (BIT(((word) & 0xF) + 20) | \
382 BIT(16) | \ 379 BIT(16) | bits)
383 ((bits) & 0xF))
384 380
385#define K8_NBCAP 0xE8 381#define K8_NBCAP 0xE8
386#define K8_NBCAP_CORES (BIT(12)|BIT(13)) 382#define K8_NBCAP_CORES (BIT(12)|BIT(13))
@@ -445,12 +441,12 @@ struct amd64_pvt {
445 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ 441 u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
446 442
447 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ 443 /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
448 u32 dcsb0[CHIPSELECT_COUNT]; 444 u32 dcsb0[MAX_CS_COUNT];
449 u32 dcsb1[CHIPSELECT_COUNT]; 445 u32 dcsb1[MAX_CS_COUNT];
450 446
451 /* DRAM CS Mask Registers F2x[1,0][6C:60] */ 447 /* DRAM CS Mask Registers F2x[1,0][6C:60] */
452 u32 dcsm0[CHIPSELECT_COUNT]; 448 u32 dcsm0[MAX_CS_COUNT];
453 u32 dcsm1[CHIPSELECT_COUNT]; 449 u32 dcsm1[MAX_CS_COUNT];
454 450
455 /* 451 /*
456 * Decoded parts of DRAM BASE and LIMIT Registers 452 * Decoded parts of DRAM BASE and LIMIT Registers
@@ -470,6 +466,7 @@ struct amd64_pvt {
470 */ 466 */
471 u32 dcsb_base; /* DCSB base bits */ 467 u32 dcsb_base; /* DCSB base bits */
472 u32 dcsm_mask; /* DCSM mask bits */ 468 u32 dcsm_mask; /* DCSM mask bits */
469 u32 cs_count; /* num chip selects (== num DCSB registers) */
473 u32 num_dcsm; /* Number of DCSM registers */ 470 u32 num_dcsm; /* Number of DCSM registers */
474 u32 dcs_mask_notused; /* DCSM notused mask bits */ 471 u32 dcs_mask_notused; /* DCSM notused mask bits */
475 u32 dcs_shift; /* DCSB and DCSM shift value */ 472 u32 dcs_shift; /* DCSB and DCSM shift value */
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index d3675b76b3a7..29f1f7a612d9 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,5 +1,11 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2 2
3static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
4{
5 struct amd64_pvt *pvt = mci->pvt_info;
6 return sprintf(buf, "0x%x\n", pvt->injection.section);
7}
8
3/* 9/*
4 * store error injection section value which refers to one of 4 16-byte sections 10 * store error injection section value which refers to one of 4 16-byte sections
5 * within a 64-byte cacheline 11 * within a 64-byte cacheline
@@ -15,12 +21,26 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
15 21
16 ret = strict_strtoul(data, 10, &value); 22 ret = strict_strtoul(data, 10, &value);
17 if (ret != -EINVAL) { 23 if (ret != -EINVAL) {
24
25 if (value > 3) {
26 amd64_printk(KERN_WARNING,
27 "%s: invalid section 0x%lx\n",
28 __func__, value);
29 return -EINVAL;
30 }
31
18 pvt->injection.section = (u32) value; 32 pvt->injection.section = (u32) value;
19 return count; 33 return count;
20 } 34 }
21 return ret; 35 return ret;
22} 36}
23 37
38static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
39{
40 struct amd64_pvt *pvt = mci->pvt_info;
41 return sprintf(buf, "0x%x\n", pvt->injection.word);
42}
43
24/* 44/*
25 * store error injection word value which refers to one of 9 16-bit word of the 45 * store error injection word value which refers to one of 9 16-bit word of the
26 * 16-byte (128-bit + ECC bits) section 46 * 16-byte (128-bit + ECC bits) section
@@ -37,14 +57,25 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
37 ret = strict_strtoul(data, 10, &value); 57 ret = strict_strtoul(data, 10, &value);
38 if (ret != -EINVAL) { 58 if (ret != -EINVAL) {
39 59
40 value = (value <= 8) ? value : 0; 60 if (value > 8) {
41 pvt->injection.word = (u32) value; 61 amd64_printk(KERN_WARNING,
62 "%s: invalid word 0x%lx\n",
63 __func__, value);
64 return -EINVAL;
65 }
42 66
67 pvt->injection.word = (u32) value;
43 return count; 68 return count;
44 } 69 }
45 return ret; 70 return ret;
46} 71}
47 72
73static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
74{
75 struct amd64_pvt *pvt = mci->pvt_info;
76 return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
77}
78
48/* 79/*
49 * store 16 bit error injection vector which enables injecting errors to the 80 * store 16 bit error injection vector which enables injecting errors to the
50 * corresponding bit within the error injection word above. When used during a 81 * corresponding bit within the error injection word above. When used during a
@@ -60,8 +91,14 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
60 ret = strict_strtoul(data, 16, &value); 91 ret = strict_strtoul(data, 16, &value);
61 if (ret != -EINVAL) { 92 if (ret != -EINVAL) {
62 93
63 pvt->injection.bit_map = (u32) value & 0xFFFF; 94 if (value & 0xFFFF0000) {
95 amd64_printk(KERN_WARNING,
96 "%s: invalid EccVector: 0x%lx\n",
97 __func__, value);
98 return -EINVAL;
99 }
64 100
101 pvt->injection.bit_map = (u32) value;
65 return count; 102 return count;
66 } 103 }
67 return ret; 104 return ret;
@@ -147,7 +184,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
147 .name = "inject_section", 184 .name = "inject_section",
148 .mode = (S_IRUGO | S_IWUSR) 185 .mode = (S_IRUGO | S_IWUSR)
149 }, 186 },
150 .show = NULL, 187 .show = amd64_inject_section_show,
151 .store = amd64_inject_section_store, 188 .store = amd64_inject_section_store,
152 }, 189 },
153 { 190 {
@@ -155,7 +192,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
155 .name = "inject_word", 192 .name = "inject_word",
156 .mode = (S_IRUGO | S_IWUSR) 193 .mode = (S_IRUGO | S_IWUSR)
157 }, 194 },
158 .show = NULL, 195 .show = amd64_inject_word_show,
159 .store = amd64_inject_word_store, 196 .store = amd64_inject_word_store,
160 }, 197 },
161 { 198 {
@@ -163,7 +200,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
163 .name = "inject_ecc_vector", 200 .name = "inject_ecc_vector",
164 .mode = (S_IRUGO | S_IWUSR) 201 .mode = (S_IRUGO | S_IWUSR)
165 }, 202 },
166 .show = NULL, 203 .show = amd64_inject_ecc_vector_show,
167 .store = amd64_inject_ecc_vector_store, 204 .store = amd64_inject_ecc_vector_store,
168 }, 205 },
169 { 206 {
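
The injection-attribute changes above add show handlers and make each store handler reject out-of-range input with -EINVAL instead of silently clamping it. A small stand-alone C sketch of the same parse-then-range-check shape follows, using userspace strtoul as a stand-in for strict_strtoul; the limits mirror the ones in the hunk (section 0-3, word 0-8, 16-bit ECC vector).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse 'data' in the given base and accept it only inside [0, max]. */
static int parse_bounded(const char *data, int base,
			 unsigned long max, unsigned long *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(data, &end, base);
	if (errno || end == data || v > max)
		return -EINVAL;         /* reject, do not clamp */
	*out = v;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("section \"2\":  %d\n", parse_bounded("2", 10, 3, &v));
	printf("word \"9\":     %d\n", parse_bounded("9", 10, 8, &v));
	printf("ecc \"1ffff\":  %d\n", parse_bounded("1ffff", 16, 0xFFFF, &v));
	return 0;
}
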
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index ced186d7e9a9..5089331544ed 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -33,6 +33,7 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/poll.h> 34#include <linux/poll.h>
35#include <linux/preempt.h> 35#include <linux/preempt.h>
36#include <linux/sched.h>
36#include <linux/spinlock.h> 37#include <linux/spinlock.h>
37#include <linux/time.h> 38#include <linux/time.h>
38#include <linux/uaccess.h> 39#include <linux/uaccess.h>
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 420a96e7f2db..051d1ebbd287 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -939,7 +939,7 @@ static int __init ibft_init(void)
939 939
940 if (ibft_addr) { 940 if (ibft_addr) {
941 printk(KERN_INFO "iBFT detected at 0x%llx.\n", 941 printk(KERN_INFO "iBFT detected at 0x%llx.\n",
942 (u64)virt_to_phys((void *)ibft_addr)); 942 (u64)isa_virt_to_bus(ibft_addr));
943 943
944 rc = ibft_check_device(); 944 rc = ibft_check_device();
945 if (rc) 945 if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index d53fbbfefa3e..dfb15c06c88f 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -65,10 +65,10 @@ void __init reserve_ibft_region(void)
65 * so skip that area */ 65 * so skip that area */
66 if (pos == VGA_MEM) 66 if (pos == VGA_MEM)
67 pos += VGA_SIZE; 67 pos += VGA_SIZE;
68 virt = phys_to_virt(pos); 68 virt = isa_bus_to_virt(pos);
69 if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { 69 if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) {
70 unsigned long *addr = 70 unsigned long *addr =
71 (unsigned long *)phys_to_virt(pos + 4); 71 (unsigned long *)isa_bus_to_virt(pos + 4);
72 len = *addr; 72 len = *addr;
73 /* if the length of the table extends past 1M, 73 /* if the length of the table extends past 1M,
74 * the table cannot be valid. */ 74 * the table cannot be valid. */
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bb11a429394a..662ed923d9eb 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1487,7 +1487,7 @@ static int gpiolib_open(struct inode *inode, struct file *file)
1487 return single_open(file, gpiolib_show, NULL); 1487 return single_open(file, gpiolib_show, NULL);
1488} 1488}
1489 1489
1490static struct file_operations gpiolib_operations = { 1490static const struct file_operations gpiolib_operations = {
1491 .open = gpiolib_open, 1491 .open = gpiolib_open,
1492 .read = seq_read, 1492 .read = seq_read,
1493 .llseek = seq_lseek, 1493 .llseek = seq_lseek,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8e7b0ebece0c..5cae0b3eee9b 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1556,8 +1556,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
1556 struct drm_crtc *crtc; 1556 struct drm_crtc *crtc;
1557 int ret = 0; 1557 int ret = 0;
1558 1558
1559 DRM_DEBUG_KMS("\n");
1560
1561 if (!req->flags) { 1559 if (!req->flags) {
1562 DRM_ERROR("no operation set\n"); 1560 DRM_ERROR("no operation set\n");
1563 return -EINVAL; 1561 return -EINVAL;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 819ddcbfcce5..23dc9c115fd9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -454,6 +454,96 @@ out_free:
454} 454}
455EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); 455EXPORT_SYMBOL(drm_fb_helper_init_crtc_count);
456 456
457static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
458 u16 blue, u16 regno, struct fb_info *info)
459{
460 struct drm_fb_helper *fb_helper = info->par;
461 struct drm_framebuffer *fb = fb_helper->fb;
462 int pindex;
463
464 pindex = regno;
465
466 if (fb->bits_per_pixel == 16) {
467 pindex = regno << 3;
468
469 if (fb->depth == 16 && regno > 63)
470 return;
471 if (fb->depth == 15 && regno > 31)
472 return;
473
474 if (fb->depth == 16) {
475 u16 r, g, b;
476 int i;
477 if (regno < 32) {
478 for (i = 0; i < 8; i++)
479 fb_helper->funcs->gamma_set(crtc, red,
480 green, blue, pindex + i);
481 }
482
483 fb_helper->funcs->gamma_get(crtc, &r,
484 &g, &b,
485 pindex >> 1);
486
487 for (i = 0; i < 4; i++)
488 fb_helper->funcs->gamma_set(crtc, r,
489 green, b,
490 (pindex >> 1) + i);
491 }
492 }
493
494 if (fb->depth != 16)
495 fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
496
497 if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
498 ((u32 *) fb->pseudo_palette)[regno] =
499 (regno << info->var.red.offset) |
500 (regno << info->var.green.offset) |
501 (regno << info->var.blue.offset);
502 }
503}
504
505int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
506{
507 struct drm_fb_helper *fb_helper = info->par;
508 struct drm_device *dev = fb_helper->dev;
509 u16 *red, *green, *blue, *transp;
510 struct drm_crtc *crtc;
511 int i, rc = 0;
512 int start;
513
514 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
515 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
516 for (i = 0; i < fb_helper->crtc_count; i++) {
517 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
518 break;
519 }
520 if (i == fb_helper->crtc_count)
521 continue;
522
523 red = cmap->red;
524 green = cmap->green;
525 blue = cmap->blue;
526 transp = cmap->transp;
527 start = cmap->start;
528
529 for (i = 0; i < cmap->len; i++) {
530 u16 hred, hgreen, hblue, htransp = 0xffff;
531
532 hred = *red++;
533 hgreen = *green++;
534 hblue = *blue++;
535
536 if (transp)
537 htransp = *transp++;
538
539 setcolreg(crtc, hred, hgreen, hblue, start++, info);
540 }
541 crtc_funcs->load_lut(crtc);
542 }
543 return rc;
544}
545EXPORT_SYMBOL(drm_fb_helper_setcmap);
546
457int drm_fb_helper_setcolreg(unsigned regno, 547int drm_fb_helper_setcolreg(unsigned regno,
458 unsigned red, 548 unsigned red,
459 unsigned green, 549 unsigned green,
@@ -466,9 +556,11 @@ int drm_fb_helper_setcolreg(unsigned regno,
466 struct drm_crtc *crtc; 556 struct drm_crtc *crtc;
467 int i; 557 int i;
468 558
469 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 559 if (regno > 255)
470 struct drm_framebuffer *fb = fb_helper->fb; 560 return 1;
471 561
562 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
563 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
472 for (i = 0; i < fb_helper->crtc_count; i++) { 564 for (i = 0; i < fb_helper->crtc_count; i++) {
473 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) 565 if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
474 break; 566 break;
@@ -476,35 +568,9 @@ int drm_fb_helper_setcolreg(unsigned regno,
476 if (i == fb_helper->crtc_count) 568 if (i == fb_helper->crtc_count)
477 continue; 569 continue;
478 570
479 if (regno > 255)
480 return 1;
481
482 if (fb->depth == 8) {
483 fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
484 return 0;
485 }
486 571
487 if (regno < 16) { 572 setcolreg(crtc, red, green, blue, regno, info);
488 switch (fb->depth) { 573 crtc_funcs->load_lut(crtc);
489 case 15:
490 fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
491 ((green & 0xf800) >> 6) |
492 ((blue & 0xf800) >> 11);
493 break;
494 case 16:
495 fb->pseudo_palette[regno] = (red & 0xf800) |
496 ((green & 0xfc00) >> 5) |
497 ((blue & 0xf800) >> 11);
498 break;
499 case 24:
500 case 32:
501 fb->pseudo_palette[regno] =
502 (((red >> 8) & 0xff) << info->var.red.offset) |
503 (((green >> 8) & 0xff) << info->var.green.offset) |
504 (((blue >> 8) & 0xff) << info->var.blue.offset);
505 break;
506 }
507 }
508 } 574 }
509 return 0; 575 return 0;
510} 576}
@@ -674,6 +740,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
674EXPORT_SYMBOL(drm_fb_helper_pan_display); 740EXPORT_SYMBOL(drm_fb_helper_pan_display);
675 741
676int drm_fb_helper_single_fb_probe(struct drm_device *dev, 742int drm_fb_helper_single_fb_probe(struct drm_device *dev,
743 int preferred_bpp,
677 int (*fb_create)(struct drm_device *dev, 744 int (*fb_create)(struct drm_device *dev,
678 uint32_t fb_width, 745 uint32_t fb_width,
679 uint32_t fb_height, 746 uint32_t fb_height,
@@ -696,6 +763,11 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
696 struct drm_fb_helper *fb_helper; 763 struct drm_fb_helper *fb_helper;
697 uint32_t surface_depth = 24, surface_bpp = 32; 764 uint32_t surface_depth = 24, surface_bpp = 32;
698 765
766 /* if driver picks 8 or 16 by default use that
767 for both depth/bpp */
768 if (preferred_bpp != surface_bpp) {
769 surface_depth = surface_bpp = preferred_bpp;
770 }
699 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 771 /* first up get a count of crtcs now in use and new min/maxes width/heights */
700 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 772 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
701 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; 773 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
@@ -851,10 +923,12 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
851} 923}
852EXPORT_SYMBOL(drm_fb_helper_free); 924EXPORT_SYMBOL(drm_fb_helper_free);
853 925
854void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) 926void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
927 uint32_t depth)
855{ 928{
856 info->fix.type = FB_TYPE_PACKED_PIXELS; 929 info->fix.type = FB_TYPE_PACKED_PIXELS;
857 info->fix.visual = FB_VISUAL_TRUECOLOR; 930 info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
931 FB_VISUAL_DIRECTCOLOR;
858 info->fix.type_aux = 0; 932 info->fix.type_aux = 0;
859 info->fix.xpanstep = 1; /* doing it in hw */ 933 info->fix.xpanstep = 1; /* doing it in hw */
860 info->fix.ypanstep = 1; /* doing it in hw */ 934 info->fix.ypanstep = 1; /* doing it in hw */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 93ff6c03733e..ffa39671751f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3244,6 +3244,16 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
3244 intel_crtc->lut_b[regno] = blue >> 8; 3244 intel_crtc->lut_b[regno] = blue >> 8;
3245} 3245}
3246 3246
3247void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
3248 u16 *blue, int regno)
3249{
3250 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3251
3252 *red = intel_crtc->lut_r[regno] << 8;
3253 *green = intel_crtc->lut_g[regno] << 8;
3254 *blue = intel_crtc->lut_b[regno] << 8;
3255}
3256
3247static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 3257static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
3248 u16 *blue, uint32_t size) 3258 u16 *blue, uint32_t size)
3249{ 3259{
@@ -3835,6 +3845,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
3835 .mode_set_base = intel_pipe_set_base, 3845 .mode_set_base = intel_pipe_set_base,
3836 .prepare = intel_crtc_prepare, 3846 .prepare = intel_crtc_prepare,
3837 .commit = intel_crtc_commit, 3847 .commit = intel_crtc_commit,
3848 .load_lut = intel_crtc_load_lut,
3838}; 3849};
3839 3850
3840static const struct drm_crtc_funcs intel_crtc_funcs = { 3851static const struct drm_crtc_funcs intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8aa4b7f30daa..ef61fe9507e2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -175,6 +175,8 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc);
175extern void intelfb_restore(void); 175extern void intelfb_restore(void);
176extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 176extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
177 u16 blue, int regno); 177 u16 blue, int regno);
178extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
179 u16 *blue, int regno);
178 180
179extern int intel_framebuffer_create(struct drm_device *dev, 181extern int intel_framebuffer_create(struct drm_device *dev,
180 struct drm_mode_fb_cmd *mode_cmd, 182 struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e85d7e9eed7d..2b0fe54cd92c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -60,10 +60,12 @@ static struct fb_ops intelfb_ops = {
60 .fb_imageblit = cfb_imageblit, 60 .fb_imageblit = cfb_imageblit,
61 .fb_pan_display = drm_fb_helper_pan_display, 61 .fb_pan_display = drm_fb_helper_pan_display,
62 .fb_blank = drm_fb_helper_blank, 62 .fb_blank = drm_fb_helper_blank,
63 .fb_setcmap = drm_fb_helper_setcmap,
63}; 64};
64 65
65static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 66static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
66 .gamma_set = intel_crtc_fb_gamma_set, 67 .gamma_set = intel_crtc_fb_gamma_set,
68 .gamma_get = intel_crtc_fb_gamma_get,
67}; 69};
68 70
69 71
@@ -123,6 +125,10 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
123 struct device *device = &dev->pdev->dev; 125 struct device *device = &dev->pdev->dev;
124 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1; 126 int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
125 127
128 /* we don't do packed 24bpp */
129 if (surface_bpp == 24)
130 surface_bpp = 32;
131
126 mode_cmd.width = surface_width; 132 mode_cmd.width = surface_width;
127 mode_cmd.height = surface_height; 133 mode_cmd.height = surface_height;
128 134
@@ -206,7 +212,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
206 212
207// memset(info->screen_base, 0, size); 213// memset(info->screen_base, 0, size);
208 214
209 drm_fb_helper_fill_fix(info, fb->pitch); 215 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
210 drm_fb_helper_fill_var(info, fb, fb_width, fb_height); 216 drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
211 217
212 /* FIXME: we really shouldn't expose mmio space at all */ 218 /* FIXME: we really shouldn't expose mmio space at all */
@@ -244,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
244 int ret; 250 int ret;
245 251
246 DRM_DEBUG("\n"); 252 DRM_DEBUG("\n");
247 ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); 253 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
248 return ret; 254 return ret;
249} 255}
250EXPORT_SYMBOL(intelfb_probe); 256EXPORT_SYMBOL(intelfb_probe);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 6a015929deee..14fa9701aeb3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -733,6 +733,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
733 .mode_set_base = atombios_crtc_set_base, 733 .mode_set_base = atombios_crtc_set_base,
734 .prepare = atombios_crtc_prepare, 734 .prepare = atombios_crtc_prepare,
735 .commit = atombios_crtc_commit, 735 .commit = atombios_crtc_commit,
736 .load_lut = radeon_crtc_load_lut,
736}; 737};
737 738
738void radeon_atombios_init_crtc(struct drm_device *dev, 739void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e6cce24de802..161094c07d94 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -32,6 +32,9 @@
32#include "radeon_reg.h" 32#include "radeon_reg.h"
33#include "radeon.h" 33#include "radeon.h"
34#include "r100d.h" 34#include "r100d.h"
35#include "rs100d.h"
36#include "rv200d.h"
37#include "rv250d.h"
35 38
36#include <linux/firmware.h> 39#include <linux/firmware.h>
37#include <linux/platform_device.h> 40#include <linux/platform_device.h>
@@ -60,18 +63,7 @@ MODULE_FIRMWARE(FIRMWARE_R520);
60 63
61/* This files gather functions specifics to: 64/* This files gather functions specifics to:
62 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 65 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
63 *
64 * Some of these functions might be used by newer ASICs.
65 */ 66 */
66int r200_init(struct radeon_device *rdev);
67void r100_hdp_reset(struct radeon_device *rdev);
68void r100_gpu_init(struct radeon_device *rdev);
69int r100_gui_wait_for_idle(struct radeon_device *rdev);
70int r100_mc_wait_for_idle(struct radeon_device *rdev);
71void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
72void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
73int r100_debugfs_mc_info_init(struct radeon_device *rdev);
74
75 67
76/* 68/*
77 * PCI GART 69 * PCI GART
@@ -152,136 +144,6 @@ void r100_pci_gart_fini(struct radeon_device *rdev)
152 radeon_gart_fini(rdev); 144 radeon_gart_fini(rdev);
153} 145}
154 146
155
156/*
157 * MC
158 */
159void r100_mc_disable_clients(struct radeon_device *rdev)
160{
161 uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
162
163 /* FIXME: is this function correct for rs100,rs200,rs300 ? */
164 if (r100_gui_wait_for_idle(rdev)) {
165 printk(KERN_WARNING "Failed to wait GUI idle while "
166 "programming pipes. Bad things might happen.\n");
167 }
168
169 /* stop display and memory access */
170 ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
171 WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
172 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
173 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
174 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
175
176 r100_gpu_wait_for_vsync(rdev);
177
178 WREG32(RADEON_CRTC_GEN_CNTL,
179 (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
180 RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
181
182 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
183 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
184
185 r100_gpu_wait_for_vsync2(rdev);
186 WREG32(RADEON_CRTC2_GEN_CNTL,
187 (crtc2_gen_cntl &
188 ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
189 RADEON_CRTC2_DISP_REQ_EN_B);
190 }
191
192 udelay(500);
193}
194
195void r100_mc_setup(struct radeon_device *rdev)
196{
197 uint32_t tmp;
198 int r;
199
200 r = r100_debugfs_mc_info_init(rdev);
201 if (r) {
202 DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
203 }
204 /* Write VRAM size in case we are limiting it */
205 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
206 /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
207 * if the aperture is 64MB but we have 32MB VRAM
208 * we report only 32MB VRAM but we have to set MC_FB_LOCATION
209 * to 64MB, otherwise the gpu accidentially dies */
210 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
211 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
212 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
213 WREG32(RADEON_MC_FB_LOCATION, tmp);
214
215 /* Enable bus mastering */
216 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
217 WREG32(RADEON_BUS_CNTL, tmp);
218
219 if (rdev->flags & RADEON_IS_AGP) {
220 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
221 tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
222 tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
223 WREG32(RADEON_MC_AGP_LOCATION, tmp);
224 WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
225 } else {
226 WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
227 WREG32(RADEON_AGP_BASE, 0);
228 }
229
230 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
231 tmp |= (7 << 28);
232 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
233 (void)RREG32(RADEON_HOST_PATH_CNTL);
234 WREG32(RADEON_HOST_PATH_CNTL, tmp);
235 (void)RREG32(RADEON_HOST_PATH_CNTL);
236}
237
238int r100_mc_init(struct radeon_device *rdev)
239{
240 int r;
241
242 if (r100_debugfs_rbbm_init(rdev)) {
243 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
244 }
245
246 r100_gpu_init(rdev);
247 /* Disable gart which also disable out of gart access */
248 r100_pci_gart_disable(rdev);
249
250 /* Setup GPU memory space */
251 rdev->mc.gtt_location = 0xFFFFFFFFUL;
252 if (rdev->flags & RADEON_IS_AGP) {
253 r = radeon_agp_init(rdev);
254 if (r) {
255 printk(KERN_WARNING "[drm] Disabling AGP\n");
256 rdev->flags &= ~RADEON_IS_AGP;
257 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
258 } else {
259 rdev->mc.gtt_location = rdev->mc.agp_base;
260 }
261 }
262 r = radeon_mc_setup(rdev);
263 if (r) {
264 return r;
265 }
266
267 r100_mc_disable_clients(rdev);
268 if (r100_mc_wait_for_idle(rdev)) {
269 printk(KERN_WARNING "Failed to wait MC idle while "
270 "programming pipes. Bad things might happen.\n");
271 }
272
273 r100_mc_setup(rdev);
274 return 0;
275}
276
277void r100_mc_fini(struct radeon_device *rdev)
278{
279}
280
281
282/*
283 * Interrupts
284 */
285int r100_irq_set(struct radeon_device *rdev) 147int r100_irq_set(struct radeon_device *rdev)
286{ 148{
287 uint32_t tmp = 0; 149 uint32_t tmp = 0;
@@ -358,10 +220,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
358 return RREG32(RADEON_CRTC2_CRNT_FRAME); 220 return RREG32(RADEON_CRTC2_CRNT_FRAME);
359} 221}
360 222
361
362/*
363 * Fence emission
364 */
365void r100_fence_ring_emit(struct radeon_device *rdev, 223void r100_fence_ring_emit(struct radeon_device *rdev,
366 struct radeon_fence *fence) 224 struct radeon_fence *fence)
367{ 225{
@@ -377,10 +235,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
377 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 235 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
378} 236}
379 237
380
381/*
382 * Writeback
383 */
384int r100_wb_init(struct radeon_device *rdev) 238int r100_wb_init(struct radeon_device *rdev)
385{ 239{
386 int r; 240 int r;
@@ -504,10 +358,6 @@ int r100_copy_blit(struct radeon_device *rdev,
504 return r; 358 return r;
505} 359}
506 360
507
508/*
509 * CP
510 */
511static int r100_cp_wait_for_idle(struct radeon_device *rdev) 361static int r100_cp_wait_for_idle(struct radeon_device *rdev)
512{ 362{
513 unsigned i; 363 unsigned i;
@@ -612,6 +462,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
612 } 462 }
613 return err; 463 return err;
614} 464}
465
615static void r100_cp_load_microcode(struct radeon_device *rdev) 466static void r100_cp_load_microcode(struct radeon_device *rdev)
616{ 467{
617 const __be32 *fw_data; 468 const __be32 *fw_data;
@@ -978,7 +829,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
978 829
979 header = radeon_get_ib_value(p, h_idx); 830 header = radeon_get_ib_value(p, h_idx);
980 crtc_id = radeon_get_ib_value(p, h_idx + 5); 831 crtc_id = radeon_get_ib_value(p, h_idx + 5);
981 reg = header >> 2; 832 reg = CP_PACKET0_GET_REG(header);
982 mutex_lock(&p->rdev->ddev->mode_config.mutex); 833 mutex_lock(&p->rdev->ddev->mode_config.mutex);
983 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 834 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
984 if (!obj) { 835 if (!obj) {
@@ -1990,7 +1841,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1990 r100_pll_errata_after_data(rdev); 1841 r100_pll_errata_after_data(rdev);
1991} 1842}
1992 1843
1993int r100_init(struct radeon_device *rdev) 1844void r100_set_safe_registers(struct radeon_device *rdev)
1994{ 1845{
1995 if (ASIC_IS_RN50(rdev)) { 1846 if (ASIC_IS_RN50(rdev)) {
1996 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 1847 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -1999,9 +1850,8 @@ int r100_init(struct radeon_device *rdev)
1999 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; 1850 rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
2000 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); 1851 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
2001 } else { 1852 } else {
2002 return r200_init(rdev); 1853 r200_set_safe_registers(rdev);
2003 } 1854 }
2004 return 0;
2005} 1855}
2006 1856
2007/* 1857/*
@@ -2299,9 +2149,11 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2299 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 2149 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2300 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; 2150 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
2301 } 2151 }
2302 if (rdev->mode_info.crtcs[1]->base.enabled) { 2152 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
2303 mode2 = &rdev->mode_info.crtcs[1]->base.mode; 2153 if (rdev->mode_info.crtcs[1]->base.enabled) {
2304 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; 2154 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
2155 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
2156 }
2305 } 2157 }
2306 2158
2307 min_mem_eff.full = rfixed_const_8(0); 2159 min_mem_eff.full = rfixed_const_8(0);
@@ -3114,7 +2966,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3114 WREG32(R_000740_CP_CSQ_CNTL, 0); 2966 WREG32(R_000740_CP_CSQ_CNTL, 0);
3115 2967
3116 /* Save few CRTC registers */ 2968 /* Save few CRTC registers */
3117 save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); 2969 save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
3118 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); 2970 save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
3119 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); 2971 save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
3120 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); 2972 save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
@@ -3124,7 +2976,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
3124 } 2976 }
3125 2977
3126 /* Disable VGA aperture access */ 2978 /* Disable VGA aperture access */
3127 WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); 2979 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
3128 /* Disable cursor, overlay, crtc */ 2980 /* Disable cursor, overlay, crtc */
3129 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); 2981 WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
3130 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | 2982 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
@@ -3156,10 +3008,264 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
3156 rdev->mc.vram_location); 3008 rdev->mc.vram_location);
3157 } 3009 }
3158 /* Restore CRTC registers */ 3010 /* Restore CRTC registers */
3159 WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); 3011 WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
3160 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); 3012 WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
3161 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); 3013 WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
3162 if (!(rdev->flags & RADEON_SINGLE_CRTC)) { 3014 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
3163 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); 3015 WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
3164 } 3016 }
3165} 3017}
3018
3019void r100_vga_render_disable(struct radeon_device *rdev)
3020{
3021 u32 tmp;
3022
3023 tmp = RREG8(R_0003C2_GENMO_WT);
3024 WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
3025}
3026
3027static void r100_debugfs(struct radeon_device *rdev)
3028{
3029 int r;
3030
3031 r = r100_debugfs_mc_info_init(rdev);
3032 if (r)
3033 dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
3034}
3035
3036static void r100_mc_program(struct radeon_device *rdev)
3037{
3038 struct r100_mc_save save;
3039
3040 /* Stops all mc clients */
3041 r100_mc_stop(rdev, &save);
3042 if (rdev->flags & RADEON_IS_AGP) {
3043 WREG32(R_00014C_MC_AGP_LOCATION,
3044 S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
3045 S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
3046 WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
3047 if (rdev->family > CHIP_RV200)
3048 WREG32(R_00015C_AGP_BASE_2,
3049 upper_32_bits(rdev->mc.agp_base) & 0xff);
3050 } else {
3051 WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
3052 WREG32(R_000170_AGP_BASE, 0);
3053 if (rdev->family > CHIP_RV200)
3054 WREG32(R_00015C_AGP_BASE_2, 0);
3055 }
3056 /* Wait for mc idle */
3057 if (r100_mc_wait_for_idle(rdev))
3058 dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
3059 /* Program MC, should be a 32-bit limited address space */
3060 WREG32(R_000148_MC_FB_LOCATION,
3061 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
3062 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
3063 r100_mc_resume(rdev, &save);
3064}
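For reference, the MC_FB_LOCATION value programmed above packs the start and top of VRAM as two 16-bit fields in 64 KB units, using the S_000148_* helpers this patch adds to r100d.h. A minimal sketch of the encoding with illustrative numbers (a hypothetical 256 MB aperture based at 0, not values taken from the patch):

	u32 vram_start = 0x00000000;   /* hypothetical base           */
	u32 vram_end   = 0x0FFFFFFF;   /* hypothetical top, inclusive */
	u32 fb_loc;

	fb_loc = S_000148_MC_FB_START(vram_start >> 16) |
		 S_000148_MC_FB_TOP(vram_end >> 16);
	/* fb_loc == 0x0FFF0000: START = 0x0000, TOP = 0x0FFF */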
3065
3066void r100_clock_startup(struct radeon_device *rdev)
3067{
3068 u32 tmp;
3069
3070 if (radeon_dynclks != -1 && radeon_dynclks)
3071 radeon_legacy_set_clock_gating(rdev, 1);
3072 /* We need to force on some of the blocks */
3073 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
3074 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
3075 if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
3076 tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
3077 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
3078}
3079
3080static int r100_startup(struct radeon_device *rdev)
3081{
3082 int r;
3083
3084 r100_mc_program(rdev);
3085 /* Resume clock */
3086 r100_clock_startup(rdev);
3087 /* Initialize GPU configuration (# pipes, ...) */
3088 r100_gpu_init(rdev);
3089 /* Initialize GART (initialize after TTM so we can allocate
3090 * memory through TTM but finalize after TTM) */
3091 if (rdev->flags & RADEON_IS_PCI) {
3092 r = r100_pci_gart_enable(rdev);
3093 if (r)
3094 return r;
3095 }
3096 /* Enable IRQ */
3097 rdev->irq.sw_int = true;
3098 r100_irq_set(rdev);
3099 /* 1M ring buffer */
3100 r = r100_cp_init(rdev, 1024 * 1024);
3101 if (r) {
3102 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
3103 return r;
3104 }
3105 r = r100_wb_init(rdev);
3106 if (r)
3107 dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
3108 r = r100_ib_init(rdev);
3109 if (r) {
3110 dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
3111 return r;
3112 }
3113 return 0;
3114}
3115
3116int r100_resume(struct radeon_device *rdev)
3117{
3118 /* Make sure GART is not working */
3119 if (rdev->flags & RADEON_IS_PCI)
3120 r100_pci_gart_disable(rdev);
3121 /* Resume clock before doing reset */
3122 r100_clock_startup(rdev);
3123 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3124 if (radeon_gpu_reset(rdev)) {
3125 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3126 RREG32(R_000E40_RBBM_STATUS),
3127 RREG32(R_0007C0_CP_STAT));
3128 }
3129 /* post */
3130 radeon_combios_asic_init(rdev->ddev);
3131 /* Resume clock after posting */
3132 r100_clock_startup(rdev);
3133 return r100_startup(rdev);
3134}
3135
3136int r100_suspend(struct radeon_device *rdev)
3137{
3138 r100_cp_disable(rdev);
3139 r100_wb_disable(rdev);
3140 r100_irq_disable(rdev);
3141 if (rdev->flags & RADEON_IS_PCI)
3142 r100_pci_gart_disable(rdev);
3143 return 0;
3144}
3145
3146void r100_fini(struct radeon_device *rdev)
3147{
3148 r100_suspend(rdev);
3149 r100_cp_fini(rdev);
3150 r100_wb_fini(rdev);
3151 r100_ib_fini(rdev);
3152 radeon_gem_fini(rdev);
3153 if (rdev->flags & RADEON_IS_PCI)
3154 r100_pci_gart_fini(rdev);
3155 radeon_irq_kms_fini(rdev);
3156 radeon_fence_driver_fini(rdev);
3157 radeon_object_fini(rdev);
3158 radeon_atombios_fini(rdev);
3159 kfree(rdev->bios);
3160 rdev->bios = NULL;
3161}
3162
3163int r100_mc_init(struct radeon_device *rdev)
3164{
3165 int r;
3166 u32 tmp;
3167
3168 /* Setup GPU memory space */
3169 rdev->mc.vram_location = 0xFFFFFFFFUL;
3170 rdev->mc.gtt_location = 0xFFFFFFFFUL;
3171 if (rdev->flags & RADEON_IS_IGP) {
3172 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
3173 rdev->mc.vram_location = tmp << 16;
3174 }
3175 if (rdev->flags & RADEON_IS_AGP) {
3176 r = radeon_agp_init(rdev);
3177 if (r) {
3178 printk(KERN_WARNING "[drm] Disabling AGP\n");
3179 rdev->flags &= ~RADEON_IS_AGP;
3180 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
3181 } else {
3182 rdev->mc.gtt_location = rdev->mc.agp_base;
3183 }
3184 }
3185 r = radeon_mc_setup(rdev);
3186 if (r)
3187 return r;
3188 return 0;
3189}
3190
3191int r100_init(struct radeon_device *rdev)
3192{
3193 int r;
3194
3195 /* Register debugfs file specific to this group of asics */
3196 r100_debugfs(rdev);
3197 /* Disable VGA */
3198 r100_vga_render_disable(rdev);
3199 /* Initialize scratch registers */
3200 radeon_scratch_init(rdev);
3201 /* Initialize surface registers */
3202 radeon_surface_init(rdev);
3203 /* TODO: disable VGA need to use VGA request */
3204 /* BIOS*/
3205 if (!radeon_get_bios(rdev)) {
3206 if (ASIC_IS_AVIVO(rdev))
3207 return -EINVAL;
3208 }
3209 if (rdev->is_atom_bios) {
3210 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
3211 return -EINVAL;
3212 } else {
3213 r = radeon_combios_init(rdev);
3214 if (r)
3215 return r;
3216 }
3217 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3218 if (radeon_gpu_reset(rdev)) {
3219 dev_warn(rdev->dev,
3220 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3221 RREG32(R_000E40_RBBM_STATUS),
3222 RREG32(R_0007C0_CP_STAT));
3223 }
3224 /* check if cards are posted or not */
3225 if (!radeon_card_posted(rdev) && rdev->bios) {
3226 DRM_INFO("GPU not posted. posting now...\n");
3227 radeon_combios_asic_init(rdev->ddev);
3228 }
3229 /* Set asic errata */
3230 r100_errata(rdev);
3231 /* Initialize clocks */
3232 radeon_get_clock_info(rdev->ddev);
3233 /* Get vram informations */
3234 r100_vram_info(rdev);
3235 /* Initialize memory controller (also test AGP) */
3236 r = r100_mc_init(rdev);
3237 if (r)
3238 return r;
3239 /* Fence driver */
3240 r = radeon_fence_driver_init(rdev);
3241 if (r)
3242 return r;
3243 r = radeon_irq_kms_init(rdev);
3244 if (r)
3245 return r;
3246 /* Memory manager */
3247 r = radeon_object_init(rdev);
3248 if (r)
3249 return r;
3250 if (rdev->flags & RADEON_IS_PCI) {
3251 r = r100_pci_gart_init(rdev);
3252 if (r)
3253 return r;
3254 }
3255 r100_set_safe_registers(rdev);
3256 rdev->accel_working = true;
3257 r = r100_startup(rdev);
3258 if (r) {
3259 /* Something went wrong with the accel init, stop accel */
3260 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3261 r100_suspend(rdev);
3262 r100_cp_fini(rdev);
3263 r100_wb_fini(rdev);
3264 r100_ib_fini(rdev);
3265 if (rdev->flags & RADEON_IS_PCI)
3266 r100_pci_gart_fini(rdev);
3267 radeon_irq_kms_fini(rdev);
3268 rdev->accel_working = false;
3269 }
3270 return 0;
3271}
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index c4b257ec920e..df29a630c466 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -381,6 +381,24 @@
381#define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) 381#define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24)
382#define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) 382#define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F)
383#define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF 383#define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF
384#define R_000148_MC_FB_LOCATION 0x000148
385#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
386#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
387#define C_000148_MC_FB_START 0xFFFF0000
388#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
389#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
390#define C_000148_MC_FB_TOP 0x0000FFFF
391#define R_00014C_MC_AGP_LOCATION 0x00014C
392#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
393#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
394#define C_00014C_MC_AGP_START 0xFFFF0000
395#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
396#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
397#define C_00014C_MC_AGP_TOP 0x0000FFFF
398#define R_000170_AGP_BASE 0x000170
399#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
400#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
401#define C_000170_AGP_BASE_ADDR 0x00000000
384#define R_00023C_DISPLAY_BASE_ADDR 0x00023C 402#define R_00023C_DISPLAY_BASE_ADDR 0x00023C
385#define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) 403#define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
386#define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) 404#define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -403,25 +421,25 @@
403#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) 421#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
404#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) 422#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
405#define C_000360_CUR2_LOCK 0x7FFFFFFF 423#define C_000360_CUR2_LOCK 0x7FFFFFFF
406#define R_0003C0_GENMO_WT 0x0003C0 424#define R_0003C2_GENMO_WT 0x0003C2
407#define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) 425#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
408#define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) 426#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
409#define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE 427#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
410#define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) 428#define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1)
411#define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) 429#define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1)
412#define C_0003C0_VGA_RAM_EN 0xFFFFFFFD 430#define C_0003C2_VGA_RAM_EN 0xFD
413#define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) 431#define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2)
414#define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) 432#define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3)
415#define C_0003C0_VGA_CKSEL 0xFFFFFFF3 433#define C_0003C2_VGA_CKSEL 0xF3
416#define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) 434#define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5)
417#define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) 435#define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1)
418#define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF 436#define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF
419#define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) 437#define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6)
420#define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) 438#define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1)
421#define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF 439#define C_0003C2_VGA_HSYNC_POL 0xBF
422#define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) 440#define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7)
423#define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) 441#define G_0003C2_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1)
424#define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F 442#define C_0003C2_VGA_VSYNC_POL 0x7F
425#define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 443#define R_0003F8_CRTC2_GEN_CNTL 0x0003F8
426#define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) 444#define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0)
427#define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) 445#define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1)
@@ -545,6 +563,46 @@
545#define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) 563#define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5)
546#define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) 564#define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF)
547#define C_000774_SCRATCH_ADDR 0x0000001F 565#define C_000774_SCRATCH_ADDR 0x0000001F
566#define R_0007C0_CP_STAT 0x0007C0
567#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
568#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
569#define C_0007C0_MRU_BUSY 0xFFFFFFFE
570#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
571#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
572#define C_0007C0_MWU_BUSY 0xFFFFFFFD
573#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
574#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
575#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
576#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
577#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
578#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
579#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
580#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
581#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
582#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
583#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
584#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
585#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
586#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
587#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
588#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
589#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
590#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
591#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
592#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
593#define C_0007C0_CSI_BUSY 0xFFFFDFFF
594#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
595#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
596#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
597#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
598#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
599#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
600#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
601#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
602#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
603#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
604#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
605#define C_0007C0_CP_BUSY 0x7FFFFFFF
548#define R_000E40_RBBM_STATUS 0x000E40 606#define R_000E40_RBBM_STATUS 0x000E40
549#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) 607#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
550#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) 608#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
@@ -604,4 +662,53 @@
604#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) 662#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
605#define C_000E40_GUI_ACTIVE 0x7FFFFFFF 663#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
606 664
665
666#define R_00000D_SCLK_CNTL 0x00000D
667#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
668#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
669#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
670#define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8)
671#define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7)
672#define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF
673#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
674#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
675#define C_00000D_FORCE_CP 0xFFFEFFFF
676#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
677#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
678#define C_00000D_FORCE_HDP 0xFFFDFFFF
679#define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18)
680#define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1)
681#define C_00000D_FORCE_DISP 0xFFFBFFFF
682#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
683#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
684#define C_00000D_FORCE_TOP 0xFFF7FFFF
685#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
686#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
687#define C_00000D_FORCE_E2 0xFFEFFFFF
688#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
689#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
690#define C_00000D_FORCE_SE 0xFFDFFFFF
691#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
692#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
693#define C_00000D_FORCE_IDCT 0xFFBFFFFF
694#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
695#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
696#define C_00000D_FORCE_VIP 0xFF7FFFFF
697#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
698#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
699#define C_00000D_FORCE_RE 0xFEFFFFFF
700#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
701#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
702#define C_00000D_FORCE_PB 0xFDFFFFFF
703#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
704#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
705#define C_00000D_FORCE_TAM 0xFBFFFFFF
706#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
707#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
708#define C_00000D_FORCE_TDM 0xF7FFFFFF
709#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
710#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
711#define C_00000D_FORCE_RB 0xEFFFFFFF
712
713
607#endif 714#endif
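All of the definitions added to r100d.h above follow the driver's register macro convention: for a field FIELD of register R_<offset>_<NAME>, S_<offset>_FIELD(x) shifts a value into position, G_<offset>_FIELD(x) extracts it, and C_<offset>_FIELD is the AND-mask that clears it. A minimal sketch of how the triple composes in a read-modify-write, reusing the SCLK_CNTL macros added above (the surrounding logic is illustrative, not code from the patch):

	u32 tmp;

	/* Illustrative read-modify-write with the S_/G_/C_ macro triple. */
	tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
	if (!G_00000D_FORCE_CP(tmp)) {          /* extract the FORCE_CP field */
		tmp &= C_00000D_FORCE_CP;       /* clear it ...               */
		tmp |= S_00000D_FORCE_CP(1);    /* ... then set the new value */
	}
	WREG32_PLL(R_00000D_SCLK_CNTL, tmp);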
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index cf7fea5ff2e5..eb740fc3549f 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -447,9 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
447 return 0; 447 return 0;
448} 448}
449 449
450int r200_init(struct radeon_device *rdev) 450void r200_set_safe_registers(struct radeon_device *rdev)
451{ 451{
452 rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; 452 rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
453 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); 453 rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
454 return 0;
455} 454}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1ebea8cc8c93..e08c4a8974ca 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -33,43 +33,16 @@
33#include "radeon_drm.h" 33#include "radeon_drm.h"
34#include "r100_track.h" 34#include "r100_track.h"
35#include "r300d.h" 35#include "r300d.h"
36 36#include "rv350d.h"
37#include "r300_reg_safe.h" 37#include "r300_reg_safe.h"
38 38
39/* r300,r350,rv350,rv370,rv380 depends on : */ 39/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380 */
40void r100_hdp_reset(struct radeon_device *rdev);
41int r100_cp_reset(struct radeon_device *rdev);
42int r100_rb2d_reset(struct radeon_device *rdev);
43int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
44int r100_pci_gart_enable(struct radeon_device *rdev);
45void r100_mc_setup(struct radeon_device *rdev);
46void r100_mc_disable_clients(struct radeon_device *rdev);
47int r100_gui_wait_for_idle(struct radeon_device *rdev);
48int r100_cs_packet_parse(struct radeon_cs_parser *p,
49 struct radeon_cs_packet *pkt,
50 unsigned idx);
51int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
52int r100_cs_parse_packet0(struct radeon_cs_parser *p,
53 struct radeon_cs_packet *pkt,
54 const unsigned *auth, unsigned n,
55 radeon_packet0_check_t check);
56int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
57 struct radeon_cs_packet *pkt,
58 struct radeon_object *robj);
59
60/* This files gather functions specifics to:
61 * r300,r350,rv350,rv370,rv380
62 *
63 * Some of these functions might be used by newer ASICs.
64 */
65void r300_gpu_init(struct radeon_device *rdev);
66int r300_mc_wait_for_idle(struct radeon_device *rdev);
67int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
68
69 40
70/* 41/*
71 * rv370,rv380 PCIE GART 42 * rv370,rv380 PCIE GART
72 */ 43 */
44static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
45
73void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) 46void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
74{ 47{
75 uint32_t tmp; 48 uint32_t tmp;
@@ -182,59 +155,6 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
182 radeon_gart_fini(rdev); 155 radeon_gart_fini(rdev);
183} 156}
184 157
185/*
186 * MC
187 */
188int r300_mc_init(struct radeon_device *rdev)
189{
190 int r;
191
192 if (r100_debugfs_rbbm_init(rdev)) {
193 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
194 }
195
196 r300_gpu_init(rdev);
197 r100_pci_gart_disable(rdev);
198 if (rdev->flags & RADEON_IS_PCIE) {
199 rv370_pcie_gart_disable(rdev);
200 }
201
202 /* Setup GPU memory space */
203 rdev->mc.vram_location = 0xFFFFFFFFUL;
204 rdev->mc.gtt_location = 0xFFFFFFFFUL;
205 if (rdev->flags & RADEON_IS_AGP) {
206 r = radeon_agp_init(rdev);
207 if (r) {
208 printk(KERN_WARNING "[drm] Disabling AGP\n");
209 rdev->flags &= ~RADEON_IS_AGP;
210 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
211 } else {
212 rdev->mc.gtt_location = rdev->mc.agp_base;
213 }
214 }
215 r = radeon_mc_setup(rdev);
216 if (r) {
217 return r;
218 }
219
220 /* Program GPU memory space */
221 r100_mc_disable_clients(rdev);
222 if (r300_mc_wait_for_idle(rdev)) {
223 printk(KERN_WARNING "Failed to wait MC idle while "
224 "programming pipes. Bad things might happen.\n");
225 }
226 r100_mc_setup(rdev);
227 return 0;
228}
229
230void r300_mc_fini(struct radeon_device *rdev)
231{
232}
233
234
235/*
236 * Fence emission
237 */
238void r300_fence_ring_emit(struct radeon_device *rdev, 158void r300_fence_ring_emit(struct radeon_device *rdev,
239 struct radeon_fence *fence) 159 struct radeon_fence *fence)
240{ 160{
@@ -260,10 +180,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
260 radeon_ring_write(rdev, RADEON_SW_INT_FIRE); 180 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
261} 181}
262 182
263
264/*
265 * Global GPU functions
266 */
267int r300_copy_dma(struct radeon_device *rdev, 183int r300_copy_dma(struct radeon_device *rdev,
268 uint64_t src_offset, 184 uint64_t src_offset,
269 uint64_t dst_offset, 185 uint64_t dst_offset,
@@ -582,11 +498,6 @@ void r300_vram_info(struct radeon_device *rdev)
582 r100_vram_init_sizes(rdev); 498 r100_vram_init_sizes(rdev);
583} 499}
584 500
585
586/*
587 * PCIE Lanes
588 */
589
590void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) 501void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
591{ 502{
592 uint32_t link_width_cntl, mask; 503 uint32_t link_width_cntl, mask;
@@ -646,10 +557,6 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
646 557
647} 558}
648 559
649
650/*
651 * Debugfs info
652 */
653#if defined(CONFIG_DEBUG_FS) 560#if defined(CONFIG_DEBUG_FS)
654static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) 561static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
655{ 562{
@@ -680,7 +587,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = {
680}; 587};
681#endif 588#endif
682 589
683int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) 590static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
684{ 591{
685#if defined(CONFIG_DEBUG_FS) 592#if defined(CONFIG_DEBUG_FS)
686 return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); 593 return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
@@ -689,10 +596,6 @@ int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
689#endif 596#endif
690} 597}
691 598
692
693/*
694 * CS functions
695 */
696static int r300_packet0_check(struct radeon_cs_parser *p, 599static int r300_packet0_check(struct radeon_cs_parser *p,
697 struct radeon_cs_packet *pkt, 600 struct radeon_cs_packet *pkt,
698 unsigned idx, unsigned reg) 601 unsigned idx, unsigned reg)
@@ -1226,12 +1129,6 @@ void r300_set_reg_safe(struct radeon_device *rdev)
1226 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); 1129 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
1227} 1130}
1228 1131
1229int r300_init(struct radeon_device *rdev)
1230{
1231 r300_set_reg_safe(rdev);
1232 return 0;
1233}
1234
1235void r300_mc_program(struct radeon_device *rdev) 1132void r300_mc_program(struct radeon_device *rdev)
1236{ 1133{
1237 struct r100_mc_save save; 1134 struct r100_mc_save save;
@@ -1265,3 +1162,198 @@ void r300_mc_program(struct radeon_device *rdev)
1265 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 1162 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
1266 r100_mc_resume(rdev, &save); 1163 r100_mc_resume(rdev, &save);
1267} 1164}
1165
1166void r300_clock_startup(struct radeon_device *rdev)
1167{
1168 u32 tmp;
1169
1170 if (radeon_dynclks != -1 && radeon_dynclks)
1171 radeon_legacy_set_clock_gating(rdev, 1);
1172 /* We need to force on some of the blocks */
1173 tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
1174 tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
1175 if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
1176 tmp |= S_00000D_FORCE_VAP(1);
1177 WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
1178}
1179
1180static int r300_startup(struct radeon_device *rdev)
1181{
1182 int r;
1183
1184 r300_mc_program(rdev);
1185 /* Resume clock */
1186 r300_clock_startup(rdev);
1187 /* Initialize GPU configuration (# pipes, ...) */
1188 r300_gpu_init(rdev);
1189 /* Initialize GART (initialize after TTM so we can allocate
1190 * memory through TTM but finalize after TTM) */
1191 if (rdev->flags & RADEON_IS_PCIE) {
1192 r = rv370_pcie_gart_enable(rdev);
1193 if (r)
1194 return r;
1195 }
1196 if (rdev->flags & RADEON_IS_PCI) {
1197 r = r100_pci_gart_enable(rdev);
1198 if (r)
1199 return r;
1200 }
1201 /* Enable IRQ */
1202 rdev->irq.sw_int = true;
1203 r100_irq_set(rdev);
1204 /* 1M ring buffer */
1205 r = r100_cp_init(rdev, 1024 * 1024);
1206 if (r) {
1207 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
1208 return r;
1209 }
1210 r = r100_wb_init(rdev);
1211 if (r)
1212 dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
1213 r = r100_ib_init(rdev);
1214 if (r) {
1215 dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
1216 return r;
1217 }
1218 return 0;
1219}
1220
1221int r300_resume(struct radeon_device *rdev)
1222{
1223 /* Make sure GART is not working */
1224 if (rdev->flags & RADEON_IS_PCIE)
1225 rv370_pcie_gart_disable(rdev);
1226 if (rdev->flags & RADEON_IS_PCI)
1227 r100_pci_gart_disable(rdev);
1228 /* Resume clock before doing reset */
1229 r300_clock_startup(rdev);
1230 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1231 if (radeon_gpu_reset(rdev)) {
1232 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1233 RREG32(R_000E40_RBBM_STATUS),
1234 RREG32(R_0007C0_CP_STAT));
1235 }
1236 /* post */
1237 radeon_combios_asic_init(rdev->ddev);
1238 /* Resume clock after posting */
1239 r300_clock_startup(rdev);
1240 return r300_startup(rdev);
1241}
1242
1243int r300_suspend(struct radeon_device *rdev)
1244{
1245 r100_cp_disable(rdev);
1246 r100_wb_disable(rdev);
1247 r100_irq_disable(rdev);
1248 if (rdev->flags & RADEON_IS_PCIE)
1249 rv370_pcie_gart_disable(rdev);
1250 if (rdev->flags & RADEON_IS_PCI)
1251 r100_pci_gart_disable(rdev);
1252 return 0;
1253}
1254
1255void r300_fini(struct radeon_device *rdev)
1256{
1257 r300_suspend(rdev);
1258 r100_cp_fini(rdev);
1259 r100_wb_fini(rdev);
1260 r100_ib_fini(rdev);
1261 radeon_gem_fini(rdev);
1262 if (rdev->flags & RADEON_IS_PCIE)
1263 rv370_pcie_gart_fini(rdev);
1264 if (rdev->flags & RADEON_IS_PCI)
1265 r100_pci_gart_fini(rdev);
1266 radeon_irq_kms_fini(rdev);
1267 radeon_fence_driver_fini(rdev);
1268 radeon_object_fini(rdev);
1269 radeon_atombios_fini(rdev);
1270 kfree(rdev->bios);
1271 rdev->bios = NULL;
1272}
1273
1274int r300_init(struct radeon_device *rdev)
1275{
1276 int r;
1277
1278 /* Disable VGA */
1279 r100_vga_render_disable(rdev);
1280 /* Initialize scratch registers */
1281 radeon_scratch_init(rdev);
1282 /* Initialize surface registers */
1283 radeon_surface_init(rdev);
1284 /* TODO: disable VGA need to use VGA request */
1285 /* BIOS*/
1286 if (!radeon_get_bios(rdev)) {
1287 if (ASIC_IS_AVIVO(rdev))
1288 return -EINVAL;
1289 }
1290 if (rdev->is_atom_bios) {
1291 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
1292 return -EINVAL;
1293 } else {
1294 r = radeon_combios_init(rdev);
1295 if (r)
1296 return r;
1297 }
1298 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
1299 if (radeon_gpu_reset(rdev)) {
1300 dev_warn(rdev->dev,
1301 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1302 RREG32(R_000E40_RBBM_STATUS),
1303 RREG32(R_0007C0_CP_STAT));
1304 }
1305 /* check if cards are posted or not */
1306 if (!radeon_card_posted(rdev) && rdev->bios) {
1307 DRM_INFO("GPU not posted. posting now...\n");
1308 radeon_combios_asic_init(rdev->ddev);
1309 }
1310 /* Set asic errata */
1311 r300_errata(rdev);
1312 /* Initialize clocks */
1313 radeon_get_clock_info(rdev->ddev);
1314 /* Get vram informations */
1315 r300_vram_info(rdev);
1316 /* Initialize memory controller (also test AGP) */
1317 r = r420_mc_init(rdev);
1318 if (r)
1319 return r;
1320 /* Fence driver */
1321 r = radeon_fence_driver_init(rdev);
1322 if (r)
1323 return r;
1324 r = radeon_irq_kms_init(rdev);
1325 if (r)
1326 return r;
1327 /* Memory manager */
1328 r = radeon_object_init(rdev);
1329 if (r)
1330 return r;
1331 if (rdev->flags & RADEON_IS_PCIE) {
1332 r = rv370_pcie_gart_init(rdev);
1333 if (r)
1334 return r;
1335 }
1336 if (rdev->flags & RADEON_IS_PCI) {
1337 r = r100_pci_gart_init(rdev);
1338 if (r)
1339 return r;
1340 }
1341 r300_set_reg_safe(rdev);
1342 rdev->accel_working = true;
1343 r = r300_startup(rdev);
1344 if (r) {
1345 /* Something went wrong with the accel init, stop accel */
1346 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1347 r300_suspend(rdev);
1348 r100_cp_fini(rdev);
1349 r100_wb_fini(rdev);
1350 r100_ib_fini(rdev);
1351 if (rdev->flags & RADEON_IS_PCIE)
1352 rv370_pcie_gart_fini(rdev);
1353 if (rdev->flags & RADEON_IS_PCI)
1354 r100_pci_gart_fini(rdev);
1355 radeon_irq_kms_fini(rdev);
1356 rdev->accel_working = false;
1357 }
1358 return 0;
1359}
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index d4fa3eb1074f..4c73114f0de9 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -96,6 +96,211 @@
96#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) 96#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
97#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) 97#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
98#define C_000170_AGP_BASE_ADDR 0x00000000 98#define C_000170_AGP_BASE_ADDR 0x00000000
99#define R_0007C0_CP_STAT 0x0007C0
100#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
101#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
102#define C_0007C0_MRU_BUSY 0xFFFFFFFE
103#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
104#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
105#define C_0007C0_MWU_BUSY 0xFFFFFFFD
106#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
107#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
108#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
109#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
110#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
111#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
112#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
113#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
114#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
115#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
116#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
117#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
118#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
119#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
120#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
121#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
122#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
123#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
124#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
125#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
126#define C_0007C0_CSI_BUSY 0xFFFFDFFF
127#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
128#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
129#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
130#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
131#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
132#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
133#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
134#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
135#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
136#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
137#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
138#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
139#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
140#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
141#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
142#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
143#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
144#define C_0007C0_CP_BUSY 0x7FFFFFFF
145#define R_000E40_RBBM_STATUS 0x000E40
146#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
147#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
148#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
149#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
150#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
151#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
152#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
153#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
154#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
155#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
156#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
157#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
158#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
159#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
160#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
161#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
162#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
163#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
164#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
165#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
166#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
167#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
168#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
169#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
170#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
171#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
172#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
173#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
174#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
175#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
176#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
177#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
178#define C_000E40_E2_BUSY 0xFFFDFFFF
179#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
180#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
181#define C_000E40_RB2D_BUSY 0xFFFBFFFF
182#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
183#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
184#define C_000E40_RB3D_BUSY 0xFFF7FFFF
185#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
186#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
187#define C_000E40_VAP_BUSY 0xFFEFFFFF
188#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
189#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
190#define C_000E40_RE_BUSY 0xFFDFFFFF
191#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
192#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
193#define C_000E40_TAM_BUSY 0xFFBFFFFF
194#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
195#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
196#define C_000E40_TDM_BUSY 0xFF7FFFFF
197#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
198#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
199#define C_000E40_PB_BUSY 0xFEFFFFFF
200#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
201#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
202#define C_000E40_TIM_BUSY 0xFDFFFFFF
203#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
204#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
205#define C_000E40_GA_BUSY 0xFBFFFFFF
206#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
207#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
208#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
209#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
210#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
211#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
99 212
100 213
214#define R_00000D_SCLK_CNTL 0x00000D
215#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
216#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
217#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
218#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
219#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
220#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
221#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
222#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
223#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
224#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
225#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
226#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
227#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
228#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
229#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
230#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
231#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
232#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
233#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
234#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
235#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
236#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
237#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
238#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
239#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
240#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
241#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
242#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
243#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
244#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
245#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
246#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
247#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
248#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
249#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
250#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
251#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
252#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
253#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
254#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
255#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
256#define C_00000D_FORCE_DISP2 0xFFFF7FFF
257#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
258#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
259#define C_00000D_FORCE_CP 0xFFFEFFFF
260#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
261#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
262#define C_00000D_FORCE_HDP 0xFFFDFFFF
263#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
264#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
265#define C_00000D_FORCE_DISP1 0xFFFBFFFF
266#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
267#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
268#define C_00000D_FORCE_TOP 0xFFF7FFFF
269#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
270#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
271#define C_00000D_FORCE_E2 0xFFEFFFFF
272#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
273#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
274#define C_00000D_FORCE_SE 0xFFDFFFFF
275#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
276#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
277#define C_00000D_FORCE_IDCT 0xFFBFFFFF
278#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
279#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
280#define C_00000D_FORCE_VIP 0xFF7FFFFF
281#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
282#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
283#define C_00000D_FORCE_RE 0xFEFFFFFF
284#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
285#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
286#define C_00000D_FORCE_PB 0xFDFFFFFF
287#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
288#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
289#define C_00000D_FORCE_TAM 0xFBFFFFFF
290#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
291#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
292#define C_00000D_FORCE_TDM 0xF7FFFFFF
293#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
294#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
295#define C_00000D_FORCE_RB 0xEFFFFFFF
296#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
297#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
298#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
299#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
300#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
301#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
302#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
303#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
304#define C_00000D_FORCE_OV0 0x7FFFFFFF
305
101#endif 306#endif
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 49a2fdc57d27..5c7fe52de30e 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -155,6 +155,9 @@ static void r420_debugfs(struct radeon_device *rdev)
155static void r420_clock_resume(struct radeon_device *rdev) 155static void r420_clock_resume(struct radeon_device *rdev)
156{ 156{
157 u32 sclk_cntl; 157 u32 sclk_cntl;
158
159 if (radeon_dynclks != -1 && radeon_dynclks)
160 radeon_atom_set_clock_gating(rdev, 1);
158 sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); 161 sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
159 sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); 162 sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
160 if (rdev->family == CHIP_R420) 163 if (rdev->family == CHIP_R420)
@@ -167,6 +170,8 @@ static int r420_startup(struct radeon_device *rdev)
167 int r; 170 int r;
168 171
169 r300_mc_program(rdev); 172 r300_mc_program(rdev);
173 /* Resume clock */
174 r420_clock_resume(rdev);
170 /* Initialize GART (initialize after TTM so we can allocate 175 /* Initialize GART (initialize after TTM so we can allocate
171 * memory through TTM but finalize after TTM) */ 176 * memory through TTM but finalize after TTM) */
172 if (rdev->flags & RADEON_IS_PCIE) { 177 if (rdev->flags & RADEON_IS_PCIE) {
@@ -267,7 +272,6 @@ int r420_init(struct radeon_device *rdev)
267{ 272{
268 int r; 273 int r;
269 274
270 rdev->new_init_path = true;
271 /* Initialize scratch registers */ 275 /* Initialize scratch registers */
272 radeon_scratch_init(rdev); 276 radeon_scratch_init(rdev);
273 /* Initialize surface registers */ 277 /* Initialize surface registers */
diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h
index a48a7db1e2aa..fc78d31a0b4a 100644
--- a/drivers/gpu/drm/radeon/r420d.h
+++ b/drivers/gpu/drm/radeon/r420d.h
@@ -212,9 +212,9 @@
212#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) 212#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
213#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) 213#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
214#define C_00000D_FORCE_E2 0xFFEFFFFF 214#define C_00000D_FORCE_E2 0xFFEFFFFF
215#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) 215#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
216#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) 216#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
217#define C_00000D_FORCE_SE 0xFFDFFFFF 217#define C_00000D_FORCE_VAP 0xFFDFFFFF
218#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) 218#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
219#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) 219#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
220#define C_00000D_FORCE_IDCT 0xFFBFFFFF 220#define C_00000D_FORCE_IDCT 0xFFBFFFFF
@@ -224,24 +224,24 @@
224#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) 224#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
225#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) 225#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
226#define C_00000D_FORCE_RE 0xFEFFFFFF 226#define C_00000D_FORCE_RE 0xFEFFFFFF
227#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) 227#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
228#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) 228#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
229#define C_00000D_FORCE_PB 0xFDFFFFFF 229#define C_00000D_FORCE_SR 0xFDFFFFFF
230#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) 230#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
231#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) 231#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
232#define C_00000D_FORCE_PX 0xFBFFFFFF 232#define C_00000D_FORCE_PX 0xFBFFFFFF
233#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) 233#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
234#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) 234#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
235#define C_00000D_FORCE_TX 0xF7FFFFFF 235#define C_00000D_FORCE_TX 0xF7FFFFFF
236#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) 236#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
237#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) 237#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
238#define C_00000D_FORCE_RB 0xEFFFFFFF 238#define C_00000D_FORCE_US 0xEFFFFFFF
239#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) 239#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
240#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) 240#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
241#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF 241#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
242#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) 242#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
243#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) 243#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
244#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF 244#define C_00000D_FORCE_SU 0xBFFFFFFF
245#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) 245#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
246#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) 246#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
247#define C_00000D_FORCE_OV0 0x7FFFFFFF 247#define C_00000D_FORCE_OV0 0x7FFFFFFF
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0bf13fccdaf2..a555b7b19b48 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,7 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
186 } 186 }
187 /* Enable IRQ */ 187 /* Enable IRQ */
188 rdev->irq.sw_int = true; 188 rdev->irq.sw_int = true;
189 r100_irq_set(rdev); 189 rs600_irq_set(rdev);
190 /* 1M ring buffer */ 190 /* 1M ring buffer */
191 r = r100_cp_init(rdev, 1024 * 1024); 191 r = r100_cp_init(rdev, 1024 * 1024);
192 if (r) { 192 if (r) {
@@ -228,7 +228,6 @@ int r520_init(struct radeon_device *rdev)
228{ 228{
229 int r; 229 int r;
230 230
231 rdev->new_init_path = true;
232 /* Initialize scratch registers */ 231 /* Initialize scratch registers */
233 radeon_scratch_init(rdev); 232 radeon_scratch_init(rdev);
234 /* Initialize surface registers */ 233 /* Initialize surface registers */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2e4e60edbff4..609719490ec2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -65,16 +65,11 @@ MODULE_FIRMWARE("radeon/RV710_me.bin");
65 65
66int r600_debugfs_mc_info_init(struct radeon_device *rdev); 66int r600_debugfs_mc_info_init(struct radeon_device *rdev);
67 67
68/* This files gather functions specifics to: 68/* r600,rv610,rv630,rv620,rv635,rv670 */
69 * r600,rv610,rv630,rv620,rv635,rv670
70 *
71 * Some of these functions might be used by newer ASICs.
72 */
73int r600_mc_wait_for_idle(struct radeon_device *rdev); 69int r600_mc_wait_for_idle(struct radeon_device *rdev);
74void r600_gpu_init(struct radeon_device *rdev); 70void r600_gpu_init(struct radeon_device *rdev);
75void r600_fini(struct radeon_device *rdev); 71void r600_fini(struct radeon_device *rdev);
76 72
77
78/* 73/*
79 * R600 PCIE GART 74 * R600 PCIE GART
80 */ 75 */
@@ -168,7 +163,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
168 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 163 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
169 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 164 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
170 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 165 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
171 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); 166 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
172 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 167 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
173 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 168 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
174 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 169 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -225,6 +220,40 @@ void r600_pcie_gart_fini(struct radeon_device *rdev)
225 radeon_gart_fini(rdev); 220 radeon_gart_fini(rdev);
226} 221}
227 222
223void r600_agp_enable(struct radeon_device *rdev)
224{
225 u32 tmp;
226 int i;
227
228 /* Setup L2 cache */
229 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
230 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
231 EFFECTIVE_L2_QUEUE_SIZE(7));
232 WREG32(VM_L2_CNTL2, 0);
233 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
234 /* Setup TLB control */
235 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
236 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
237 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
238 ENABLE_WAIT_L2_QUERY;
239 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
240 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
241 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
242 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
243 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
244 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
245 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
246 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
247 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
248 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
249 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
250 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
251 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
252 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
253 for (i = 0; i < 7; i++)
254 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
255}
256
228int r600_mc_wait_for_idle(struct radeon_device *rdev) 257int r600_mc_wait_for_idle(struct radeon_device *rdev)
229{ 258{
230 unsigned i; 259 unsigned i;
@@ -240,14 +269,9 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
240 return -1; 269 return -1;
241} 270}
242 271
243static void r600_mc_resume(struct radeon_device *rdev) 272static void r600_mc_program(struct radeon_device *rdev)
244{ 273{
245 u32 d1vga_control, d2vga_control; 274 struct rv515_mc_save save;
246 u32 vga_render_control, vga_hdp_control;
247 u32 d1crtc_control, d2crtc_control;
248 u32 new_d1grph_primary, new_d1grph_secondary;
249 u32 new_d2grph_primary, new_d2grph_secondary;
250 u64 old_vram_start;
251 u32 tmp; 275 u32 tmp;
252 int i, j; 276 int i, j;
253 277
@@ -261,85 +285,51 @@ static void r600_mc_resume(struct radeon_device *rdev)
261 } 285 }
262 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 286 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
263 287
264 d1vga_control = RREG32(D1VGA_CONTROL); 288 rv515_mc_stop(rdev, &save);
265 d2vga_control = RREG32(D2VGA_CONTROL);
266 vga_render_control = RREG32(VGA_RENDER_CONTROL);
267 vga_hdp_control = RREG32(VGA_HDP_CONTROL);
268 d1crtc_control = RREG32(D1CRTC_CONTROL);
269 d2crtc_control = RREG32(D2CRTC_CONTROL);
270 old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
271 new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
272 new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
273 new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
274 new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
275 new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
276 new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
277 new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
278 new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
279
280 /* Stop all video */
281 WREG32(D1VGA_CONTROL, 0);
282 WREG32(D2VGA_CONTROL, 0);
283 WREG32(VGA_RENDER_CONTROL, 0);
284 WREG32(D1CRTC_UPDATE_LOCK, 1);
285 WREG32(D2CRTC_UPDATE_LOCK, 1);
286 WREG32(D1CRTC_CONTROL, 0);
287 WREG32(D2CRTC_CONTROL, 0);
288 WREG32(D1CRTC_UPDATE_LOCK, 0);
289 WREG32(D2CRTC_UPDATE_LOCK, 0);
290
291 mdelay(1);
292 if (r600_mc_wait_for_idle(rdev)) { 289 if (r600_mc_wait_for_idle(rdev)) {
293 printk(KERN_WARNING "[drm] MC not idle !\n"); 290 dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
294 } 291 }
295 292 /* Lockout access through VGA aperture (doesn't exist before R600) */
296 /* Lockout access through VGA aperture*/
297 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 293 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
298
299 /* Update configuration */ 294 /* Update configuration */
300 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); 295 if (rdev->flags & RADEON_IS_AGP) {
301 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); 296 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
297 /* VRAM before AGP */
298 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
299 rdev->mc.vram_start >> 12);
300 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
301 rdev->mc.gtt_end >> 12);
302 } else {
303 /* VRAM after AGP */
304 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
305 rdev->mc.gtt_start >> 12);
306 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
307 rdev->mc.vram_end >> 12);
308 }
309 } else {
310 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
311 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
312 }
302 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 313 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
303 tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; 314 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
304 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 315 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
305 WREG32(MC_VM_FB_LOCATION, tmp); 316 WREG32(MC_VM_FB_LOCATION, tmp);
306 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 317 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
307 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 318 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
308 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 319 WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
309 if (rdev->flags & RADEON_IS_AGP) { 320 if (rdev->flags & RADEON_IS_AGP) {
310 WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); 321 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
311 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); 322 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
312 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); 323 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
313 } else { 324 } else {
314 WREG32(MC_VM_AGP_BASE, 0); 325 WREG32(MC_VM_AGP_BASE, 0);
315 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 326 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
316 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 327 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
317 } 328 }
318 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
319 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
320 WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
321 WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
322 WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
323
324 /* Unlock host access */
325 WREG32(VGA_HDP_CONTROL, vga_hdp_control);
326
327 mdelay(1);
328 if (r600_mc_wait_for_idle(rdev)) { 329 if (r600_mc_wait_for_idle(rdev)) {
329 printk(KERN_WARNING "[drm] MC not idle !\n"); 330 dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
330 } 331 }
331 332 rv515_mc_resume(rdev, &save);
332 /* Restore video state */
333 WREG32(D1CRTC_UPDATE_LOCK, 1);
334 WREG32(D2CRTC_UPDATE_LOCK, 1);
335 WREG32(D1CRTC_CONTROL, d1crtc_control);
336 WREG32(D2CRTC_CONTROL, d2crtc_control);
337 WREG32(D1CRTC_UPDATE_LOCK, 0);
338 WREG32(D2CRTC_UPDATE_LOCK, 0);
339 WREG32(D1VGA_CONTROL, d1vga_control);
340 WREG32(D2VGA_CONTROL, d2vga_control);
341 WREG32(VGA_RENDER_CONTROL, vga_render_control);
342
343 /* we need to own VRAM, so turn off the VGA renderer here 333 /* we need to own VRAM, so turn off the VGA renderer here
344 * to stop it overwriting our objects */ 334 * to stop it overwriting our objects */
345 rv515_vga_render_disable(rdev); 335 rv515_vga_render_disable(rdev);
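The AGP branch added above simply picks the bounds of the single system aperture so that it covers both VRAM and the AGP GART, whichever of the two sits lower in the GPU address space. Assuming the two ranges do not overlap, the if/else is equivalent to the following sketch (illustrative only; min/max are the usual kernel helpers, and *_end is inclusive under the convention this patch adopts):

	/* Illustrative: one contiguous aperture spanning VRAM and AGP GART, in 4 KB units. */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       min(rdev->mc.vram_start, rdev->mc.gtt_start) >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       max(rdev->mc.vram_end, rdev->mc.gtt_end) >> 12);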
@@ -445,9 +435,9 @@ int r600_mc_init(struct radeon_device *rdev)
445 } 435 }
446 } 436 }
447 rdev->mc.vram_start = rdev->mc.vram_location; 437 rdev->mc.vram_start = rdev->mc.vram_location;
448 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; 438 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
449 rdev->mc.gtt_start = rdev->mc.gtt_location; 439 rdev->mc.gtt_start = rdev->mc.gtt_location;
450 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; 440 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
451 /* FIXME: we should enforce default clock in case GPU is not in 441 /* FIXME: we should enforce default clock in case GPU is not in
452 * default setup 442 * default setup
453 */ 443 */
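Note that vram_end and gtt_end now hold the last valid address (inclusive) rather than one past the end, which is why the explicit "- 1" adjustments disappear from VM_CONTEXT0_PAGE_TABLE_END_ADDR, MC_VM_SYSTEM_APERTURE_HIGH_ADDR and the MC_VM_FB_LOCATION packing earlier in this file. With an illustrative 256 MB of VRAM based at 0:

	/* Illustrative only: vram_location = 0, mc_vram_size = 0x10000000 */
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; /* 0x0FFFFFFF */
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);       /* 0x0000FFFF */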
@@ -463,6 +453,7 @@ int r600_mc_init(struct radeon_device *rdev)
463 */ 453 */
464int r600_gpu_soft_reset(struct radeon_device *rdev) 454int r600_gpu_soft_reset(struct radeon_device *rdev)
465{ 455{
456 struct rv515_mc_save save;
466 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | 457 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
467 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | 458 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
468 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | 459 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -480,13 +471,25 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
480 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | 471 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
481 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); 472 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
482 u32 srbm_reset = 0; 473 u32 srbm_reset = 0;
474 u32 tmp;
483 475
476 dev_info(rdev->dev, "GPU softreset \n");
477 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
478 RREG32(R_008010_GRBM_STATUS));
479 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
480 RREG32(R_008014_GRBM_STATUS2));
481 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
482 RREG32(R_000E50_SRBM_STATUS));
483 rv515_mc_stop(rdev, &save);
484 if (r600_mc_wait_for_idle(rdev)) {
485 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
486 }
484 /* Disable CP parsing/prefetching */ 487 /* Disable CP parsing/prefetching */
485 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); 488 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
486 /* Check if any of the rendering block is busy and reset it */ 489 /* Check if any of the rendering block is busy and reset it */
487 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 490 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
488 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 491 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
489 WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) | 492 tmp = S_008020_SOFT_RESET_CR(1) |
490 S_008020_SOFT_RESET_DB(1) | 493 S_008020_SOFT_RESET_DB(1) |
491 S_008020_SOFT_RESET_CB(1) | 494 S_008020_SOFT_RESET_CB(1) |
492 S_008020_SOFT_RESET_PA(1) | 495 S_008020_SOFT_RESET_PA(1) |
@@ -498,14 +501,18 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
498 S_008020_SOFT_RESET_TC(1) | 501 S_008020_SOFT_RESET_TC(1) |
499 S_008020_SOFT_RESET_TA(1) | 502 S_008020_SOFT_RESET_TA(1) |
500 S_008020_SOFT_RESET_VC(1) | 503 S_008020_SOFT_RESET_VC(1) |
501 S_008020_SOFT_RESET_VGT(1)); 504 S_008020_SOFT_RESET_VGT(1);
505 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
506 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
502 (void)RREG32(R_008020_GRBM_SOFT_RESET); 507 (void)RREG32(R_008020_GRBM_SOFT_RESET);
503 udelay(50); 508 udelay(50);
504 WREG32(R_008020_GRBM_SOFT_RESET, 0); 509 WREG32(R_008020_GRBM_SOFT_RESET, 0);
505 (void)RREG32(R_008020_GRBM_SOFT_RESET); 510 (void)RREG32(R_008020_GRBM_SOFT_RESET);
506 } 511 }
507 /* Reset CP (we always reset CP) */ 512 /* Reset CP (we always reset CP) */
508 WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1)); 513 tmp = S_008020_SOFT_RESET_CP(1);
514 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
515 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
509 (void)RREG32(R_008020_GRBM_SOFT_RESET); 516 (void)RREG32(R_008020_GRBM_SOFT_RESET);
510 udelay(50); 517 udelay(50);
511 WREG32(R_008020_GRBM_SOFT_RESET, 0); 518 WREG32(R_008020_GRBM_SOFT_RESET, 0);
@@ -533,6 +540,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
533 srbm_reset |= S_000E60_SOFT_RESET_RLC(1); 540 srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
534 if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) 541 if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
535 srbm_reset |= S_000E60_SOFT_RESET_SEM(1); 542 srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
543 if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
544 srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
545 dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
546 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
547 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
548 udelay(50);
549 WREG32(R_000E60_SRBM_SOFT_RESET, 0);
550 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
536 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); 551 WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
537 (void)RREG32(R_000E60_SRBM_SOFT_RESET); 552 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
538 udelay(50); 553 udelay(50);
@@ -540,6 +555,17 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
540 (void)RREG32(R_000E60_SRBM_SOFT_RESET); 555 (void)RREG32(R_000E60_SRBM_SOFT_RESET);
541 /* Wait a little for things to settle down */ 556 /* Wait a little for things to settle down */
542 udelay(50); 557 udelay(50);
558 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
559 RREG32(R_008010_GRBM_STATUS));
560 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
561 RREG32(R_008014_GRBM_STATUS2));
562 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
563 RREG32(R_000E50_SRBM_STATUS));
564 /* After reset we need to reinit the asic as GPU often ends up in an
565 * incoherent state.
566 */
567 atom_asic_init(rdev->mode_info.atom_context);
568 rv515_mc_resume(rdev, &save);
543 return 0; 569 return 0;
544} 570}
545 571
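Editor's note: r600_gpu_soft_reset() now brackets the reset sequence with a display save/restore and re-posts the ASIC before handing control back. A condensed sketch of the new ordering (per-block GRBM and SRBM resets elided, helper names as in the hunk):

	static int r600_gpu_soft_reset_sketch(struct radeon_device *rdev)
	{
		struct rv515_mc_save save;
		u32 tmp;

		rv515_mc_stop(rdev, &save);			/* park the display first */
		if (r600_mc_wait_for_idle(rdev))
			dev_warn(rdev->dev, "Wait for MC idle timed out\n");
		/* Disable CP parsing/prefetching */
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));

		/* ... conditional GRBM block resets and the SRBM reset go here ... */

		tmp = S_008020_SOFT_RESET_CP(1);		/* CP is always reset */
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);		/* post the write */
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);

		/* the GPU often comes out of reset incoherent, so re-post it
		 * before restoring the saved display state */
		atom_asic_init(rdev->mode_info.atom_context);
		rv515_mc_resume(rdev, &save);
		return 0;
	}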
@@ -1350,32 +1376,47 @@ int r600_ring_test(struct radeon_device *rdev)
1350 return r; 1376 return r;
1351} 1377}
1352 1378
1353/* 1379void r600_wb_disable(struct radeon_device *rdev)
1354 * Writeback 1380{
1355 */ 1381 WREG32(SCRATCH_UMSK, 0);
1356int r600_wb_init(struct radeon_device *rdev) 1382 if (rdev->wb.wb_obj) {
1383 radeon_object_kunmap(rdev->wb.wb_obj);
1384 radeon_object_unpin(rdev->wb.wb_obj);
1385 }
1386}
1387
1388void r600_wb_fini(struct radeon_device *rdev)
1389{
1390 r600_wb_disable(rdev);
1391 if (rdev->wb.wb_obj) {
1392 radeon_object_unref(&rdev->wb.wb_obj);
1393 rdev->wb.wb = NULL;
1394 rdev->wb.wb_obj = NULL;
1395 }
1396}
1397
1398int r600_wb_enable(struct radeon_device *rdev)
1357{ 1399{
1358 int r; 1400 int r;
1359 1401
1360 if (rdev->wb.wb_obj == NULL) { 1402 if (rdev->wb.wb_obj == NULL) {
1361 r = radeon_object_create(rdev, NULL, 4096, 1403 r = radeon_object_create(rdev, NULL, 4096, true,
1362 true, 1404 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
1363 RADEON_GEM_DOMAIN_GTT,
1364 false, &rdev->wb.wb_obj);
1365 if (r) { 1405 if (r) {
1366 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); 1406 dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
1367 return r; 1407 return r;
1368 } 1408 }
1369 r = radeon_object_pin(rdev->wb.wb_obj, 1409 r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1370 RADEON_GEM_DOMAIN_GTT, 1410 &rdev->wb.gpu_addr);
1371 &rdev->wb.gpu_addr);
1372 if (r) { 1411 if (r) {
1373 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); 1412 dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
1413 r600_wb_fini(rdev);
1374 return r; 1414 return r;
1375 } 1415 }
1376 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 1416 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1377 if (r) { 1417 if (r) {
1378 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); 1418 dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
1419 r600_wb_fini(rdev);
1379 return r; 1420 return r;
1380 } 1421 }
1381 } 1422 }
@@ -1386,21 +1427,6 @@ int r600_wb_init(struct radeon_device *rdev)
1386 return 0; 1427 return 0;
1387} 1428}
1388 1429
1389void r600_wb_fini(struct radeon_device *rdev)
1390{
1391 if (rdev->wb.wb_obj) {
1392 radeon_object_kunmap(rdev->wb.wb_obj);
1393 radeon_object_unpin(rdev->wb.wb_obj);
1394 radeon_object_unref(&rdev->wb.wb_obj);
1395 rdev->wb.wb = NULL;
1396 rdev->wb.wb_obj = NULL;
1397 }
1398}
1399
1400
1401/*
1402 * CS
1403 */
1404void r600_fence_ring_emit(struct radeon_device *rdev, 1430void r600_fence_ring_emit(struct radeon_device *rdev,
1405 struct radeon_fence *fence) 1431 struct radeon_fence *fence)
1406{ 1432{
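Editor's note: the writeback path is split so the buffer object outlives suspend/resume: r600_wb_enable() creates, pins and maps the object on first use, r600_wb_disable() masks SCRATCH_UMSK and unpins it, and r600_wb_fini() disables and then frees the object. A rough sketch of how the three calls nest (the enable-side register setup is not visible in the hunk and is only hinted at here):

	/* Illustrative lifecycle; the real callers are r600_startup(),
	 * r600_suspend() and r600_fini(). */
	static int r600_wb_lifecycle_example(struct radeon_device *rdev)
	{
		int r;

		r = r600_wb_enable(rdev);	/* allocate/pin/map once, then enable */
		if (r)
			dev_warn(rdev->dev, "running without writeback (%d)\n", r);

		/* ... rings run, scratch/fence values land in rdev->wb.wb ... */

		r600_wb_disable(rdev);		/* SCRATCH_UMSK = 0, kunmap + unpin */
		r600_wb_fini(rdev);		/* disable again (harmless) + free the BO */
		return 0;
	}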
@@ -1477,11 +1503,14 @@ int r600_startup(struct radeon_device *rdev)
1477{ 1503{
1478 int r; 1504 int r;
1479 1505
1480 r600_gpu_reset(rdev); 1506 r600_mc_program(rdev);
1481 r600_mc_resume(rdev); 1507 if (rdev->flags & RADEON_IS_AGP) {
1482 r = r600_pcie_gart_enable(rdev); 1508 r600_agp_enable(rdev);
1483 if (r) 1509 } else {
1484 return r; 1510 r = r600_pcie_gart_enable(rdev);
1511 if (r)
1512 return r;
1513 }
1485 r600_gpu_init(rdev); 1514 r600_gpu_init(rdev);
1486 1515
1487 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 1516 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -1500,9 +1529,8 @@ int r600_startup(struct radeon_device *rdev)
1500 r = r600_cp_resume(rdev); 1529 r = r600_cp_resume(rdev);
1501 if (r) 1530 if (r)
1502 return r; 1531 return r;
1503 r = r600_wb_init(rdev); 1532 /* write back buffer is not vital so don't worry about failure */
1504 if (r) 1533 r600_wb_enable(rdev);
1505 return r;
1506 return 0; 1534 return 0;
1507} 1535}
1508 1536
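Editor's note: r600_startup() no longer soft-resets the GPU on its way up; it programs the MC first and then picks the aperture backend, the AGP aperture when RADEON_IS_AGP is set, otherwise the PCIE GART, and writeback becomes best-effort. A compressed view of that control flow (r600_agp_enable() is taken on trust from the hunk):

	static int r600_startup_sketch(struct radeon_device *rdev)
	{
		int r;

		r600_mc_program(rdev);
		if (rdev->flags & RADEON_IS_AGP) {
			r600_agp_enable(rdev);		/* AGP aperture, no GART */
		} else {
			r = r600_pcie_gart_enable(rdev);
			if (r)
				return r;		/* GART is mandatory without AGP */
		}
		r600_gpu_init(rdev);
		/* ... blit shader pin, CP load and resume as in the hunk ... */
		r600_wb_enable(rdev);			/* failure is tolerated */
		return 0;
	}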
@@ -1524,15 +1552,12 @@ int r600_resume(struct radeon_device *rdev)
1524{ 1552{
1525 int r; 1553 int r;
1526 1554
1527 if (radeon_gpu_reset(rdev)) { 1555 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
1528 /* FIXME: what do we want to do here ? */ 1556 * posting will perform the necessary tasks to bring back GPU into good
1529 } 1557 * shape.
1558 */
1530 /* post card */ 1559 /* post card */
1531 if (rdev->is_atom_bios) { 1560 atom_asic_init(rdev->mode_info.atom_context);
1532 atom_asic_init(rdev->mode_info.atom_context);
1533 } else {
1534 radeon_combios_asic_init(rdev->ddev);
1535 }
1536 /* Initialize clocks */ 1561 /* Initialize clocks */
1537 r = radeon_clocks_init(rdev); 1562 r = radeon_clocks_init(rdev);
1538 if (r) { 1563 if (r) {
@@ -1545,7 +1570,7 @@ int r600_resume(struct radeon_device *rdev)
1545 return r; 1570 return r;
1546 } 1571 }
1547 1572
1548 r = radeon_ib_test(rdev); 1573 r = r600_ib_test(rdev);
1549 if (r) { 1574 if (r) {
1550 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1575 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1551 return r; 1576 return r;
@@ -1553,13 +1578,12 @@ int r600_resume(struct radeon_device *rdev)
1553 return r; 1578 return r;
1554} 1579}
1555 1580
1556
1557int r600_suspend(struct radeon_device *rdev) 1581int r600_suspend(struct radeon_device *rdev)
1558{ 1582{
1559 /* FIXME: we should wait for ring to be empty */ 1583 /* FIXME: we should wait for ring to be empty */
1560 r600_cp_stop(rdev); 1584 r600_cp_stop(rdev);
1561 rdev->cp.ready = false; 1585 rdev->cp.ready = false;
1562 1586 r600_wb_disable(rdev);
1563 r600_pcie_gart_disable(rdev); 1587 r600_pcie_gart_disable(rdev);
1564 /* unpin shaders bo */ 1588 /* unpin shaders bo */
1565 radeon_object_unpin(rdev->r600_blit.shader_obj); 1589 radeon_object_unpin(rdev->r600_blit.shader_obj);
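Editor's note: r600_resume() drops both the pre-posting GPU reset and the combios fallback, since posting the ATOM tables is enough to bring an r600 back, and r600_suspend() gains a matching r600_wb_disable() before the GART goes away. The suspend/resume pairing implied by the two hunks, reduced to a sketch:

	static int r600_suspend_sketch(struct radeon_device *rdev)
	{
		r600_cp_stop(rdev);
		rdev->cp.ready = false;
		r600_wb_disable(rdev);		/* stop writeback while GTT is still mapped */
		r600_pcie_gart_disable(rdev);
		radeon_object_unpin(rdev->r600_blit.shader_obj);
		return 0;
	}

	static int r600_resume_sketch(struct radeon_device *rdev)
	{
		int r;

		/* post card: no reset needed beforehand on r600 */
		atom_asic_init(rdev->mode_info.atom_context);
		r = radeon_clocks_init(rdev);
		if (r)
			return r;
		return r600_startup(rdev);	/* r600_ib_test() follows, as in the hunk */
	}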
@@ -1576,7 +1600,6 @@ int r600_init(struct radeon_device *rdev)
1576{ 1600{
1577 int r; 1601 int r;
1578 1602
1579 rdev->new_init_path = true;
1580 r = radeon_dummy_page_init(rdev); 1603 r = radeon_dummy_page_init(rdev);
1581 if (r) 1604 if (r)
1582 return r; 1605 return r;
@@ -1593,8 +1616,10 @@ int r600_init(struct radeon_device *rdev)
1593 return -EINVAL; 1616 return -EINVAL;
1594 } 1617 }
1595 /* Must be an ATOMBIOS */ 1618 /* Must be an ATOMBIOS */
1596 if (!rdev->is_atom_bios) 1619 if (!rdev->is_atom_bios) {
1620 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
1597 return -EINVAL; 1621 return -EINVAL;
1622 }
1598 r = radeon_atombios_init(rdev); 1623 r = radeon_atombios_init(rdev);
1599 if (r) 1624 if (r)
1600 return r; 1625 return r;
@@ -1616,15 +1641,8 @@ int r600_init(struct radeon_device *rdev)
1616 if (r) 1641 if (r)
1617 return r; 1642 return r;
1618 r = r600_mc_init(rdev); 1643 r = r600_mc_init(rdev);
1619 if (r) { 1644 if (r)
1620 if (rdev->flags & RADEON_IS_AGP) {
1621 /* Retry with disabling AGP */
1622 r600_fini(rdev);
1623 rdev->flags &= ~RADEON_IS_AGP;
1624 return r600_init(rdev);
1625 }
1626 return r; 1645 return r;
1627 }
1628 /* Memory manager */ 1646 /* Memory manager */
1629 r = radeon_object_init(rdev); 1647 r = radeon_object_init(rdev);
1630 if (r) 1648 if (r)
@@ -1653,12 +1671,10 @@ int r600_init(struct radeon_device *rdev)
1653 1671
1654 r = r600_startup(rdev); 1672 r = r600_startup(rdev);
1655 if (r) { 1673 if (r) {
1656 if (rdev->flags & RADEON_IS_AGP) { 1674 r600_suspend(rdev);
1657 /* Retry with disabling AGP */ 1675 r600_wb_fini(rdev);
1658 r600_fini(rdev); 1676 radeon_ring_fini(rdev);
1659 rdev->flags &= ~RADEON_IS_AGP; 1677 r600_pcie_gart_fini(rdev);
1660 return r600_init(rdev);
1661 }
1662 rdev->accel_working = false; 1678 rdev->accel_working = false;
1663 } 1679 }
1664 if (rdev->accel_working) { 1680 if (rdev->accel_working) {
@@ -1667,7 +1683,7 @@ int r600_init(struct radeon_device *rdev)
1667 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 1683 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
1668 rdev->accel_working = false; 1684 rdev->accel_working = false;
1669 } 1685 }
1670 r = radeon_ib_test(rdev); 1686 r = r600_ib_test(rdev);
1671 if (r) { 1687 if (r) {
1672 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1688 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1673 rdev->accel_working = false; 1689 rdev->accel_working = false;
@@ -1683,19 +1699,15 @@ void r600_fini(struct radeon_device *rdev)
1683 1699
1684 r600_blit_fini(rdev); 1700 r600_blit_fini(rdev);
1685 radeon_ring_fini(rdev); 1701 radeon_ring_fini(rdev);
1702 r600_wb_fini(rdev);
1686 r600_pcie_gart_fini(rdev); 1703 r600_pcie_gart_fini(rdev);
1687 radeon_gem_fini(rdev); 1704 radeon_gem_fini(rdev);
1688 radeon_fence_driver_fini(rdev); 1705 radeon_fence_driver_fini(rdev);
1689 radeon_clocks_fini(rdev); 1706 radeon_clocks_fini(rdev);
1690#if __OS_HAS_AGP
1691 if (rdev->flags & RADEON_IS_AGP) 1707 if (rdev->flags & RADEON_IS_AGP)
1692 radeon_agp_fini(rdev); 1708 radeon_agp_fini(rdev);
1693#endif
1694 radeon_object_fini(rdev); 1709 radeon_object_fini(rdev);
1695 if (rdev->is_atom_bios) 1710 radeon_atombios_fini(rdev);
1696 radeon_atombios_fini(rdev);
1697 else
1698 radeon_combios_fini(rdev);
1699 kfree(rdev->bios); 1711 kfree(rdev->bios);
1700 rdev->bios = NULL; 1712 rdev->bios = NULL;
1701 radeon_dummy_page_fini(rdev); 1713 radeon_dummy_page_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index d988eece0187..dec501081608 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -582,8 +582,6 @@ r600_blit_copy(struct drm_device *dev,
582 u64 vb_addr; 582 u64 vb_addr;
583 u32 *vb; 583 u32 *vb;
584 584
585 vb = r600_nomm_get_vb_ptr(dev);
586
587 if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { 585 if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
588 max_bytes = 8192; 586 max_bytes = 8192;
589 587
@@ -619,8 +617,8 @@ r600_blit_copy(struct drm_device *dev,
619 if (!dev_priv->blit_vb) 617 if (!dev_priv->blit_vb)
620 return; 618 return;
621 set_shaders(dev); 619 set_shaders(dev);
622 vb = r600_nomm_get_vb_ptr(dev);
623 } 620 }
621 vb = r600_nomm_get_vb_ptr(dev);
624 622
625 vb[0] = i2f(dst_x); 623 vb[0] = i2f(dst_x);
626 vb[1] = 0; 624 vb[1] = 0;
@@ -708,8 +706,8 @@ r600_blit_copy(struct drm_device *dev,
708 return; 706 return;
709 707
710 set_shaders(dev); 708 set_shaders(dev);
711 vb = r600_nomm_get_vb_ptr(dev);
712 } 709 }
710 vb = r600_nomm_get_vb_ptr(dev);
713 711
714 vb[0] = i2f(dst_x / 4); 712 vb[0] = i2f(dst_x / 4);
715 vb[1] = 0; 713 vb[1] = 0;
@@ -777,8 +775,6 @@ r600_blit_swap(struct drm_device *dev,
777 u64 vb_addr; 775 u64 vb_addr;
778 u32 *vb; 776 u32 *vb;
779 777
780 vb = r600_nomm_get_vb_ptr(dev);
781
782 if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { 778 if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
783 779
784 r600_nomm_put_vb(dev); 780 r600_nomm_put_vb(dev);
@@ -787,8 +783,8 @@ r600_blit_swap(struct drm_device *dev,
787 return; 783 return;
788 784
789 set_shaders(dev); 785 set_shaders(dev);
790 vb = r600_nomm_get_vb_ptr(dev);
791 } 786 }
787 vb = r600_nomm_get_vb_ptr(dev);
792 788
793 if (cpp == 4) { 789 if (cpp == 4) {
794 cb_format = COLOR_8_8_8_8; 790 cb_format = COLOR_8_8_8_8;
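Editor's note: in r600_blit_copy() and r600_blit_swap() the vertex-buffer pointer used to be fetched once at the top of the function, before the code that may flush the current VB and switch to a fresh one; the hunks move the r600_nomm_get_vb_ptr() call after that refill so vb can never point into a buffer that was just replaced. The corrected ordering, boiled down (the refill call itself sits outside the visible hunk and is only sketched in a comment):

	if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
		r600_nomm_put_vb(dev);		/* current VB is full: flush it */
		/* ... a new dev_priv->blit_vb is obtained here (not shown) ... */
		if (!dev_priv->blit_vb)
			return;
		set_shaders(dev);
	}
	vb = r600_nomm_get_vb_ptr(dev);		/* valid only after any refill */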
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index acae33e2ad51..93108bb31d1d 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -610,7 +610,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
610 610
611 DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, 611 DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
612 size_bytes, rdev->r600_blit.vb_used); 612 size_bytes, rdev->r600_blit.vb_used);
613 vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
614 if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { 613 if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
615 max_bytes = 8192; 614 max_bytes = 8192;
616 615
@@ -653,6 +652,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
653 vb = r600_nomm_get_vb_ptr(dev); 652 vb = r600_nomm_get_vb_ptr(dev);
654#endif 653#endif
655 } 654 }
655 vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
656 656
657 vb[0] = i2f(dst_x); 657 vb[0] = i2f(dst_x);
658 vb[1] = 0; 658 vb[1] = 0;
@@ -747,6 +747,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
747 vb = r600_nomm_get_vb_ptr(dev); 747 vb = r600_nomm_get_vb_ptr(dev);
748 } 748 }
749#endif 749#endif
750 vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
750 751
751 vb[0] = i2f(dst_x / 4); 752 vb[0] = i2f(dst_x / 4);
752 vb[1] = 0; 753 vb[1] = 0;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d28970db6a2d..17e42195c632 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -252,7 +252,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
252 252
253 header = radeon_get_ib_value(p, h_idx); 253 header = radeon_get_ib_value(p, h_idx);
254 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 254 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
255 reg = header >> 2; 255 reg = CP_PACKET0_GET_REG(header);
256 mutex_lock(&p->rdev->ddev->mode_config.mutex); 256 mutex_lock(&p->rdev->ddev->mode_config.mutex);
257 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 257 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
258 if (!obj) { 258 if (!obj) {
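Editor's note: r600_cs_packet_parse_vline() now decodes the register offset from a PACKET0 header with the CP_PACKET0_GET_REG() helper instead of the ad-hoc header >> 2, which does not match the PACKET0 field layout. A hedged illustration of the difference; the encoding and mask below are assumptions for illustration, not a copy of the r600d.h definition:

	/* Assumed encoding, for illustration only:
	 *   header      = PACKET0(reg, count) = (count << 16) | (reg >> 2)
	 *   byte offset = (header & 0xFFFF) << 2
	 */
	#define CP_PACKET0_GET_REG_EXAMPLE(h)	(((h) & 0xFFFF) << 2)

	reg = CP_PACKET0_GET_REG(header);	/* was: reg = header >> 2; */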
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a9028a85c9b..9b64d47f1f82 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -643,6 +643,7 @@
643#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) 643#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
644#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) 644#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
645#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) 645#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
646#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
646#define R_000E60_SRBM_SOFT_RESET 0x0E60 647#define R_000E60_SRBM_SOFT_RESET 0x0E60
647#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) 648#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
648#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) 649#define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 950b346e343f..5ab35b81c86b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -590,18 +590,8 @@ struct radeon_asic {
590 void (*fini)(struct radeon_device *rdev); 590 void (*fini)(struct radeon_device *rdev);
591 int (*resume)(struct radeon_device *rdev); 591 int (*resume)(struct radeon_device *rdev);
592 int (*suspend)(struct radeon_device *rdev); 592 int (*suspend)(struct radeon_device *rdev);
593 void (*errata)(struct radeon_device *rdev);
594 void (*vram_info)(struct radeon_device *rdev);
595 void (*vga_set_state)(struct radeon_device *rdev, bool state); 593 void (*vga_set_state)(struct radeon_device *rdev, bool state);
596 int (*gpu_reset)(struct radeon_device *rdev); 594 int (*gpu_reset)(struct radeon_device *rdev);
597 int (*mc_init)(struct radeon_device *rdev);
598 void (*mc_fini)(struct radeon_device *rdev);
599 int (*wb_init)(struct radeon_device *rdev);
600 void (*wb_fini)(struct radeon_device *rdev);
601 int (*gart_init)(struct radeon_device *rdev);
602 void (*gart_fini)(struct radeon_device *rdev);
603 int (*gart_enable)(struct radeon_device *rdev);
604 void (*gart_disable)(struct radeon_device *rdev);
605 void (*gart_tlb_flush)(struct radeon_device *rdev); 595 void (*gart_tlb_flush)(struct radeon_device *rdev);
606 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); 596 int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
607 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); 597 int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
@@ -611,7 +601,6 @@ struct radeon_asic {
611 void (*ring_start)(struct radeon_device *rdev); 601 void (*ring_start)(struct radeon_device *rdev);
612 int (*ring_test)(struct radeon_device *rdev); 602 int (*ring_test)(struct radeon_device *rdev);
613 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 603 void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
614 int (*ib_test)(struct radeon_device *rdev);
615 int (*irq_set)(struct radeon_device *rdev); 604 int (*irq_set)(struct radeon_device *rdev);
616 int (*irq_process)(struct radeon_device *rdev); 605 int (*irq_process)(struct radeon_device *rdev);
617 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); 606 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
@@ -789,7 +778,6 @@ struct radeon_device {
789 bool shutdown; 778 bool shutdown;
790 bool suspend; 779 bool suspend;
791 bool need_dma32; 780 bool need_dma32;
792 bool new_init_path;
793 bool accel_working; 781 bool accel_working;
794 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; 782 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
795 const struct firmware *me_fw; /* all family ME firmware */ 783 const struct firmware *me_fw; /* all family ME firmware */
@@ -949,28 +937,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
949#define radeon_resume(rdev) (rdev)->asic->resume((rdev)) 937#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
950#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) 938#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
951#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) 939#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
952#define radeon_errata(rdev) (rdev)->asic->errata((rdev))
953#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
954#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) 940#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
955#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) 941#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
956#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
957#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
958#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
959#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
960#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev))
961#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev))
962#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
963#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
964#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) 942#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
965#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) 943#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
966#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
967#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
968#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
969#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) 944#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
970#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) 945#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
971#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) 946#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
972#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) 947#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
973#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev))
974#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) 948#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
975#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) 949#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
976#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) 950#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
@@ -996,6 +970,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev);
996extern void radeon_scratch_init(struct radeon_device *rdev); 970extern void radeon_scratch_init(struct radeon_device *rdev);
997extern void radeon_surface_init(struct radeon_device *rdev); 971extern void radeon_surface_init(struct radeon_device *rdev);
998extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 972extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
973extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
999extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 974extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
1000 975
1001/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 976/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
@@ -1031,11 +1006,27 @@ extern int r100_wb_init(struct radeon_device *rdev);
1031extern void r100_hdp_reset(struct radeon_device *rdev); 1006extern void r100_hdp_reset(struct radeon_device *rdev);
1032extern int r100_rb2d_reset(struct radeon_device *rdev); 1007extern int r100_rb2d_reset(struct radeon_device *rdev);
1033extern int r100_cp_reset(struct radeon_device *rdev); 1008extern int r100_cp_reset(struct radeon_device *rdev);
1009extern void r100_vga_render_disable(struct radeon_device *rdev);
1010extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1011 struct radeon_cs_packet *pkt,
1012 struct radeon_object *robj);
1013extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1014 struct radeon_cs_packet *pkt,
1015 const unsigned *auth, unsigned n,
1016 radeon_packet0_check_t check);
1017extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
1018 struct radeon_cs_packet *pkt,
1019 unsigned idx);
1020
1021/* rv200,rv250,rv280 */
1022extern void r200_set_safe_registers(struct radeon_device *rdev);
1034 1023
1035/* r300,r350,rv350,rv370,rv380 */ 1024/* r300,r350,rv350,rv370,rv380 */
1036extern void r300_set_reg_safe(struct radeon_device *rdev); 1025extern void r300_set_reg_safe(struct radeon_device *rdev);
1037extern void r300_mc_program(struct radeon_device *rdev); 1026extern void r300_mc_program(struct radeon_device *rdev);
1038extern void r300_vram_info(struct radeon_device *rdev); 1027extern void r300_vram_info(struct radeon_device *rdev);
1028extern void r300_clock_startup(struct radeon_device *rdev);
1029extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
1039extern int rv370_pcie_gart_init(struct radeon_device *rdev); 1030extern int rv370_pcie_gart_init(struct radeon_device *rdev);
1040extern void rv370_pcie_gart_fini(struct radeon_device *rdev); 1031extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
1041extern int rv370_pcie_gart_enable(struct radeon_device *rdev); 1032extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
@@ -1066,6 +1057,18 @@ extern void rv515_clock_startup(struct radeon_device *rdev);
1066extern void rv515_debugfs(struct radeon_device *rdev); 1057extern void rv515_debugfs(struct radeon_device *rdev);
1067extern int rv515_suspend(struct radeon_device *rdev); 1058extern int rv515_suspend(struct radeon_device *rdev);
1068 1059
1060/* rs400 */
1061extern int rs400_gart_init(struct radeon_device *rdev);
1062extern int rs400_gart_enable(struct radeon_device *rdev);
1063extern void rs400_gart_adjust_size(struct radeon_device *rdev);
1064extern void rs400_gart_disable(struct radeon_device *rdev);
1065extern void rs400_gart_fini(struct radeon_device *rdev);
1066
1067/* rs600 */
1068extern void rs600_set_safe_registers(struct radeon_device *rdev);
1069extern int rs600_irq_set(struct radeon_device *rdev);
1070extern void rs600_irq_disable(struct radeon_device *rdev);
1071
1069/* rs690, rs740 */ 1072/* rs690, rs740 */
1070extern void rs690_line_buffer_adjust(struct radeon_device *rdev, 1073extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
1071 struct drm_display_mode *mode1, 1074 struct drm_display_mode *mode1,
@@ -1083,8 +1086,9 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev);
1083extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); 1086extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
1084extern int r600_ib_test(struct radeon_device *rdev); 1087extern int r600_ib_test(struct radeon_device *rdev);
1085extern int r600_ring_test(struct radeon_device *rdev); 1088extern int r600_ring_test(struct radeon_device *rdev);
1086extern int r600_wb_init(struct radeon_device *rdev);
1087extern void r600_wb_fini(struct radeon_device *rdev); 1089extern void r600_wb_fini(struct radeon_device *rdev);
1090extern int r600_wb_enable(struct radeon_device *rdev);
1091extern void r600_wb_disable(struct radeon_device *rdev);
1088extern void r600_scratch_init(struct radeon_device *rdev); 1092extern void r600_scratch_init(struct radeon_device *rdev);
1089extern int r600_blit_init(struct radeon_device *rdev); 1093extern int r600_blit_init(struct radeon_device *rdev);
1090extern void r600_blit_fini(struct radeon_device *rdev); 1094extern void r600_blit_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c8a4e7b5663d..c3532c7a6f3f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -41,28 +41,17 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
41/* 41/*
42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 42 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
43 */ 43 */
44int r100_init(struct radeon_device *rdev); 44extern int r100_init(struct radeon_device *rdev);
45int r200_init(struct radeon_device *rdev); 45extern void r100_fini(struct radeon_device *rdev);
46extern int r100_suspend(struct radeon_device *rdev);
47extern int r100_resume(struct radeon_device *rdev);
46uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 48uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
47void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 49void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
48void r100_errata(struct radeon_device *rdev);
49void r100_vram_info(struct radeon_device *rdev);
50void r100_vga_set_state(struct radeon_device *rdev, bool state); 50void r100_vga_set_state(struct radeon_device *rdev, bool state);
51int r100_gpu_reset(struct radeon_device *rdev); 51int r100_gpu_reset(struct radeon_device *rdev);
52int r100_mc_init(struct radeon_device *rdev);
53void r100_mc_fini(struct radeon_device *rdev);
54u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); 52u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
55int r100_wb_init(struct radeon_device *rdev);
56void r100_wb_fini(struct radeon_device *rdev);
57int r100_pci_gart_init(struct radeon_device *rdev);
58void r100_pci_gart_fini(struct radeon_device *rdev);
59int r100_pci_gart_enable(struct radeon_device *rdev);
60void r100_pci_gart_disable(struct radeon_device *rdev);
61void r100_pci_gart_tlb_flush(struct radeon_device *rdev); 53void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
62int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 54int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
63int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
64void r100_cp_fini(struct radeon_device *rdev);
65void r100_cp_disable(struct radeon_device *rdev);
66void r100_cp_commit(struct radeon_device *rdev); 55void r100_cp_commit(struct radeon_device *rdev);
67void r100_ring_start(struct radeon_device *rdev); 56void r100_ring_start(struct radeon_device *rdev);
68int r100_irq_set(struct radeon_device *rdev); 57int r100_irq_set(struct radeon_device *rdev);
@@ -83,33 +72,21 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
83int r100_clear_surface_reg(struct radeon_device *rdev, int reg); 72int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
84void r100_bandwidth_update(struct radeon_device *rdev); 73void r100_bandwidth_update(struct radeon_device *rdev);
85void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 74void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
86int r100_ib_test(struct radeon_device *rdev);
87int r100_ring_test(struct radeon_device *rdev); 75int r100_ring_test(struct radeon_device *rdev);
88 76
89static struct radeon_asic r100_asic = { 77static struct radeon_asic r100_asic = {
90 .init = &r100_init, 78 .init = &r100_init,
91 .errata = &r100_errata, 79 .fini = &r100_fini,
92 .vram_info = &r100_vram_info, 80 .suspend = &r100_suspend,
81 .resume = &r100_resume,
93 .vga_set_state = &r100_vga_set_state, 82 .vga_set_state = &r100_vga_set_state,
94 .gpu_reset = &r100_gpu_reset, 83 .gpu_reset = &r100_gpu_reset,
95 .mc_init = &r100_mc_init,
96 .mc_fini = &r100_mc_fini,
97 .wb_init = &r100_wb_init,
98 .wb_fini = &r100_wb_fini,
99 .gart_init = &r100_pci_gart_init,
100 .gart_fini = &r100_pci_gart_fini,
101 .gart_enable = &r100_pci_gart_enable,
102 .gart_disable = &r100_pci_gart_disable,
103 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 84 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
104 .gart_set_page = &r100_pci_gart_set_page, 85 .gart_set_page = &r100_pci_gart_set_page,
105 .cp_init = &r100_cp_init,
106 .cp_fini = &r100_cp_fini,
107 .cp_disable = &r100_cp_disable,
108 .cp_commit = &r100_cp_commit, 86 .cp_commit = &r100_cp_commit,
109 .ring_start = &r100_ring_start, 87 .ring_start = &r100_ring_start,
110 .ring_test = &r100_ring_test, 88 .ring_test = &r100_ring_test,
111 .ring_ib_execute = &r100_ring_ib_execute, 89 .ring_ib_execute = &r100_ring_ib_execute,
112 .ib_test = &r100_ib_test,
113 .irq_set = &r100_irq_set, 90 .irq_set = &r100_irq_set,
114 .irq_process = &r100_irq_process, 91 .irq_process = &r100_irq_process,
115 .get_vblank_counter = &r100_get_vblank_counter, 92 .get_vblank_counter = &r100_get_vblank_counter,
@@ -131,55 +108,38 @@ static struct radeon_asic r100_asic = {
131/* 108/*
132 * r300,r350,rv350,rv380 109 * r300,r350,rv350,rv380
133 */ 110 */
134int r300_init(struct radeon_device *rdev); 111extern int r300_init(struct radeon_device *rdev);
135void r300_errata(struct radeon_device *rdev); 112extern void r300_fini(struct radeon_device *rdev);
136void r300_vram_info(struct radeon_device *rdev); 113extern int r300_suspend(struct radeon_device *rdev);
137int r300_gpu_reset(struct radeon_device *rdev); 114extern int r300_resume(struct radeon_device *rdev);
138int r300_mc_init(struct radeon_device *rdev); 115extern int r300_gpu_reset(struct radeon_device *rdev);
139void r300_mc_fini(struct radeon_device *rdev); 116extern void r300_ring_start(struct radeon_device *rdev);
140void r300_ring_start(struct radeon_device *rdev); 117extern void r300_fence_ring_emit(struct radeon_device *rdev,
141void r300_fence_ring_emit(struct radeon_device *rdev, 118 struct radeon_fence *fence);
142 struct radeon_fence *fence); 119extern int r300_cs_parse(struct radeon_cs_parser *p);
143int r300_cs_parse(struct radeon_cs_parser *p); 120extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
144int rv370_pcie_gart_init(struct radeon_device *rdev); 121extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
145void rv370_pcie_gart_fini(struct radeon_device *rdev); 122extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
146int rv370_pcie_gart_enable(struct radeon_device *rdev); 123extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
147void rv370_pcie_gart_disable(struct radeon_device *rdev); 124extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
148void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); 125extern int r300_copy_dma(struct radeon_device *rdev,
149int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 126 uint64_t src_offset,
150uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 127 uint64_t dst_offset,
151void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 128 unsigned num_pages,
152void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 129 struct radeon_fence *fence);
153int r300_copy_dma(struct radeon_device *rdev,
154 uint64_t src_offset,
155 uint64_t dst_offset,
156 unsigned num_pages,
157 struct radeon_fence *fence);
158
159static struct radeon_asic r300_asic = { 130static struct radeon_asic r300_asic = {
160 .init = &r300_init, 131 .init = &r300_init,
161 .errata = &r300_errata, 132 .fini = &r300_fini,
162 .vram_info = &r300_vram_info, 133 .suspend = &r300_suspend,
134 .resume = &r300_resume,
163 .vga_set_state = &r100_vga_set_state, 135 .vga_set_state = &r100_vga_set_state,
164 .gpu_reset = &r300_gpu_reset, 136 .gpu_reset = &r300_gpu_reset,
165 .mc_init = &r300_mc_init,
166 .mc_fini = &r300_mc_fini,
167 .wb_init = &r100_wb_init,
168 .wb_fini = &r100_wb_fini,
169 .gart_init = &r100_pci_gart_init,
170 .gart_fini = &r100_pci_gart_fini,
171 .gart_enable = &r100_pci_gart_enable,
172 .gart_disable = &r100_pci_gart_disable,
173 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 137 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
174 .gart_set_page = &r100_pci_gart_set_page, 138 .gart_set_page = &r100_pci_gart_set_page,
175 .cp_init = &r100_cp_init,
176 .cp_fini = &r100_cp_fini,
177 .cp_disable = &r100_cp_disable,
178 .cp_commit = &r100_cp_commit, 139 .cp_commit = &r100_cp_commit,
179 .ring_start = &r300_ring_start, 140 .ring_start = &r300_ring_start,
180 .ring_test = &r100_ring_test, 141 .ring_test = &r100_ring_test,
181 .ring_ib_execute = &r100_ring_ib_execute, 142 .ring_ib_execute = &r100_ring_ib_execute,
182 .ib_test = &r100_ib_test,
183 .irq_set = &r100_irq_set, 143 .irq_set = &r100_irq_set,
184 .irq_process = &r100_irq_process, 144 .irq_process = &r100_irq_process,
185 .get_vblank_counter = &r100_get_vblank_counter, 145 .get_vblank_counter = &r100_get_vblank_counter,
@@ -209,26 +169,14 @@ static struct radeon_asic r420_asic = {
209 .fini = &r420_fini, 169 .fini = &r420_fini,
210 .suspend = &r420_suspend, 170 .suspend = &r420_suspend,
211 .resume = &r420_resume, 171 .resume = &r420_resume,
212 .errata = NULL,
213 .vram_info = NULL,
214 .vga_set_state = &r100_vga_set_state, 172 .vga_set_state = &r100_vga_set_state,
215 .gpu_reset = &r300_gpu_reset, 173 .gpu_reset = &r300_gpu_reset,
216 .mc_init = NULL,
217 .mc_fini = NULL,
218 .wb_init = NULL,
219 .wb_fini = NULL,
220 .gart_enable = NULL,
221 .gart_disable = NULL,
222 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 174 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
223 .gart_set_page = &rv370_pcie_gart_set_page, 175 .gart_set_page = &rv370_pcie_gart_set_page,
224 .cp_init = NULL,
225 .cp_fini = NULL,
226 .cp_disable = NULL,
227 .cp_commit = &r100_cp_commit, 176 .cp_commit = &r100_cp_commit,
228 .ring_start = &r300_ring_start, 177 .ring_start = &r300_ring_start,
229 .ring_test = &r100_ring_test, 178 .ring_test = &r100_ring_test,
230 .ring_ib_execute = &r100_ring_ib_execute, 179 .ring_ib_execute = &r100_ring_ib_execute,
231 .ib_test = NULL,
232 .irq_set = &r100_irq_set, 180 .irq_set = &r100_irq_set,
233 .irq_process = &r100_irq_process, 181 .irq_process = &r100_irq_process,
234 .get_vblank_counter = &r100_get_vblank_counter, 182 .get_vblank_counter = &r100_get_vblank_counter,
@@ -250,42 +198,27 @@ static struct radeon_asic r420_asic = {
250/* 198/*
251 * rs400,rs480 199 * rs400,rs480
252 */ 200 */
253void rs400_errata(struct radeon_device *rdev); 201extern int rs400_init(struct radeon_device *rdev);
254void rs400_vram_info(struct radeon_device *rdev); 202extern void rs400_fini(struct radeon_device *rdev);
255int rs400_mc_init(struct radeon_device *rdev); 203extern int rs400_suspend(struct radeon_device *rdev);
256void rs400_mc_fini(struct radeon_device *rdev); 204extern int rs400_resume(struct radeon_device *rdev);
257int rs400_gart_init(struct radeon_device *rdev);
258void rs400_gart_fini(struct radeon_device *rdev);
259int rs400_gart_enable(struct radeon_device *rdev);
260void rs400_gart_disable(struct radeon_device *rdev);
261void rs400_gart_tlb_flush(struct radeon_device *rdev); 205void rs400_gart_tlb_flush(struct radeon_device *rdev);
262int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 206int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
263uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 207uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
264void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 208void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
265static struct radeon_asic rs400_asic = { 209static struct radeon_asic rs400_asic = {
266 .init = &r300_init, 210 .init = &rs400_init,
267 .errata = &rs400_errata, 211 .fini = &rs400_fini,
268 .vram_info = &rs400_vram_info, 212 .suspend = &rs400_suspend,
213 .resume = &rs400_resume,
269 .vga_set_state = &r100_vga_set_state, 214 .vga_set_state = &r100_vga_set_state,
270 .gpu_reset = &r300_gpu_reset, 215 .gpu_reset = &r300_gpu_reset,
271 .mc_init = &rs400_mc_init,
272 .mc_fini = &rs400_mc_fini,
273 .wb_init = &r100_wb_init,
274 .wb_fini = &r100_wb_fini,
275 .gart_init = &rs400_gart_init,
276 .gart_fini = &rs400_gart_fini,
277 .gart_enable = &rs400_gart_enable,
278 .gart_disable = &rs400_gart_disable,
279 .gart_tlb_flush = &rs400_gart_tlb_flush, 216 .gart_tlb_flush = &rs400_gart_tlb_flush,
280 .gart_set_page = &rs400_gart_set_page, 217 .gart_set_page = &rs400_gart_set_page,
281 .cp_init = &r100_cp_init,
282 .cp_fini = &r100_cp_fini,
283 .cp_disable = &r100_cp_disable,
284 .cp_commit = &r100_cp_commit, 218 .cp_commit = &r100_cp_commit,
285 .ring_start = &r300_ring_start, 219 .ring_start = &r300_ring_start,
286 .ring_test = &r100_ring_test, 220 .ring_test = &r100_ring_test,
287 .ring_ib_execute = &r100_ring_ib_execute, 221 .ring_ib_execute = &r100_ring_ib_execute,
288 .ib_test = &r100_ib_test,
289 .irq_set = &r100_irq_set, 222 .irq_set = &r100_irq_set,
290 .irq_process = &r100_irq_process, 223 .irq_process = &r100_irq_process,
291 .get_vblank_counter = &r100_get_vblank_counter, 224 .get_vblank_counter = &r100_get_vblank_counter,
@@ -307,18 +240,13 @@ static struct radeon_asic rs400_asic = {
307/* 240/*
308 * rs600. 241 * rs600.
309 */ 242 */
310int rs600_init(struct radeon_device *rdev); 243extern int rs600_init(struct radeon_device *rdev);
311void rs600_errata(struct radeon_device *rdev); 244extern void rs600_fini(struct radeon_device *rdev);
312void rs600_vram_info(struct radeon_device *rdev); 245extern int rs600_suspend(struct radeon_device *rdev);
313int rs600_mc_init(struct radeon_device *rdev); 246extern int rs600_resume(struct radeon_device *rdev);
314void rs600_mc_fini(struct radeon_device *rdev);
315int rs600_irq_set(struct radeon_device *rdev); 247int rs600_irq_set(struct radeon_device *rdev);
316int rs600_irq_process(struct radeon_device *rdev); 248int rs600_irq_process(struct radeon_device *rdev);
317u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 249u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
318int rs600_gart_init(struct radeon_device *rdev);
319void rs600_gart_fini(struct radeon_device *rdev);
320int rs600_gart_enable(struct radeon_device *rdev);
321void rs600_gart_disable(struct radeon_device *rdev);
322void rs600_gart_tlb_flush(struct radeon_device *rdev); 250void rs600_gart_tlb_flush(struct radeon_device *rdev);
323int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 251int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
324uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 252uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -326,28 +254,17 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
326void rs600_bandwidth_update(struct radeon_device *rdev); 254void rs600_bandwidth_update(struct radeon_device *rdev);
327static struct radeon_asic rs600_asic = { 255static struct radeon_asic rs600_asic = {
328 .init = &rs600_init, 256 .init = &rs600_init,
329 .errata = &rs600_errata, 257 .fini = &rs600_fini,
330 .vram_info = &rs600_vram_info, 258 .suspend = &rs600_suspend,
259 .resume = &rs600_resume,
331 .vga_set_state = &r100_vga_set_state, 260 .vga_set_state = &r100_vga_set_state,
332 .gpu_reset = &r300_gpu_reset, 261 .gpu_reset = &r300_gpu_reset,
333 .mc_init = &rs600_mc_init,
334 .mc_fini = &rs600_mc_fini,
335 .wb_init = &r100_wb_init,
336 .wb_fini = &r100_wb_fini,
337 .gart_init = &rs600_gart_init,
338 .gart_fini = &rs600_gart_fini,
339 .gart_enable = &rs600_gart_enable,
340 .gart_disable = &rs600_gart_disable,
341 .gart_tlb_flush = &rs600_gart_tlb_flush, 262 .gart_tlb_flush = &rs600_gart_tlb_flush,
342 .gart_set_page = &rs600_gart_set_page, 263 .gart_set_page = &rs600_gart_set_page,
343 .cp_init = &r100_cp_init,
344 .cp_fini = &r100_cp_fini,
345 .cp_disable = &r100_cp_disable,
346 .cp_commit = &r100_cp_commit, 264 .cp_commit = &r100_cp_commit,
347 .ring_start = &r300_ring_start, 265 .ring_start = &r300_ring_start,
348 .ring_test = &r100_ring_test, 266 .ring_test = &r100_ring_test,
349 .ring_ib_execute = &r100_ring_ib_execute, 267 .ring_ib_execute = &r100_ring_ib_execute,
350 .ib_test = &r100_ib_test,
351 .irq_set = &rs600_irq_set, 268 .irq_set = &rs600_irq_set,
352 .irq_process = &rs600_irq_process, 269 .irq_process = &rs600_irq_process,
353 .get_vblank_counter = &rs600_get_vblank_counter, 270 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -367,37 +284,26 @@ static struct radeon_asic rs600_asic = {
367/* 284/*
368 * rs690,rs740 285 * rs690,rs740
369 */ 286 */
370void rs690_errata(struct radeon_device *rdev); 287int rs690_init(struct radeon_device *rdev);
371void rs690_vram_info(struct radeon_device *rdev); 288void rs690_fini(struct radeon_device *rdev);
372int rs690_mc_init(struct radeon_device *rdev); 289int rs690_resume(struct radeon_device *rdev);
373void rs690_mc_fini(struct radeon_device *rdev); 290int rs690_suspend(struct radeon_device *rdev);
374uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); 291uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
375void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 292void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
376void rs690_bandwidth_update(struct radeon_device *rdev); 293void rs690_bandwidth_update(struct radeon_device *rdev);
377static struct radeon_asic rs690_asic = { 294static struct radeon_asic rs690_asic = {
378 .init = &rs600_init, 295 .init = &rs690_init,
379 .errata = &rs690_errata, 296 .fini = &rs690_fini,
380 .vram_info = &rs690_vram_info, 297 .suspend = &rs690_suspend,
298 .resume = &rs690_resume,
381 .vga_set_state = &r100_vga_set_state, 299 .vga_set_state = &r100_vga_set_state,
382 .gpu_reset = &r300_gpu_reset, 300 .gpu_reset = &r300_gpu_reset,
383 .mc_init = &rs690_mc_init,
384 .mc_fini = &rs690_mc_fini,
385 .wb_init = &r100_wb_init,
386 .wb_fini = &r100_wb_fini,
387 .gart_init = &rs400_gart_init,
388 .gart_fini = &rs400_gart_fini,
389 .gart_enable = &rs400_gart_enable,
390 .gart_disable = &rs400_gart_disable,
391 .gart_tlb_flush = &rs400_gart_tlb_flush, 301 .gart_tlb_flush = &rs400_gart_tlb_flush,
392 .gart_set_page = &rs400_gart_set_page, 302 .gart_set_page = &rs400_gart_set_page,
393 .cp_init = &r100_cp_init,
394 .cp_fini = &r100_cp_fini,
395 .cp_disable = &r100_cp_disable,
396 .cp_commit = &r100_cp_commit, 303 .cp_commit = &r100_cp_commit,
397 .ring_start = &r300_ring_start, 304 .ring_start = &r300_ring_start,
398 .ring_test = &r100_ring_test, 305 .ring_test = &r100_ring_test,
399 .ring_ib_execute = &r100_ring_ib_execute, 306 .ring_ib_execute = &r100_ring_ib_execute,
400 .ib_test = &r100_ib_test,
401 .irq_set = &rs600_irq_set, 307 .irq_set = &rs600_irq_set,
402 .irq_process = &rs600_irq_process, 308 .irq_process = &rs600_irq_process,
403 .get_vblank_counter = &rs600_get_vblank_counter, 309 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -435,28 +341,14 @@ static struct radeon_asic rv515_asic = {
435 .fini = &rv515_fini, 341 .fini = &rv515_fini,
436 .suspend = &rv515_suspend, 342 .suspend = &rv515_suspend,
437 .resume = &rv515_resume, 343 .resume = &rv515_resume,
438 .errata = NULL,
439 .vram_info = NULL,
440 .vga_set_state = &r100_vga_set_state, 344 .vga_set_state = &r100_vga_set_state,
441 .gpu_reset = &rv515_gpu_reset, 345 .gpu_reset = &rv515_gpu_reset,
442 .mc_init = NULL,
443 .mc_fini = NULL,
444 .wb_init = NULL,
445 .wb_fini = NULL,
446 .gart_init = &rv370_pcie_gart_init,
447 .gart_fini = &rv370_pcie_gart_fini,
448 .gart_enable = NULL,
449 .gart_disable = NULL,
450 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 346 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
451 .gart_set_page = &rv370_pcie_gart_set_page, 347 .gart_set_page = &rv370_pcie_gart_set_page,
452 .cp_init = NULL,
453 .cp_fini = NULL,
454 .cp_disable = NULL,
455 .cp_commit = &r100_cp_commit, 348 .cp_commit = &r100_cp_commit,
456 .ring_start = &rv515_ring_start, 349 .ring_start = &rv515_ring_start,
457 .ring_test = &r100_ring_test, 350 .ring_test = &r100_ring_test,
458 .ring_ib_execute = &r100_ring_ib_execute, 351 .ring_ib_execute = &r100_ring_ib_execute,
459 .ib_test = NULL,
460 .irq_set = &rs600_irq_set, 352 .irq_set = &rs600_irq_set,
461 .irq_process = &rs600_irq_process, 353 .irq_process = &rs600_irq_process,
462 .get_vblank_counter = &rs600_get_vblank_counter, 354 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -485,28 +377,14 @@ static struct radeon_asic r520_asic = {
485 .fini = &rv515_fini, 377 .fini = &rv515_fini,
486 .suspend = &rv515_suspend, 378 .suspend = &rv515_suspend,
487 .resume = &r520_resume, 379 .resume = &r520_resume,
488 .errata = NULL,
489 .vram_info = NULL,
490 .vga_set_state = &r100_vga_set_state, 380 .vga_set_state = &r100_vga_set_state,
491 .gpu_reset = &rv515_gpu_reset, 381 .gpu_reset = &rv515_gpu_reset,
492 .mc_init = NULL,
493 .mc_fini = NULL,
494 .wb_init = NULL,
495 .wb_fini = NULL,
496 .gart_init = NULL,
497 .gart_fini = NULL,
498 .gart_enable = NULL,
499 .gart_disable = NULL,
500 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 382 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
501 .gart_set_page = &rv370_pcie_gart_set_page, 383 .gart_set_page = &rv370_pcie_gart_set_page,
502 .cp_init = NULL,
503 .cp_fini = NULL,
504 .cp_disable = NULL,
505 .cp_commit = &r100_cp_commit, 384 .cp_commit = &r100_cp_commit,
506 .ring_start = &rv515_ring_start, 385 .ring_start = &rv515_ring_start,
507 .ring_test = &r100_ring_test, 386 .ring_test = &r100_ring_test,
508 .ring_ib_execute = &r100_ring_ib_execute, 387 .ring_ib_execute = &r100_ring_ib_execute,
509 .ib_test = NULL,
510 .irq_set = &rs600_irq_set, 388 .irq_set = &rs600_irq_set,
511 .irq_process = &rs600_irq_process, 389 .irq_process = &rs600_irq_process,
512 .get_vblank_counter = &rs600_get_vblank_counter, 390 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -554,37 +432,23 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
554 uint32_t offset, uint32_t obj_size); 432 uint32_t offset, uint32_t obj_size);
555int r600_clear_surface_reg(struct radeon_device *rdev, int reg); 433int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
556void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 434void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
557int r600_ib_test(struct radeon_device *rdev);
558int r600_ring_test(struct radeon_device *rdev); 435int r600_ring_test(struct radeon_device *rdev);
559int r600_copy_blit(struct radeon_device *rdev, 436int r600_copy_blit(struct radeon_device *rdev,
560 uint64_t src_offset, uint64_t dst_offset, 437 uint64_t src_offset, uint64_t dst_offset,
561 unsigned num_pages, struct radeon_fence *fence); 438 unsigned num_pages, struct radeon_fence *fence);
562 439
563static struct radeon_asic r600_asic = { 440static struct radeon_asic r600_asic = {
564 .errata = NULL,
565 .init = &r600_init, 441 .init = &r600_init,
566 .fini = &r600_fini, 442 .fini = &r600_fini,
567 .suspend = &r600_suspend, 443 .suspend = &r600_suspend,
568 .resume = &r600_resume, 444 .resume = &r600_resume,
569 .cp_commit = &r600_cp_commit, 445 .cp_commit = &r600_cp_commit,
570 .vram_info = NULL,
571 .vga_set_state = &r600_vga_set_state, 446 .vga_set_state = &r600_vga_set_state,
572 .gpu_reset = &r600_gpu_reset, 447 .gpu_reset = &r600_gpu_reset,
573 .mc_init = NULL,
574 .mc_fini = NULL,
575 .wb_init = &r600_wb_init,
576 .wb_fini = &r600_wb_fini,
577 .gart_enable = NULL,
578 .gart_disable = NULL,
579 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 448 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
580 .gart_set_page = &rs600_gart_set_page, 449 .gart_set_page = &rs600_gart_set_page,
581 .cp_init = NULL,
582 .cp_fini = NULL,
583 .cp_disable = NULL,
584 .ring_start = NULL,
585 .ring_test = &r600_ring_test, 450 .ring_test = &r600_ring_test,
586 .ring_ib_execute = &r600_ring_ib_execute, 451 .ring_ib_execute = &r600_ring_ib_execute,
587 .ib_test = &r600_ib_test,
588 .irq_set = &r600_irq_set, 452 .irq_set = &r600_irq_set,
589 .irq_process = &r600_irq_process, 453 .irq_process = &r600_irq_process,
590 .fence_ring_emit = &r600_fence_ring_emit, 454 .fence_ring_emit = &r600_fence_ring_emit,
@@ -611,30 +475,17 @@ int rv770_resume(struct radeon_device *rdev);
611int rv770_gpu_reset(struct radeon_device *rdev); 475int rv770_gpu_reset(struct radeon_device *rdev);
612 476
613static struct radeon_asic rv770_asic = { 477static struct radeon_asic rv770_asic = {
614 .errata = NULL,
615 .init = &rv770_init, 478 .init = &rv770_init,
616 .fini = &rv770_fini, 479 .fini = &rv770_fini,
617 .suspend = &rv770_suspend, 480 .suspend = &rv770_suspend,
618 .resume = &rv770_resume, 481 .resume = &rv770_resume,
619 .cp_commit = &r600_cp_commit, 482 .cp_commit = &r600_cp_commit,
620 .vram_info = NULL,
621 .gpu_reset = &rv770_gpu_reset, 483 .gpu_reset = &rv770_gpu_reset,
622 .vga_set_state = &r600_vga_set_state, 484 .vga_set_state = &r600_vga_set_state,
623 .mc_init = NULL,
624 .mc_fini = NULL,
625 .wb_init = &r600_wb_init,
626 .wb_fini = &r600_wb_fini,
627 .gart_enable = NULL,
628 .gart_disable = NULL,
629 .gart_tlb_flush = &r600_pcie_gart_tlb_flush, 485 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
630 .gart_set_page = &rs600_gart_set_page, 486 .gart_set_page = &rs600_gart_set_page,
631 .cp_init = NULL,
632 .cp_fini = NULL,
633 .cp_disable = NULL,
634 .ring_start = NULL,
635 .ring_test = &r600_ring_test, 487 .ring_test = &r600_ring_test,
636 .ring_ib_execute = &r600_ring_ib_execute, 488 .ring_ib_execute = &r600_ring_ib_execute,
637 .ib_test = &r600_ib_test,
638 .irq_set = &r600_irq_set, 489 .irq_set = &r600_irq_set,
639 .irq_process = &r600_irq_process, 490 .irq_process = &r600_irq_process,
640 .fence_ring_emit = &r600_fence_ring_emit, 491 .fence_ring_emit = &r600_fence_ring_emit,
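Editor's note: the radeon_asic hunks strip the per-family errata/vram_info/mc/wb/gart/cp hooks; with the consolidated init path each family drives its own bring-up from .init/.fini/.suspend/.resume, so the shared code only keeps the operations it still dispatches generically. What a trimmed vtable entry ends up looking like, with the fields copied from the r600_asic hunk and the untouched hooks summarised in a comment:

	/* Shape of the reduced vtable; only generically-dispatched hooks remain. */
	static struct radeon_asic r600_asic_example = {
		.init		 = &r600_init,
		.fini		 = &r600_fini,
		.suspend	 = &r600_suspend,
		.resume		 = &r600_resume,
		.vga_set_state	 = &r600_vga_set_state,
		.gpu_reset	 = &r600_gpu_reset,
		.gart_tlb_flush	 = &r600_pcie_gart_tlb_flush,
		.gart_set_page	 = &rs600_gart_set_page,
		.cp_commit	 = &r600_cp_commit,
		.ring_test	 = &r600_ring_test,
		.ring_ib_execute = &r600_ring_ib_execute,
		/* irq, fence, copy, clock and surface hooks are unchanged */
	};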
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 96e37a6e7ce4..34a9b9119518 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -33,12 +33,50 @@
33/* 33/*
34 * BIOS. 34 * BIOS.
35 */ 35 */
36
37/* If you boot an IGP board with a discrete card as the primary,
38 * the IGP rom is not accessible via the rom bar as the IGP rom is
39 * part of the system bios. On boot, the system bios puts a
40 * copy of the igp rom at the start of vram if a discrete card is
41 * present.
42 */
43static bool igp_read_bios_from_vram(struct radeon_device *rdev)
44{
45 uint8_t __iomem *bios;
46 resource_size_t vram_base;
47 resource_size_t size = 256 * 1024; /* ??? */
48
49 rdev->bios = NULL;
50 vram_base = drm_get_resource_start(rdev->ddev, 0);
51 bios = ioremap(vram_base, size);
52 if (!bios) {
53 DRM_ERROR("Unable to mmap vram\n");
54 return false;
55 }
56
57 if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
58 iounmap(bios);
59 DRM_ERROR("bad rom signature\n");
60 return false;
61 }
62 rdev->bios = kmalloc(size, GFP_KERNEL);
63 if (rdev->bios == NULL) {
64 iounmap(bios);
65 DRM_ERROR("kmalloc failed\n");
66 return false;
67 }
68 memcpy(rdev->bios, bios, size);
69 iounmap(bios);
70 return true;
71}
72
36static bool radeon_read_bios(struct radeon_device *rdev) 73static bool radeon_read_bios(struct radeon_device *rdev)
37{ 74{
38 uint8_t __iomem *bios; 75 uint8_t __iomem *bios;
39 size_t size; 76 size_t size;
40 77
41 rdev->bios = NULL; 78 rdev->bios = NULL;
79 /* XXX: some cards may return 0 for rom size? ddx has a workaround */
42 bios = pci_map_rom(rdev->pdev, &size); 80 bios = pci_map_rom(rdev->pdev, &size);
43 if (!bios) { 81 if (!bios) {
44 return false; 82 return false;
@@ -341,7 +379,9 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
341 379
342static bool radeon_read_disabled_bios(struct radeon_device *rdev) 380static bool radeon_read_disabled_bios(struct radeon_device *rdev)
343{ 381{
344 if (rdev->family >= CHIP_RV770) 382 if (rdev->flags & RADEON_IS_IGP)
383 return igp_read_bios_from_vram(rdev);
384 else if (rdev->family >= CHIP_RV770)
345 return r700_read_disabled_bios(rdev); 385 return r700_read_disabled_bios(rdev);
346 else if (rdev->family >= CHIP_R600) 386 else if (rdev->family >= CHIP_R600)
347 return r600_read_disabled_bios(rdev); 387 return r600_read_disabled_bios(rdev);
@@ -356,7 +396,12 @@ bool radeon_get_bios(struct radeon_device *rdev)
356 bool r; 396 bool r;
357 uint16_t tmp; 397 uint16_t tmp;
358 398
359 r = radeon_read_bios(rdev); 399 if (rdev->flags & RADEON_IS_IGP) {
400 r = igp_read_bios_from_vram(rdev);
401 if (r == false)
402 r = radeon_read_bios(rdev);
403 } else
404 r = radeon_read_bios(rdev);
360 if (r == false) { 405 if (r == false) {
361 r = radeon_read_disabled_bios(rdev); 406 r = radeon_read_disabled_bios(rdev);
362 } 407 }
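
The new igp_read_bios_from_vram() above depends on the copied IGP image carrying the usual legacy ROM signature (0x55 0xAA) at the very start of VRAM. A minimal standalone sketch of that signature check, using a hypothetical in-memory buffer rather than the driver's ioremap() mapping:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A legacy VGA BIOS image must start with the bytes 0x55 0xAA. */
static bool rom_signature_ok(const uint8_t *image, size_t size)
{
	return size >= 2 && image[0] == 0x55 && image[1] == 0xaa;
}

int main(void)
{
	uint8_t fake_rom[4] = { 0x55, 0xaa, 0x00, 0x00 };

	printf("signature %s\n",
	       rom_signature_ok(fake_rom, sizeof(fake_rom)) ? "ok" : "bad");
	return 0;
}
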
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 152eef13197a..f5c32a766b10 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
411 R300_PIXCLK_TRANS_ALWAYS_ONb | 411 R300_PIXCLK_TRANS_ALWAYS_ONb |
412 R300_PIXCLK_TVO_ALWAYS_ONb | 412 R300_PIXCLK_TVO_ALWAYS_ONb |
413 R300_P2G2CLK_ALWAYS_ONb | 413 R300_P2G2CLK_ALWAYS_ONb |
414 R300_P2G2CLK_ALWAYS_ONb); 414 R300_P2G2CLK_DAC_ALWAYS_ONb);
415 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 415 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
416 } else if (rdev->family >= CHIP_RV350) { 416 } else if (rdev->family >= CHIP_RV350) {
417 tmp = RREG32_PLL(R300_SCLK_CNTL2); 417 tmp = RREG32_PLL(R300_SCLK_CNTL2);
@@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
464 R300_PIXCLK_TRANS_ALWAYS_ONb | 464 R300_PIXCLK_TRANS_ALWAYS_ONb |
465 R300_PIXCLK_TVO_ALWAYS_ONb | 465 R300_PIXCLK_TVO_ALWAYS_ONb |
466 R300_P2G2CLK_ALWAYS_ONb | 466 R300_P2G2CLK_ALWAYS_ONb |
467 R300_P2G2CLK_ALWAYS_ONb); 467 R300_P2G2CLK_DAC_ALWAYS_ONb);
468 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 468 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
469 469
470 tmp = RREG32_PLL(RADEON_MCLK_MISC); 470 tmp = RREG32_PLL(RADEON_MCLK_MISC);
@@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
654 R300_PIXCLK_TRANS_ALWAYS_ONb | 654 R300_PIXCLK_TRANS_ALWAYS_ONb |
655 R300_PIXCLK_TVO_ALWAYS_ONb | 655 R300_PIXCLK_TVO_ALWAYS_ONb |
656 R300_P2G2CLK_ALWAYS_ONb | 656 R300_P2G2CLK_ALWAYS_ONb |
657 R300_P2G2CLK_ALWAYS_ONb | 657 R300_P2G2CLK_DAC_ALWAYS_ONb |
658 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); 658 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
659 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 659 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
660 } else if (rdev->family >= CHIP_RV350) { 660 } else if (rdev->family >= CHIP_RV350) {
@@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
705 R300_PIXCLK_TRANS_ALWAYS_ONb | 705 R300_PIXCLK_TRANS_ALWAYS_ONb |
706 R300_PIXCLK_TVO_ALWAYS_ONb | 706 R300_PIXCLK_TVO_ALWAYS_ONb |
707 R300_P2G2CLK_ALWAYS_ONb | 707 R300_P2G2CLK_ALWAYS_ONb |
708 R300_P2G2CLK_ALWAYS_ONb | 708 R300_P2G2CLK_DAC_ALWAYS_ONb |
709 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); 709 R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
710 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 710 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
711 } else { 711 } else {
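
The four radeon_clocks.c hunks above fix the same copy-and-paste slip: R300_P2G2CLK_ALWAYS_ONb was listed twice, so the intended R300_P2G2CLK_DAC_ALWAYS_ONb bit never made it into the mask. A tiny standalone illustration of why repeating a flag in an OR expression is a silent no-op (the flag values below are invented for the demo, not the real R300 layout):

#include <stdint.h>
#include <stdio.h>

#define CLK_ALWAYS_ON      (1u << 0)   /* invented stand-in flags */
#define CLK_DAC_ALWAYS_ON  (1u << 1)

int main(void)
{
	uint32_t buggy = CLK_ALWAYS_ON | CLK_ALWAYS_ON;     /* still just bit 0 */
	uint32_t fixed = CLK_ALWAYS_ON | CLK_DAC_ALWAYS_ON; /* bits 0 and 1 */

	printf("buggy mask 0x%x, fixed mask 0x%x\n", buggy, fixed);
	return 0;
}
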
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ec835d56d30a..3d667031de6e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -322,10 +322,6 @@ int radeon_asic_init(struct radeon_device *rdev)
322 case CHIP_RV380: 322 case CHIP_RV380:
323 rdev->asic = &r300_asic; 323 rdev->asic = &r300_asic;
324 if (rdev->flags & RADEON_IS_PCIE) { 324 if (rdev->flags & RADEON_IS_PCIE) {
325 rdev->asic->gart_init = &rv370_pcie_gart_init;
326 rdev->asic->gart_fini = &rv370_pcie_gart_fini;
327 rdev->asic->gart_enable = &rv370_pcie_gart_enable;
328 rdev->asic->gart_disable = &rv370_pcie_gart_disable;
329 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; 325 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
330 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; 326 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
331 } 327 }
@@ -485,7 +481,6 @@ void radeon_combios_fini(struct radeon_device *rdev)
485static unsigned int radeon_vga_set_decode(void *cookie, bool state) 481static unsigned int radeon_vga_set_decode(void *cookie, bool state)
486{ 482{
487 struct radeon_device *rdev = cookie; 483 struct radeon_device *rdev = cookie;
488
489 radeon_vga_set_state(rdev, state); 484 radeon_vga_set_state(rdev, state);
490 if (state) 485 if (state)
491 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 486 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -493,6 +488,29 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
493 else 488 else
494 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 489 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
495} 490}
491
492void radeon_agp_disable(struct radeon_device *rdev)
493{
494 rdev->flags &= ~RADEON_IS_AGP;
495 if (rdev->family >= CHIP_R600) {
496 DRM_INFO("Forcing AGP to PCIE mode\n");
497 rdev->flags |= RADEON_IS_PCIE;
498 } else if (rdev->family >= CHIP_RV515 ||
499 rdev->family == CHIP_RV380 ||
500 rdev->family == CHIP_RV410 ||
501 rdev->family == CHIP_R423) {
502 DRM_INFO("Forcing AGP to PCIE mode\n");
503 rdev->flags |= RADEON_IS_PCIE;
504 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
505 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
506 } else {
507 DRM_INFO("Forcing AGP to PCI mode\n");
508 rdev->flags |= RADEON_IS_PCI;
509 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
510 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
511 }
512}
513
496/* 514/*
497 * Radeon device. 515 * Radeon device.
498 */ 516 */
@@ -531,32 +549,7 @@ int radeon_device_init(struct radeon_device *rdev,
531 } 549 }
532 550
533 if (radeon_agpmode == -1) { 551 if (radeon_agpmode == -1) {
534 rdev->flags &= ~RADEON_IS_AGP; 552 radeon_agp_disable(rdev);
535 if (rdev->family >= CHIP_R600) {
536 DRM_INFO("Forcing AGP to PCIE mode\n");
537 rdev->flags |= RADEON_IS_PCIE;
538 } else if (rdev->family >= CHIP_RV515 ||
539 rdev->family == CHIP_RV380 ||
540 rdev->family == CHIP_RV410 ||
541 rdev->family == CHIP_R423) {
542 DRM_INFO("Forcing AGP to PCIE mode\n");
543 rdev->flags |= RADEON_IS_PCIE;
544 rdev->asic->gart_init = &rv370_pcie_gart_init;
545 rdev->asic->gart_fini = &rv370_pcie_gart_fini;
546 rdev->asic->gart_enable = &rv370_pcie_gart_enable;
547 rdev->asic->gart_disable = &rv370_pcie_gart_disable;
548 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
549 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
550 } else {
551 DRM_INFO("Forcing AGP to PCI mode\n");
552 rdev->flags |= RADEON_IS_PCI;
553 rdev->asic->gart_init = &r100_pci_gart_init;
554 rdev->asic->gart_fini = &r100_pci_gart_fini;
555 rdev->asic->gart_enable = &r100_pci_gart_enable;
556 rdev->asic->gart_disable = &r100_pci_gart_disable;
557 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
558 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
559 }
560 } 553 }
561 554
562 /* set DMA mask + need_dma32 flags. 555 /* set DMA mask + need_dma32 flags.
@@ -588,111 +581,27 @@ int radeon_device_init(struct radeon_device *rdev,
588 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); 581 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
589 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); 582 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
590 583
591 rdev->new_init_path = false;
592 r = radeon_init(rdev);
593 if (r) {
594 return r;
595 }
596
597 /* if we have > 1 VGA cards, then disable the radeon VGA resources */ 584 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
598 r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 585 r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
599 if (r) { 586 if (r) {
600 return -EINVAL; 587 return -EINVAL;
601 } 588 }
602 589
603 if (!rdev->new_init_path) { 590 r = radeon_init(rdev);
604 /* Setup errata flags */ 591 if (r)
605 radeon_errata(rdev); 592 return r;
606 /* Initialize scratch registers */
607 radeon_scratch_init(rdev);
608 /* Initialize surface registers */
609 radeon_surface_init(rdev);
610
611 /* BIOS*/
612 if (!radeon_get_bios(rdev)) {
613 if (ASIC_IS_AVIVO(rdev))
614 return -EINVAL;
615 }
616 if (rdev->is_atom_bios) {
617 r = radeon_atombios_init(rdev);
618 if (r) {
619 return r;
620 }
621 } else {
622 r = radeon_combios_init(rdev);
623 if (r) {
624 return r;
625 }
626 }
627 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
628 if (radeon_gpu_reset(rdev)) {
629 /* FIXME: what do we want to do here ? */
630 }
631 /* check if cards are posted or not */
632 if (!radeon_card_posted(rdev) && rdev->bios) {
633 DRM_INFO("GPU not posted. posting now...\n");
634 if (rdev->is_atom_bios) {
635 atom_asic_init(rdev->mode_info.atom_context);
636 } else {
637 radeon_combios_asic_init(rdev->ddev);
638 }
639 }
640 /* Get clock & vram information */
641 radeon_get_clock_info(rdev->ddev);
642 radeon_vram_info(rdev);
643 /* Initialize clocks */
644 r = radeon_clocks_init(rdev);
645 if (r) {
646 return r;
647 }
648 593
649 /* Initialize memory controller (also test AGP) */ 594 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
650 r = radeon_mc_init(rdev); 595 /* Acceleration not working on AGP card try again
651 if (r) { 596 * with fallback to PCI or PCIE GART
652 return r; 597 */
653 } 598 radeon_gpu_reset(rdev);
654 /* Fence driver */ 599 radeon_fini(rdev);
655 r = radeon_fence_driver_init(rdev); 600 radeon_agp_disable(rdev);
656 if (r) { 601 r = radeon_init(rdev);
657 return r;
658 }
659 r = radeon_irq_kms_init(rdev);
660 if (r) {
661 return r;
662 }
663 /* Memory manager */
664 r = radeon_object_init(rdev);
665 if (r) {
666 return r;
667 }
668 r = radeon_gpu_gart_init(rdev);
669 if (r) 602 if (r)
670 return r; 603 return r;
671 /* Initialize GART (initialize after TTM so we can allocate
672 * memory through TTM but finalize after TTM) */
673 r = radeon_gart_enable(rdev);
674 if (r)
675 return 0;
676 r = radeon_gem_init(rdev);
677 if (r)
678 return 0;
679
680 /* 1M ring buffer */
681 r = radeon_cp_init(rdev, 1024 * 1024);
682 if (r)
683 return 0;
684 r = radeon_wb_init(rdev);
685 if (r)
686 DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
687 r = radeon_ib_pool_init(rdev);
688 if (r)
689 return 0;
690 r = radeon_ib_test(rdev);
691 if (r)
692 return 0;
693 rdev->accel_working = true;
694 } 604 }
695 DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
696 if (radeon_testing) { 605 if (radeon_testing) {
697 radeon_test_moves(rdev); 606 radeon_test_moves(rdev);
698 } 607 }
@@ -706,32 +615,8 @@ void radeon_device_fini(struct radeon_device *rdev)
706{ 615{
707 DRM_INFO("radeon: finishing device.\n"); 616 DRM_INFO("radeon: finishing device.\n");
708 rdev->shutdown = true; 617 rdev->shutdown = true;
709 /* Order matter so becarefull if you rearrange anythings */ 618 radeon_fini(rdev);
710 if (!rdev->new_init_path) { 619 vga_client_register(rdev->pdev, NULL, NULL, NULL);
711 radeon_ib_pool_fini(rdev);
712 radeon_cp_fini(rdev);
713 radeon_wb_fini(rdev);
714 radeon_gpu_gart_fini(rdev);
715 radeon_gem_fini(rdev);
716 radeon_mc_fini(rdev);
717#if __OS_HAS_AGP
718 radeon_agp_fini(rdev);
719#endif
720 radeon_irq_kms_fini(rdev);
721 vga_client_register(rdev->pdev, NULL, NULL, NULL);
722 radeon_fence_driver_fini(rdev);
723 radeon_clocks_fini(rdev);
724 radeon_object_fini(rdev);
725 if (rdev->is_atom_bios) {
726 radeon_atombios_fini(rdev);
727 } else {
728 radeon_combios_fini(rdev);
729 }
730 kfree(rdev->bios);
731 rdev->bios = NULL;
732 } else {
733 radeon_fini(rdev);
734 }
735 iounmap(rdev->rmmio); 620 iounmap(rdev->rmmio);
736 rdev->rmmio = NULL; 621 rdev->rmmio = NULL;
737} 622}
@@ -771,14 +656,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
771 656
772 radeon_save_bios_scratch_regs(rdev); 657 radeon_save_bios_scratch_regs(rdev);
773 658
774 if (!rdev->new_init_path) { 659 radeon_suspend(rdev);
775 radeon_cp_disable(rdev);
776 radeon_gart_disable(rdev);
777 rdev->irq.sw_int = false;
778 radeon_irq_set(rdev);
779 } else {
780 radeon_suspend(rdev);
781 }
782 /* evict remaining vram memory */ 660 /* evict remaining vram memory */
783 radeon_object_evict_vram(rdev); 661 radeon_object_evict_vram(rdev);
784 662
@@ -797,7 +675,6 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
797int radeon_resume_kms(struct drm_device *dev) 675int radeon_resume_kms(struct drm_device *dev)
798{ 676{
799 struct radeon_device *rdev = dev->dev_private; 677 struct radeon_device *rdev = dev->dev_private;
800 int r;
801 678
802 acquire_console_sem(); 679 acquire_console_sem();
803 pci_set_power_state(dev->pdev, PCI_D0); 680 pci_set_power_state(dev->pdev, PCI_D0);
@@ -807,43 +684,7 @@ int radeon_resume_kms(struct drm_device *dev)
807 return -1; 684 return -1;
808 } 685 }
809 pci_set_master(dev->pdev); 686 pci_set_master(dev->pdev);
810 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 687 radeon_resume(rdev);
811 if (!rdev->new_init_path) {
812 if (radeon_gpu_reset(rdev)) {
813 /* FIXME: what do we want to do here ? */
814 }
815 /* post card */
816 if (rdev->is_atom_bios) {
817 atom_asic_init(rdev->mode_info.atom_context);
818 } else {
819 radeon_combios_asic_init(rdev->ddev);
820 }
821 /* Initialize clocks */
822 r = radeon_clocks_init(rdev);
823 if (r) {
824 release_console_sem();
825 return r;
826 }
827 /* Enable IRQ */
828 rdev->irq.sw_int = true;
829 radeon_irq_set(rdev);
830 /* Initialize GPU Memory Controller */
831 r = radeon_mc_init(rdev);
832 if (r) {
833 goto out;
834 }
835 r = radeon_gart_enable(rdev);
836 if (r) {
837 goto out;
838 }
839 r = radeon_cp_init(rdev, rdev->cp.ring_size);
840 if (r) {
841 goto out;
842 }
843 } else {
844 radeon_resume(rdev);
845 }
846out:
847 radeon_restore_bios_scratch_regs(rdev); 688 radeon_restore_bios_scratch_regs(rdev);
848 fb_set_suspend(rdev->fbdev_info, 0); 689 fb_set_suspend(rdev->fbdev_info, 0);
849 release_console_sem(); 690 release_console_sem();
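
With init now funneled through radeon_init()/radeon_fini(), radeon_device_init() above retries once with AGP forced off when acceleration fails to come up on an AGP board. A rough standalone sketch of that retry-with-fallback shape; the struct and helpers are hypothetical stand-ins for the driver's radeon_init(), radeon_fini() and radeon_agp_disable():

#include <stdbool.h>
#include <stdio.h>

struct dev { bool agp; bool accel_working; };

/* Pretend init: acceleration only comes up once AGP is off. */
static int try_init(struct dev *d)
{
	d->accel_working = !d->agp;
	return 0;
}

static void disable_agp(struct dev *d)
{
	d->agp = false;
}

int main(void)
{
	struct dev d = { .agp = true, .accel_working = false };
	int r = try_init(&d);

	if (r == 0 && d.agp && !d.accel_working) {
		/* Acceleration not working on the AGP path: retry with
		 * the PCI/PCIE GART instead. */
		disable_agp(&d);
		r = try_init(&d);
	}
	printf("accel_working=%d r=%d\n", d.accel_working, r);
	return r;
}
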
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5d8141b13765..3655d91993a6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -106,24 +106,33 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
106 legacy_crtc_load_lut(crtc); 106 legacy_crtc_load_lut(crtc);
107} 107}
108 108
109/** Sets the color ramps on behalf of RandR */ 109/** Sets the color ramps on behalf of fbcon */
110void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 110void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno) 111 u16 blue, int regno)
112{ 112{
113 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 113 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
114 114
115 if (regno == 0)
116 DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id);
117 radeon_crtc->lut_r[regno] = red >> 6; 115 radeon_crtc->lut_r[regno] = red >> 6;
118 radeon_crtc->lut_g[regno] = green >> 6; 116 radeon_crtc->lut_g[regno] = green >> 6;
119 radeon_crtc->lut_b[regno] = blue >> 6; 117 radeon_crtc->lut_b[regno] = blue >> 6;
120} 118}
121 119
120/** Gets the color ramps on behalf of fbcon */
121void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
122 u16 *blue, int regno)
123{
124 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
125
126 *red = radeon_crtc->lut_r[regno] << 6;
127 *green = radeon_crtc->lut_g[regno] << 6;
128 *blue = radeon_crtc->lut_b[regno] << 6;
129}
130
122static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 131static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
123 u16 *blue, uint32_t size) 132 u16 *blue, uint32_t size)
124{ 133{
125 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 134 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
126 int i, j; 135 int i;
127 136
128 if (size != 256) { 137 if (size != 256) {
129 return; 138 return;
@@ -132,23 +141,11 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
132 return; 141 return;
133 } 142 }
134 143
135 if (crtc->fb->depth == 16) { 144 /* userspace palettes are always correct as is */
136 for (i = 0; i < 64; i++) { 145 for (i = 0; i < 256; i++) {
137 if (i <= 31) { 146 radeon_crtc->lut_r[i] = red[i] >> 6;
138 for (j = 0; j < 8; j++) { 147 radeon_crtc->lut_g[i] = green[i] >> 6;
139 radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6; 148 radeon_crtc->lut_b[i] = blue[i] >> 6;
140 radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6;
141 }
142 }
143 for (j = 0; j < 4; j++)
144 radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6;
145 }
146 } else {
147 for (i = 0; i < 256; i++) {
148 radeon_crtc->lut_r[i] = red[i] >> 6;
149 radeon_crtc->lut_g[i] = green[i] >> 6;
150 radeon_crtc->lut_b[i] = blue[i] >> 6;
151 }
152 } 149 }
153 150
154 radeon_crtc_load_lut(crtc); 151 radeon_crtc_load_lut(crtc);
@@ -724,7 +721,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
724 if (ret) { 721 if (ret) {
725 return ret; 722 return ret;
726 } 723 }
727 /* allocate crtcs - TODO single crtc */ 724
725 if (rdev->flags & RADEON_SINGLE_CRTC)
726 num_crtc = 1;
727
728 /* allocate crtcs */
728 for (i = 0; i < num_crtc; i++) { 729 for (i = 0; i < num_crtc; i++) {
729 radeon_crtc_init(rdev->ddev, i); 730 radeon_crtc_init(rdev->ddev, i);
730 } 731 }
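
The fbcon gamma helpers above store each 16-bit colour component shifted down by 6 bits and hand it back shifted up by 6, so the low 6 bits are dropped on the round trip. A small standalone demo of that conversion (the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t red_in  = 0xFFFF;        /* full-scale 16-bit component */
	uint16_t lut     = red_in >> 6;   /* 10-bit value kept in lut_r[] */
	uint16_t red_out = lut << 6;      /* what gamma_get() hands back */

	printf("in=0x%04x lut=0x%03x out=0x%04x (low 6 bits lost)\n",
	       red_in, lut, red_out);
	return 0;
}
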
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 621646752cd2..a65ab1a0dad2 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1345,6 +1345,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1345void 1345void
1346radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1346radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
1347{ 1347{
1348 struct radeon_device *rdev = dev->dev_private;
1348 struct drm_encoder *encoder; 1349 struct drm_encoder *encoder;
1349 struct radeon_encoder *radeon_encoder; 1350 struct radeon_encoder *radeon_encoder;
1350 1351
@@ -1364,7 +1365,10 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1364 return; 1365 return;
1365 1366
1366 encoder = &radeon_encoder->base; 1367 encoder = &radeon_encoder->base;
1367 encoder->possible_crtcs = 0x3; 1368 if (rdev->flags & RADEON_SINGLE_CRTC)
1369 encoder->possible_crtcs = 0x1;
1370 else
1371 encoder->possible_crtcs = 0x3;
1368 encoder->possible_clones = 0; 1372 encoder->possible_clones = 0;
1369 1373
1370 radeon_encoder->enc_priv = NULL; 1374 radeon_encoder->enc_priv = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 1ba704eedefb..b38c4c8e2c61 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -55,6 +55,7 @@ static struct fb_ops radeonfb_ops = {
55 .fb_imageblit = cfb_imageblit, 55 .fb_imageblit = cfb_imageblit,
56 .fb_pan_display = drm_fb_helper_pan_display, 56 .fb_pan_display = drm_fb_helper_pan_display,
57 .fb_blank = drm_fb_helper_blank, 57 .fb_blank = drm_fb_helper_blank,
58 .fb_setcmap = drm_fb_helper_setcmap,
58}; 59};
59 60
60/** 61/**
@@ -123,6 +124,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
123 124
124static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { 125static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
125 .gamma_set = radeon_crtc_fb_gamma_set, 126 .gamma_set = radeon_crtc_fb_gamma_set,
127 .gamma_get = radeon_crtc_fb_gamma_get,
126}; 128};
127 129
128int radeonfb_create(struct drm_device *dev, 130int radeonfb_create(struct drm_device *dev,
@@ -146,9 +148,15 @@ int radeonfb_create(struct drm_device *dev,
146 unsigned long tmp; 148 unsigned long tmp;
147 bool fb_tiled = false; /* useful for testing */ 149 bool fb_tiled = false; /* useful for testing */
148 u32 tiling_flags = 0; 150 u32 tiling_flags = 0;
151 int crtc_count;
149 152
150 mode_cmd.width = surface_width; 153 mode_cmd.width = surface_width;
151 mode_cmd.height = surface_height; 154 mode_cmd.height = surface_height;
155
156 /* avivo can't scanout real 24bpp */
157 if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
158 surface_bpp = 32;
159
152 mode_cmd.bpp = surface_bpp; 160 mode_cmd.bpp = surface_bpp;
153 /* need to align pitch with crtc limits */ 161 /* need to align pitch with crtc limits */
154 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); 162 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
@@ -217,7 +225,11 @@ int radeonfb_create(struct drm_device *dev,
217 rfbdev = info->par; 225 rfbdev = info->par;
218 rfbdev->helper.funcs = &radeon_fb_helper_funcs; 226 rfbdev->helper.funcs = &radeon_fb_helper_funcs;
219 rfbdev->helper.dev = dev; 227 rfbdev->helper.dev = dev;
220 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, 228 if (rdev->flags & RADEON_SINGLE_CRTC)
229 crtc_count = 1;
230 else
231 crtc_count = 2;
232 ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
221 RADEONFB_CONN_LIMIT); 233 RADEONFB_CONN_LIMIT);
222 if (ret) 234 if (ret)
223 goto out_unref; 235 goto out_unref;
@@ -234,7 +246,7 @@ int radeonfb_create(struct drm_device *dev,
234 246
235 strcpy(info->fix.id, "radeondrmfb"); 247 strcpy(info->fix.id, "radeondrmfb");
236 248
237 drm_fb_helper_fill_fix(info, fb->pitch); 249 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
238 250
239 info->flags = FBINFO_DEFAULT; 251 info->flags = FBINFO_DEFAULT;
240 info->fbops = &radeonfb_ops; 252 info->fbops = &radeonfb_ops;
@@ -309,7 +321,7 @@ int radeon_parse_options(char *options)
309 321
310int radeonfb_probe(struct drm_device *dev) 322int radeonfb_probe(struct drm_device *dev)
311{ 323{
312 return drm_fb_helper_single_fb_probe(dev, &radeonfb_create); 324 return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
313} 325}
314 326
315int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 327int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
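
radeonfb_create() above bumps 24 bpp to 32 on AVIVO parts before computing the pitch, and derives bytes per pixel as (bpp + 1) / 8 so that 15 bpp still maps to 2 bytes. A rough standalone sketch of that arithmetic; the 64-pixel alignment stands in for whatever radeon_align_pitch() actually returns:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool is_avivo = true;
	int width = 1280, bpp = 24;
	int aligned_width, pitch;

	if (bpp == 24 && is_avivo)
		bpp = 32;                         /* avivo can't scan out real 24bpp */

	aligned_width = (width + 63) & ~63;       /* hypothetical 64-pixel align */
	pitch = aligned_width * ((bpp + 1) / 8);

	printf("bpp=%d bytes/px=%d pitch=%d bytes\n", bpp, (bpp + 1) / 8, pitch);
	return 0;
}
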
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1841145a7c4f..8e0a8759e428 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -83,8 +83,12 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
83int radeon_irq_kms_init(struct radeon_device *rdev) 83int radeon_irq_kms_init(struct radeon_device *rdev)
84{ 84{
85 int r = 0; 85 int r = 0;
86 int num_crtc = 2;
86 87
87 r = drm_vblank_init(rdev->ddev, 2); 88 if (rdev->flags & RADEON_SINGLE_CRTC)
89 num_crtc = 1;
90
91 r = drm_vblank_init(rdev->ddev, num_crtc);
88 if (r) { 92 if (r) {
89 return r; 93 return r;
90 } 94 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 2b997a15fb1f..36410f85d705 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1053,6 +1053,7 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
1053 .mode_set_base = radeon_crtc_set_base, 1053 .mode_set_base = radeon_crtc_set_base,
1054 .prepare = radeon_crtc_prepare, 1054 .prepare = radeon_crtc_prepare,
1055 .commit = radeon_crtc_commit, 1055 .commit = radeon_crtc_commit,
1056 .load_lut = radeon_crtc_load_lut,
1056}; 1057};
1057 1058
1058 1059
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b1547f700d73..6ceb958fd194 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -881,7 +881,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
881 R420_TV_DAC_DACADJ_MASK | 881 R420_TV_DAC_DACADJ_MASK |
882 R420_TV_DAC_RDACPD | 882 R420_TV_DAC_RDACPD |
883 R420_TV_DAC_GDACPD | 883 R420_TV_DAC_GDACPD |
884 R420_TV_DAC_GDACPD | 884 R420_TV_DAC_BDACPD |
885 R420_TV_DAC_TVENABLE); 885 R420_TV_DAC_TVENABLE);
886 } else { 886 } else {
887 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | 887 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
@@ -889,7 +889,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
889 RADEON_TV_DAC_DACADJ_MASK | 889 RADEON_TV_DAC_DACADJ_MASK |
890 RADEON_TV_DAC_RDACPD | 890 RADEON_TV_DAC_RDACPD |
891 RADEON_TV_DAC_GDACPD | 891 RADEON_TV_DAC_GDACPD |
892 RADEON_TV_DAC_GDACPD); 892 RADEON_TV_DAC_BDACPD);
893 } 893 }
894 894
895 /* FIXME TV */ 895 /* FIXME TV */
@@ -1318,7 +1318,10 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1318 return; 1318 return;
1319 1319
1320 encoder = &radeon_encoder->base; 1320 encoder = &radeon_encoder->base;
1321 encoder->possible_crtcs = 0x3; 1321 if (rdev->flags & RADEON_SINGLE_CRTC)
1322 encoder->possible_crtcs = 0x1;
1323 else
1324 encoder->possible_crtcs = 0x3;
1322 encoder->possible_clones = 0; 1325 encoder->possible_clones = 0;
1323 1326
1324 radeon_encoder->enc_priv = NULL; 1327 radeon_encoder->enc_priv = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 570a58729daf..e61226817ccf 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -407,6 +407,8 @@ extern void
407radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); 407radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
408extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 408extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
409 u16 blue, int regno); 409 u16 blue, int regno);
410extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
411 u16 *blue, int regno);
410struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, 412struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev,
411 struct drm_mode_fb_cmd *mode_cmd, 413 struct drm_mode_fb_cmd *mode_cmd,
412 struct drm_gem_object *obj); 414 struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 73af463b7a59..1f056dadc5c2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -400,11 +400,9 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
400int radeon_object_list_reserve(struct list_head *head) 400int radeon_object_list_reserve(struct list_head *head)
401{ 401{
402 struct radeon_object_list *lobj; 402 struct radeon_object_list *lobj;
403 struct list_head *i;
404 int r; 403 int r;
405 404
406 list_for_each(i, head) { 405 list_for_each_entry(lobj, head, list){
407 lobj = list_entry(i, struct radeon_object_list, list);
408 if (!lobj->robj->pin_count) { 406 if (!lobj->robj->pin_count) {
409 r = radeon_object_reserve(lobj->robj, true); 407 r = radeon_object_reserve(lobj->robj, true);
410 if (unlikely(r != 0)) { 408 if (unlikely(r != 0)) {
@@ -420,13 +418,10 @@ int radeon_object_list_reserve(struct list_head *head)
420void radeon_object_list_unreserve(struct list_head *head) 418void radeon_object_list_unreserve(struct list_head *head)
421{ 419{
422 struct radeon_object_list *lobj; 420 struct radeon_object_list *lobj;
423 struct list_head *i;
424 421
425 list_for_each(i, head) { 422 list_for_each_entry(lobj, head, list) {
426 lobj = list_entry(i, struct radeon_object_list, list);
427 if (!lobj->robj->pin_count) { 423 if (!lobj->robj->pin_count) {
428 radeon_object_unreserve(lobj->robj); 424 radeon_object_unreserve(lobj->robj);
429 } else {
430 } 425 }
431 } 426 }
432} 427}
@@ -436,7 +431,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
436 struct radeon_object_list *lobj; 431 struct radeon_object_list *lobj;
437 struct radeon_object *robj; 432 struct radeon_object *robj;
438 struct radeon_fence *old_fence = NULL; 433 struct radeon_fence *old_fence = NULL;
439 struct list_head *i;
440 int r; 434 int r;
441 435
442 r = radeon_object_list_reserve(head); 436 r = radeon_object_list_reserve(head);
@@ -444,8 +438,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
444 radeon_object_list_unreserve(head); 438 radeon_object_list_unreserve(head);
445 return r; 439 return r;
446 } 440 }
447 list_for_each(i, head) { 441 list_for_each_entry(lobj, head, list) {
448 lobj = list_entry(i, struct radeon_object_list, list);
449 robj = lobj->robj; 442 robj = lobj->robj;
450 if (!robj->pin_count) { 443 if (!robj->pin_count) {
451 if (lobj->wdomain) { 444 if (lobj->wdomain) {
@@ -482,10 +475,8 @@ void radeon_object_list_unvalidate(struct list_head *head)
482{ 475{
483 struct radeon_object_list *lobj; 476 struct radeon_object_list *lobj;
484 struct radeon_fence *old_fence = NULL; 477 struct radeon_fence *old_fence = NULL;
485 struct list_head *i;
486 478
487 list_for_each(i, head) { 479 list_for_each_entry(lobj, head, list) {
488 lobj = list_entry(i, struct radeon_object_list, list);
489 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; 480 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
490 lobj->robj->tobj.sync_obj = NULL; 481 lobj->robj->tobj.sync_obj = NULL;
491 if (old_fence) { 482 if (old_fence) {
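
The radeon_object.c hunks above replace the open-coded list_for_each() + list_entry() pairs with list_for_each_entry(), which walks the list directly as container structures. A self-contained sketch of the idiom; the tiny list implementation below is a simplified stand-in for <linux/list.h>, just enough to make the demo compile on its own:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                             \
	for (pos = container_of((head)->next, __typeof__(*pos), member);   \
	     &pos->member != (head);                                        \
	     pos = container_of(pos->member.next, __typeof__(*pos), member))

struct item { int value; struct list_head list; };

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *pos;

	list_add_tail(&a.list, &head);
	list_add_tail(&b.list, &head);

	/* Walk the items directly, no separate struct list_head cursor. */
	list_for_each_entry(pos, &head, list)
		printf("value %d\n", pos->value);
	return 0;
}
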
diff --git a/drivers/gpu/drm/radeon/rs100d.h b/drivers/gpu/drm/radeon/rs100d.h
new file mode 100644
index 000000000000..48a913a06cfd
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs100d.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS100D_H__
29#define __RS100D_H__
30
31/* Registers */
32#define R_00015C_NB_TOM 0x00015C
33#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
34#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
35#define C_00015C_MC_FB_START 0xFFFF0000
36#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
37#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
38#define C_00015C_MC_FB_TOP 0x0000FFFF
39
40#endif
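
The new rs100d.h follows the S_/G_/C_ convention used by the other register headers in this series: S_* shifts a field value into place, G_* extracts it, and C_* is the mask that clears it. A quick standalone demo using the NB_TOM macros copied from the header above (the register value itself is made up):

#include <stdint.h>
#include <stdio.h>

#define S_00015C_MC_FB_START(x)  (((x) & 0xFFFF) << 0)
#define G_00015C_MC_FB_START(x)  (((x) >> 0) & 0xFFFF)
#define C_00015C_MC_FB_START     0xFFFF0000
#define S_00015C_MC_FB_TOP(x)    (((x) & 0xFFFF) << 16)
#define G_00015C_MC_FB_TOP(x)    (((x) >> 16) & 0xFFFF)

int main(void)
{
	/* Pack framebuffer start/top (in 64K units) into the register image. */
	uint32_t nb_tom = S_00015C_MC_FB_START(0x0000) | S_00015C_MC_FB_TOP(0x07FF);

	/* Update only the start field: clear it, then OR in the new value. */
	nb_tom = (nb_tom & C_00015C_MC_FB_START) | S_00015C_MC_FB_START(0x0010);

	printf("NB_TOM=0x%08x start=0x%04x top=0x%04x\n",
	       nb_tom, G_00015C_MC_FB_START(nb_tom), G_00015C_MC_FB_TOP(nb_tom));
	return 0;
}
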
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a3fbdad938c7..a769c296f6a6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -27,27 +27,12 @@
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include "radeon_reg.h"
31#include "radeon.h" 30#include "radeon.h"
31#include "rs400d.h"
32 32
33/* rs400,rs480 depends on : */ 33/* This file gathers functions specific to rs400, rs480 */
34void r100_hdp_reset(struct radeon_device *rdev); 34static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
35void r100_mc_disable_clients(struct radeon_device *rdev);
36int r300_mc_wait_for_idle(struct radeon_device *rdev);
37void r420_pipes_init(struct radeon_device *rdev);
38 35
39/* This files gather functions specifics to :
40 * rs400,rs480
41 *
42 * Some of these functions might be used by newer ASICs.
43 */
44void rs400_gpu_init(struct radeon_device *rdev);
45int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
46
47
48/*
49 * GART functions.
50 */
51void rs400_gart_adjust_size(struct radeon_device *rdev) 36void rs400_gart_adjust_size(struct radeon_device *rdev)
52{ 37{
53 /* Check gart size */ 38 /* Check gart size */
@@ -238,61 +223,6 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
238 return 0; 223 return 0;
239} 224}
240 225
241
242/*
243 * MC functions.
244 */
245int rs400_mc_init(struct radeon_device *rdev)
246{
247 uint32_t tmp;
248 int r;
249
250 if (r100_debugfs_rbbm_init(rdev)) {
251 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
252 }
253
254 rs400_gpu_init(rdev);
255 rs400_gart_disable(rdev);
256 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
257 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
258 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
259 r = radeon_mc_setup(rdev);
260 if (r) {
261 return r;
262 }
263
264 r100_mc_disable_clients(rdev);
265 if (r300_mc_wait_for_idle(rdev)) {
266 printk(KERN_WARNING "Failed to wait MC idle while "
267 "programming pipes. Bad things might happen.\n");
268 }
269
270 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
271 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
272 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
273 WREG32(RADEON_MC_FB_LOCATION, tmp);
274 tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS;
275 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
276 (void)RREG32(RADEON_HOST_PATH_CNTL);
277 WREG32(RADEON_HOST_PATH_CNTL, tmp);
278 (void)RREG32(RADEON_HOST_PATH_CNTL);
279
280 return 0;
281}
282
283void rs400_mc_fini(struct radeon_device *rdev)
284{
285}
286
287
288/*
289 * Global GPU functions
290 */
291void rs400_errata(struct radeon_device *rdev)
292{
293 rdev->pll_errata = 0;
294}
295
296void rs400_gpu_init(struct radeon_device *rdev) 226void rs400_gpu_init(struct radeon_device *rdev)
297{ 227{
298 /* FIXME: HDP same place on rs400 ? */ 228 /* FIXME: HDP same place on rs400 ? */
@@ -305,10 +235,6 @@ void rs400_gpu_init(struct radeon_device *rdev)
305 } 235 }
306} 236}
307 237
308
309/*
310 * VRAM info.
311 */
312void rs400_vram_info(struct radeon_device *rdev) 238void rs400_vram_info(struct radeon_device *rdev)
313{ 239{
314 rs400_gart_adjust_size(rdev); 240 rs400_gart_adjust_size(rdev);
@@ -319,10 +245,6 @@ void rs400_vram_info(struct radeon_device *rdev)
319 r100_vram_init_sizes(rdev); 245 r100_vram_init_sizes(rdev);
320} 246}
321 247
322
323/*
324 * Indirect registers accessor
325 */
326uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 248uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
327{ 249{
328 uint32_t r; 250 uint32_t r;
@@ -340,10 +262,6 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
340 WREG32(RS480_NB_MC_INDEX, 0xff); 262 WREG32(RS480_NB_MC_INDEX, 0xff);
341} 263}
342 264
343
344/*
345 * Debugfs info
346 */
347#if defined(CONFIG_DEBUG_FS) 265#if defined(CONFIG_DEBUG_FS)
348static int rs400_debugfs_gart_info(struct seq_file *m, void *data) 266static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
349{ 267{
@@ -419,7 +337,7 @@ static struct drm_info_list rs400_gart_info_list[] = {
419}; 337};
420#endif 338#endif
421 339
422int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) 340static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
423{ 341{
424#if defined(CONFIG_DEBUG_FS) 342#if defined(CONFIG_DEBUG_FS)
425 return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); 343 return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
@@ -427,3 +345,188 @@ int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
427 return 0; 345 return 0;
428#endif 346#endif
429} 347}
348
349static int rs400_mc_init(struct radeon_device *rdev)
350{
351 int r;
352 u32 tmp;
353
354 /* Setup GPU memory space */
355 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
356 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
357 rdev->mc.gtt_location = 0xFFFFFFFFUL;
358 r = radeon_mc_setup(rdev);
359 if (r)
360 return r;
361 return 0;
362}
363
364void rs400_mc_program(struct radeon_device *rdev)
365{
366 struct r100_mc_save save;
367
368 /* Stops all mc clients */
369 r100_mc_stop(rdev, &save);
370
371 /* Wait for mc idle */
372 if (r300_mc_wait_for_idle(rdev))
373 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
374 WREG32(R_000148_MC_FB_LOCATION,
375 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
376 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
377
378 r100_mc_resume(rdev, &save);
379}
380
381static int rs400_startup(struct radeon_device *rdev)
382{
383 int r;
384
385 rs400_mc_program(rdev);
386 /* Resume clock */
387 r300_clock_startup(rdev);
388 /* Initialize GPU configuration (# pipes, ...) */
389 rs400_gpu_init(rdev);
390 /* Initialize GART (initialize after TTM so we can allocate
391 * memory through TTM but finalize after TTM) */
392 r = rs400_gart_enable(rdev);
393 if (r)
394 return r;
395 /* Enable IRQ */
396 rdev->irq.sw_int = true;
397 r100_irq_set(rdev);
398 /* 1M ring buffer */
399 r = r100_cp_init(rdev, 1024 * 1024);
400 if (r) {
401 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
402 return r;
403 }
404 r = r100_wb_init(rdev);
405 if (r)
406 		dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
407 r = r100_ib_init(rdev);
408 if (r) {
409 		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
410 return r;
411 }
412 return 0;
413}
414
415int rs400_resume(struct radeon_device *rdev)
416{
417 	/* Make sure the GART is not working */
418 rs400_gart_disable(rdev);
419 /* Resume clock before doing reset */
420 r300_clock_startup(rdev);
421 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
422 if (radeon_gpu_reset(rdev)) {
423 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
424 RREG32(R_000E40_RBBM_STATUS),
425 RREG32(R_0007C0_CP_STAT));
426 }
427 /* post */
428 radeon_combios_asic_init(rdev->ddev);
429 /* Resume clock after posting */
430 r300_clock_startup(rdev);
431 return rs400_startup(rdev);
432}
433
434int rs400_suspend(struct radeon_device *rdev)
435{
436 r100_cp_disable(rdev);
437 r100_wb_disable(rdev);
438 r100_irq_disable(rdev);
439 rs400_gart_disable(rdev);
440 return 0;
441}
442
443void rs400_fini(struct radeon_device *rdev)
444{
445 rs400_suspend(rdev);
446 r100_cp_fini(rdev);
447 r100_wb_fini(rdev);
448 r100_ib_fini(rdev);
449 radeon_gem_fini(rdev);
450 rs400_gart_fini(rdev);
451 radeon_irq_kms_fini(rdev);
452 radeon_fence_driver_fini(rdev);
453 radeon_object_fini(rdev);
454 radeon_atombios_fini(rdev);
455 kfree(rdev->bios);
456 rdev->bios = NULL;
457}
458
459int rs400_init(struct radeon_device *rdev)
460{
461 int r;
462
463 /* Disable VGA */
464 r100_vga_render_disable(rdev);
465 /* Initialize scratch registers */
466 radeon_scratch_init(rdev);
467 /* Initialize surface registers */
468 radeon_surface_init(rdev);
469 /* TODO: disable VGA need to use VGA request */
470 /* BIOS*/
471 if (!radeon_get_bios(rdev)) {
472 if (ASIC_IS_AVIVO(rdev))
473 return -EINVAL;
474 }
475 if (rdev->is_atom_bios) {
476 dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
477 return -EINVAL;
478 } else {
479 r = radeon_combios_init(rdev);
480 if (r)
481 return r;
482 }
483 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
484 if (radeon_gpu_reset(rdev)) {
485 dev_warn(rdev->dev,
486 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
487 RREG32(R_000E40_RBBM_STATUS),
488 RREG32(R_0007C0_CP_STAT));
489 }
490 /* check if cards are posted or not */
491 if (!radeon_card_posted(rdev) && rdev->bios) {
492 DRM_INFO("GPU not posted. posting now...\n");
493 radeon_combios_asic_init(rdev->ddev);
494 }
495 /* Initialize clocks */
496 radeon_get_clock_info(rdev->ddev);
497 	/* Get vram information */
498 rs400_vram_info(rdev);
499 /* Initialize memory controller (also test AGP) */
500 r = rs400_mc_init(rdev);
501 if (r)
502 return r;
503 /* Fence driver */
504 r = radeon_fence_driver_init(rdev);
505 if (r)
506 return r;
507 r = radeon_irq_kms_init(rdev);
508 if (r)
509 return r;
510 /* Memory manager */
511 r = radeon_object_init(rdev);
512 if (r)
513 return r;
514 r = rs400_gart_init(rdev);
515 if (r)
516 return r;
517 r300_set_reg_safe(rdev);
518 rdev->accel_working = true;
519 r = rs400_startup(rdev);
520 if (r) {
521 		/* Something went wrong with the accel init, stop acceleration */
522 dev_err(rdev->dev, "Disabling GPU acceleration\n");
523 rs400_suspend(rdev);
524 r100_cp_fini(rdev);
525 r100_wb_fini(rdev);
526 r100_ib_fini(rdev);
527 rs400_gart_fini(rdev);
528 radeon_irq_kms_fini(rdev);
529 rdev->accel_working = false;
530 }
531 return 0;
532}
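
rs400_init() above deliberately keeps going when the acceleration startup fails: it tears the acceleration blocks back down, clears accel_working and still returns 0 so kernel modesetting stays usable. A rough standalone sketch of that shape, with hypothetical stand-ins for the real startup and fini helpers:

#include <stdbool.h>
#include <stdio.h>

struct dev { bool accel_working; };

static int startup(struct dev *d)
{
	(void)d;
	return -1;              /* pretend the accel startup failed */
}

static void teardown_accel(struct dev *d)
{
	(void)d;                /* cp/wb/ib/gart/irq fini would go here */
}

static int device_init(struct dev *d)
{
	d->accel_working = true;
	if (startup(d)) {
		fprintf(stderr, "Disabling GPU acceleration\n");
		teardown_accel(d);
		d->accel_working = false;
	}
	return 0;               /* modesetting stays usable without accel */
}

int main(void)
{
	struct dev d;

	device_init(&d);
	printf("accel_working=%d\n", d.accel_working);
	return 0;
}
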
diff --git a/drivers/gpu/drm/radeon/rs400d.h b/drivers/gpu/drm/radeon/rs400d.h
new file mode 100644
index 000000000000..6d8bac58ced9
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs400d.h
@@ -0,0 +1,160 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS400D_H__
29#define __RS400D_H__
30
31/* Registers */
32#define R_000148_MC_FB_LOCATION 0x000148
33#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
34#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
35#define C_000148_MC_FB_START 0xFFFF0000
36#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
37#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
38#define C_000148_MC_FB_TOP 0x0000FFFF
39#define R_00015C_NB_TOM 0x00015C
40#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0)
41#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
42#define C_00015C_MC_FB_START 0xFFFF0000
43#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
44#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
45#define C_00015C_MC_FB_TOP 0x0000FFFF
46#define R_0007C0_CP_STAT 0x0007C0
47#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
48#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
49#define C_0007C0_MRU_BUSY 0xFFFFFFFE
50#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
51#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
52#define C_0007C0_MWU_BUSY 0xFFFFFFFD
53#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
54#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
55#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
56#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
57#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
58#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
59#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
60#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
61#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
62#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
63#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
64#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
65#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
66#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
67#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
68#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
69#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
70#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
71#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
72#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
73#define C_0007C0_CSI_BUSY 0xFFFFDFFF
74#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
75#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
76#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
77#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
78#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
79#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
80#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
81#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
82#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
83#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
84#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
85#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
86#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
87#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
88#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
89#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
90#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
91#define C_0007C0_CP_BUSY 0x7FFFFFFF
92#define R_000E40_RBBM_STATUS 0x000E40
93#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
94#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
95#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
96#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
97#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
98#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
99#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
100#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
101#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
102#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
103#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
104#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
105#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
106#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
107#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
108#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
109#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
110#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
111#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
112#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
113#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
114#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
115#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
116#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
117#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
118#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
119#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
120#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
121#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
122#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
123#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
124#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
125#define C_000E40_E2_BUSY 0xFFFDFFFF
126#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
127#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
128#define C_000E40_RB2D_BUSY 0xFFFBFFFF
129#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
130#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
131#define C_000E40_RB3D_BUSY 0xFFF7FFFF
132#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
133#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
134#define C_000E40_VAP_BUSY 0xFFEFFFFF
135#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
136#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
137#define C_000E40_RE_BUSY 0xFFDFFFFF
138#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
139#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
140#define C_000E40_TAM_BUSY 0xFFBFFFFF
141#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
142#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
143#define C_000E40_TDM_BUSY 0xFF7FFFFF
144#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
145#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
146#define C_000E40_PB_BUSY 0xFEFFFFFF
147#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
148#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
149#define C_000E40_TIM_BUSY 0xFDFFFFFF
150#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
151#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
152#define C_000E40_GA_BUSY 0xFBFFFFFF
153#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
154#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
155#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
156#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
157#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
158#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
159
160#endif
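
The RBBM_STATUS definitions above are what the wait-for-idle style helpers poll, bit by bit, to decide whether the GPU has drained. A small standalone demo of that kind of poll loop using the GUI_ACTIVE decoder copied from the header (the register read is faked so the example terminates):

#include <stdint.h>
#include <stdio.h>

#define G_000E40_GUI_ACTIVE(x)  (((x) >> 31) & 0x1)

/* Fake register read: report busy twice, then idle. */
static uint32_t read_rbbm_status(void)
{
	static int calls;

	return (calls++ < 2) ? 0x80000000u : 0x00000000u;
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++) {
		uint32_t status = read_rbbm_status();

		if (!G_000E40_GUI_ACTIVE(status)) {
			printf("GPU idle after %d poll(s)\n", i + 1);
			return 0;
		}
	}
	printf("timeout waiting for GPU idle\n");
	return 1;
}
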
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4a4fe1cb131c..10dfa78762da 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -25,27 +25,26 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28/* RS600 / Radeon X1250/X1270 integrated GPU
29 *
30 * This file gathers functions specific to RS600, the IGP of the
31 * X1250/X1270 family that pairs with Intel CPUs (RS690/RS740 is
32 * the X1250/X1270 IGP for AMD CPUs). The display engine is the
33 * avivo one, the BIOS is an atombios, and the 3D blocks are those of the
34 * R4XX family. The GART is different from the RS400 one and is very
35 * close to the one of the R600 family (R600 likely being an evolution
36 * of the RS600 GART block).
37 */
28#include "drmP.h" 38#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 39#include "radeon.h"
40#include "atom.h"
41#include "rs600d.h"
31 42
32#include "rs600_reg_safe.h" 43#include "rs600_reg_safe.h"
33 44
34/* rs600 depends on : */
35void r100_hdp_reset(struct radeon_device *rdev);
36int r100_gui_wait_for_idle(struct radeon_device *rdev);
37int r300_mc_wait_for_idle(struct radeon_device *rdev);
38void r420_pipes_init(struct radeon_device *rdev);
39
40/* This files gather functions specifics to :
41 * rs600
42 *
43 * Some of these functions might be used by newer ASICs.
44 */
45void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
46int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
47 47
48
49/* 48/*
50 * GART. 49 * GART.
51 */ 50 */
@@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
53{ 52{
54 uint32_t tmp; 53 uint32_t tmp;
55 54
56 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 55 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
57 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); 56 tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
58 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 57 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
59 58
60 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 59 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
61 tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; 60 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
62 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 61 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
63 62
64 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 63 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
65 tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); 64 tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
66 WREG32_MC(RS600_MC_PT0_CNTL, tmp); 65 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
67 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 66 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
68} 67}
69 68
70int rs600_gart_init(struct radeon_device *rdev) 69int rs600_gart_init(struct radeon_device *rdev)
@@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev)
86 85
87int rs600_gart_enable(struct radeon_device *rdev) 86int rs600_gart_enable(struct radeon_device *rdev)
88{ 87{
89 uint32_t tmp; 88 u32 tmp;
90 int r, i; 89 int r, i;
91 90
92 if (rdev->gart.table.vram.robj == NULL) { 91 if (rdev->gart.table.vram.robj == NULL) {
@@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev)
96 r = radeon_gart_table_vram_pin(rdev); 95 r = radeon_gart_table_vram_pin(rdev);
97 if (r) 96 if (r)
98 return r; 97 return r;
98 /* Enable bus master */
99 tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
100 WREG32(R_00004C_BUS_CNTL, tmp);
99 /* FIXME: setup default page */ 101 /* FIXME: setup default page */
100 WREG32_MC(RS600_MC_PT0_CNTL, 102 WREG32_MC(R_000100_MC_PT0_CNTL,
101 (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | 103 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
102 RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); 104 S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
103 for (i = 0; i < 19; i++) { 105 for (i = 0; i < 19; i++) {
104 WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, 106 WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
105 (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | 107 S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
106 RS600_SYSTEM_ACCESS_MODE_IN_SYS | 108 S_00016C_SYSTEM_ACCESS_MODE_MASK(
107 RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | 109 V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
108 RS600_EFFECTIVE_L1_CACHE_SIZE(3) | 110 S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
109 RS600_ENABLE_FRAGMENT_PROCESSING | 111 V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
110 RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); 112 S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
113 S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
114 S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
111 } 115 }
112 116
113 /* System context map to GART space */ 117 /* System context map to GART space */
114 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); 118 WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
115 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 119 WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
116 WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
117 120
118 /* enable first context */ 121 /* enable first context */
119 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); 122 WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
120 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 123 WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
121 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); 124 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
122 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, 125 S_000102_ENABLE_PAGE_TABLE(1) |
123 (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); 126 S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
124 /* disable all other contexts */ 127 /* disable all other contexts */
125 for (i = 1; i < 8; i++) { 128 for (i = 1; i < 8; i++) {
126 WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); 129 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
127 } 130 }
128 131
129 /* setup the page table */ 132 /* setup the page table */
130 WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, 133 WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
131 rdev->gart.table_addr); 134 rdev->gart.table_addr);
132 WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); 135 WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
133 136
134 /* enable page tables */ 137 /* enable page tables */
135 tmp = RREG32_MC(RS600_MC_PT0_CNTL); 138 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
136 WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); 139 WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
137 tmp = RREG32_MC(RS600_MC_CNTL1); 140 tmp = RREG32_MC(R_000009_MC_CNTL1);
138 WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); 141 WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
139 rs600_gart_tlb_flush(rdev); 142 rs600_gart_tlb_flush(rdev);
140 rdev->gart.ready = true; 143 rdev->gart.ready = true;
141 return 0; 144 return 0;
@@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
146 uint32_t tmp; 149 uint32_t tmp;
147 150
148 /* FIXME: disable out of gart access */ 151 /* FIXME: disable out of gart access */
149 WREG32_MC(RS600_MC_PT0_CNTL, 0); 152 WREG32_MC(R_000100_MC_PT0_CNTL, 0);
150 tmp = RREG32_MC(RS600_MC_CNTL1); 153 tmp = RREG32_MC(R_000009_MC_CNTL1);
151 tmp &= ~RS600_ENABLE_PAGE_TABLES; 154 WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
152 WREG32_MC(RS600_MC_CNTL1, tmp);
153 if (rdev->gart.table.vram.robj) { 155 if (rdev->gart.table.vram.robj) {
154 radeon_object_kunmap(rdev->gart.table.vram.robj); 156 radeon_object_kunmap(rdev->gart.table.vram.robj);
155 radeon_object_unpin(rdev->gart.table.vram.robj); 157 radeon_object_unpin(rdev->gart.table.vram.robj);
@@ -183,129 +185,61 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
183 return 0; 185 return 0;
184} 186}
185 187
186
187/*
188 * MC.
189 */
190void rs600_mc_disable_clients(struct radeon_device *rdev)
191{
192 unsigned tmp;
193
194 if (r100_gui_wait_for_idle(rdev)) {
195 printk(KERN_WARNING "Failed to wait GUI idle while "
196 "programming pipes. Bad things might happen.\n");
197 }
198
199 rv515_vga_render_disable(rdev);
200
201 tmp = RREG32(AVIVO_D1VGA_CONTROL);
202 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
203 tmp = RREG32(AVIVO_D2VGA_CONTROL);
204 WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
205
206 tmp = RREG32(AVIVO_D1CRTC_CONTROL);
207 WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
208 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
209 WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
210
211 /* make sure all previous write got through */
212 tmp = RREG32(AVIVO_D2CRTC_CONTROL);
213
214 mdelay(1);
215}
216
217int rs600_mc_init(struct radeon_device *rdev)
218{
219 uint32_t tmp;
220 int r;
221
222 if (r100_debugfs_rbbm_init(rdev)) {
223 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
224 }
225
226 rs600_gpu_init(rdev);
227 rs600_gart_disable(rdev);
228
229 /* Setup GPU memory space */
230 rdev->mc.vram_location = 0xFFFFFFFFUL;
231 rdev->mc.gtt_location = 0xFFFFFFFFUL;
232 r = radeon_mc_setup(rdev);
233 if (r) {
234 return r;
235 }
236
237 /* Program GPU memory space */
238 /* Enable bus master */
239 tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
240 WREG32(RADEON_BUS_CNTL, tmp);
241 /* FIXME: What does AGP means for such chipset ? */
242 WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
243 /* FIXME: are this AGP reg in indirect MC range ? */
244 WREG32_MC(RS600_MC_AGP_BASE, 0);
245 WREG32_MC(RS600_MC_AGP_BASE_2, 0);
246 rs600_mc_disable_clients(rdev);
247 if (rs600_mc_wait_for_idle(rdev)) {
248 printk(KERN_WARNING "Failed to wait MC idle while "
249 "programming pipes. Bad things might happen.\n");
250 }
251 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
252 tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
253 tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
254 WREG32_MC(RS600_MC_FB_LOCATION, tmp);
255 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
256 return 0;
257}
258
259void rs600_mc_fini(struct radeon_device *rdev)
260{
261}
262
263
264/*
265 * Interrupts
266 */
267int rs600_irq_set(struct radeon_device *rdev) 188int rs600_irq_set(struct radeon_device *rdev)
268{ 189{
269 uint32_t tmp = 0; 190 uint32_t tmp = 0;
270 uint32_t mode_int = 0; 191 uint32_t mode_int = 0;
271 192
272 if (rdev->irq.sw_int) { 193 if (rdev->irq.sw_int) {
273 tmp |= RADEON_SW_INT_ENABLE; 194 tmp |= S_000040_SW_INT_EN(1);
274 } 195 }
275 if (rdev->irq.crtc_vblank_int[0]) { 196 if (rdev->irq.crtc_vblank_int[0]) {
276 mode_int |= AVIVO_D1MODE_INT_MASK; 197 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
277 } 198 }
278 if (rdev->irq.crtc_vblank_int[1]) { 199 if (rdev->irq.crtc_vblank_int[1]) {
279 mode_int |= AVIVO_D2MODE_INT_MASK; 200 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
280 } 201 }
281 WREG32(RADEON_GEN_INT_CNTL, tmp); 202 WREG32(R_000040_GEN_INT_CNTL, tmp);
282 WREG32(AVIVO_DxMODE_INT_MASK, mode_int); 203 WREG32(R_006540_DxMODE_INT_MASK, mode_int);
283 return 0; 204 return 0;
284} 205}
285 206
286static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) 207static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
287{ 208{
288 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 209 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
289 uint32_t irq_mask = RADEON_SW_INT_TEST; 210 uint32_t irq_mask = ~C_000044_SW_INT;
290 211
291 if (irqs & AVIVO_DISPLAY_INT_STATUS) { 212 if (G_000044_DISPLAY_INT_STAT(irqs)) {
292 *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); 213 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
293 if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { 214 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
294 WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); 215 WREG32(R_006534_D1MODE_VBLANK_STATUS,
216 S_006534_D1MODE_VBLANK_ACK(1));
295 } 217 }
296 if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { 218 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
297 WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); 219 WREG32(R_006D34_D2MODE_VBLANK_STATUS,
220 S_006D34_D2MODE_VBLANK_ACK(1));
298 } 221 }
299 } else { 222 } else {
300 *r500_disp_int = 0; 223 *r500_disp_int = 0;
301 } 224 }
302 225
303 if (irqs) { 226 if (irqs) {
304 WREG32(RADEON_GEN_INT_STATUS, irqs); 227 WREG32(R_000044_GEN_INT_STATUS, irqs);
305 } 228 }
306 return irqs & irq_mask; 229 return irqs & irq_mask;
307} 230}
308 231
232void rs600_irq_disable(struct radeon_device *rdev)
233{
234 u32 tmp;
235
236 WREG32(R_000040_GEN_INT_CNTL, 0);
237 WREG32(R_006540_DxMODE_INT_MASK, 0);
238 /* Wait and acknowledge irq */
239 mdelay(1);
240 rs600_irq_ack(rdev, &tmp);
241}
242
309int rs600_irq_process(struct radeon_device *rdev) 243int rs600_irq_process(struct radeon_device *rdev)
310{ 244{
311 uint32_t status; 245 uint32_t status;
@@ -317,16 +251,13 @@ int rs600_irq_process(struct radeon_device *rdev)
317 } 251 }
318 while (status || r500_disp_int) { 252 while (status || r500_disp_int) {
319 /* SW interrupt */ 253 /* SW interrupt */
320 if (status & RADEON_SW_INT_TEST) { 254 if (G_000040_SW_INT_EN(status))
321 radeon_fence_process(rdev); 255 radeon_fence_process(rdev);
322 }
323 /* Vertical blank interrupts */ 256 /* Vertical blank interrupts */
324 if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { 257 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
325 drm_handle_vblank(rdev->ddev, 0); 258 drm_handle_vblank(rdev->ddev, 0);
326 } 259 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
327 if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
328 drm_handle_vblank(rdev->ddev, 1); 260 drm_handle_vblank(rdev->ddev, 1);
329 }
330 status = rs600_irq_ack(rdev, &r500_disp_int); 261 status = rs600_irq_ack(rdev, &r500_disp_int);
331 } 262 }
332 return IRQ_HANDLED; 263 return IRQ_HANDLED;
@@ -335,53 +266,34 @@ int rs600_irq_process(struct radeon_device *rdev)
335u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) 266u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
336{ 267{
337 if (crtc == 0) 268 if (crtc == 0)
338 return RREG32(AVIVO_D1CRTC_FRAME_COUNT); 269 return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
339 else 270 else
340 return RREG32(AVIVO_D2CRTC_FRAME_COUNT); 271 return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
341} 272}
342 273
343
344/*
345 * Global GPU functions
346 */
347int rs600_mc_wait_for_idle(struct radeon_device *rdev) 274int rs600_mc_wait_for_idle(struct radeon_device *rdev)
348{ 275{
349 unsigned i; 276 unsigned i;
350 uint32_t tmp;
351 277
352 for (i = 0; i < rdev->usec_timeout; i++) { 278 for (i = 0; i < rdev->usec_timeout; i++) {
353 /* read MC_STATUS */ 279 if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
354 tmp = RREG32_MC(RS600_MC_STATUS);
355 if (tmp & RS600_MC_STATUS_IDLE) {
356 return 0; 280 return 0;
357 } 281 udelay(1);
358 DRM_UDELAY(1);
359 } 282 }
360 return -1; 283 return -1;
361} 284}
362 285
363void rs600_errata(struct radeon_device *rdev)
364{
365 rdev->pll_errata = 0;
366}
367
368void rs600_gpu_init(struct radeon_device *rdev) 286void rs600_gpu_init(struct radeon_device *rdev)
369{ 287{
370 /* FIXME: HDP same place on rs600 ? */ 288 /* FIXME: HDP same place on rs600 ? */
371 r100_hdp_reset(rdev); 289 r100_hdp_reset(rdev);
372 rv515_vga_render_disable(rdev);
373 /* FIXME: is this correct ? */ 290 /* FIXME: is this correct ? */
374 r420_pipes_init(rdev); 291 r420_pipes_init(rdev);
375 if (rs600_mc_wait_for_idle(rdev)) { 292 /* Wait for mc idle */
376 printk(KERN_WARNING "Failed to wait MC idle while " 293 if (rs600_mc_wait_for_idle(rdev))
377 "programming pipes. Bad things might happen.\n"); 294 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
378 }
379} 295}
380 296
381
382/*
383 * VRAM info.
384 */
385void rs600_vram_info(struct radeon_device *rdev) 297void rs600_vram_info(struct radeon_device *rdev)
386{ 298{
387 /* FIXME: to do, or are these values sane ? */ 299 /* FIXME: to do, or are these values sane ? */
@@ -394,31 +306,206 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
394 /* FIXME: implement, should this be like rs690 ? */ 306 /* FIXME: implement, should this be like rs690 ? */
395} 307}
396 308
397
398/*
399 * Indirect registers accessor
400 */
401uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 309uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
402{ 310{
403 uint32_t r; 311 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
404 312 S_000070_MC_IND_CITF_ARB0(1));
405 WREG32(RS600_MC_INDEX, 313 return RREG32(R_000074_MC_IND_DATA);
406 ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
407 r = RREG32(RS600_MC_DATA);
408 return r;
409} 314}
410 315
411void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 316void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
412{ 317{
413 WREG32(RS600_MC_INDEX, 318 WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
414 RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | 319 S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
415 ((reg) & RS600_MC_ADDR_MASK)); 320 WREG32(R_000074_MC_IND_DATA, v);
416 WREG32(RS600_MC_DATA, v);
417} 321}
418 322
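For reference, the indirect MC access pattern implemented by the two accessors above amounts to: write the target register offset, the arbitration select and (for writes) the write-enable bit into MC_IND_INDEX, then move the value through MC_IND_DATA. A minimal usage sketch, assuming the driver's RREG32_MC/WREG32_MC wrappers route through these accessors as they do elsewhere in this patch; it mirrors what rs600_gart_enable() already does for MC_CNTL1:

        /* Read-modify-write of an indirect MC register via the accessors above. */
        u32 tmp = RREG32_MC(R_000009_MC_CNTL1);
        WREG32_MC(R_000009_MC_CNTL1, tmp | S_000009_ENABLE_PAGE_TABLES(1));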
419int rs600_init(struct radeon_device *rdev) 323void rs600_debugfs(struct radeon_device *rdev)
324{
325 if (r100_debugfs_rbbm_init(rdev))
326 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
327}
328
329void rs600_set_safe_registers(struct radeon_device *rdev)
420{ 330{
421 rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; 331 rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
422 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); 332 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
333}
334
335static void rs600_mc_program(struct radeon_device *rdev)
336{
337 struct rv515_mc_save save;
338
339 /* Stops all mc clients */
340 rv515_mc_stop(rdev, &save);
341
342 /* Wait for mc idle */
343 if (rs600_mc_wait_for_idle(rdev))
344 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
345
346 /* FIXME: What does AGP mean for such a chipset ? */
347 WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
348 WREG32_MC(R_000006_AGP_BASE, 0);
349 WREG32_MC(R_000007_AGP_BASE_2, 0);
350 /* Program MC */
351 WREG32_MC(R_000004_MC_FB_LOCATION,
352 S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
353 S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
354 WREG32(R_000134_HDP_FB_LOCATION,
355 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
356
357 rv515_mc_resume(rdev, &save);
358}
359
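R_000004_MC_FB_LOCATION packs the framebuffer range as two 16-bit fields carrying address bits 31:16, which is why vram_start and vram_end are shifted right by 16 in rs600_mc_program() above. A small decoding sketch using the G_ helpers from rs600d.h; the trailing 0xFFFF is an assumption that vram_end is 64KB-aligned minus one, it is not something this patch states:

        u32 fb_loc   = RREG32_MC(R_000004_MC_FB_LOCATION);
        u64 fb_start = (u64)G_000004_MC_FB_START(fb_loc) << 16;
        u64 fb_end   = ((u64)G_000004_MC_FB_TOP(fb_loc) << 16) | 0xFFFF;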
360static int rs600_startup(struct radeon_device *rdev)
361{
362 int r;
363
364 rs600_mc_program(rdev);
365 /* Resume clock */
366 rv515_clock_startup(rdev);
367 /* Initialize GPU configuration (# pipes, ...) */
368 rs600_gpu_init(rdev);
369 /* Initialize GART (initialize after TTM so we can allocate
370 * memory through TTM but finalize after TTM) */
371 r = rs600_gart_enable(rdev);
372 if (r)
373 return r;
374 /* Enable IRQ */
375 rdev->irq.sw_int = true;
376 rs600_irq_set(rdev);
377 /* 1M ring buffer */
378 r = r100_cp_init(rdev, 1024 * 1024);
379 if (r) {
380 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
381 return r;
382 }
383 r = r100_wb_init(rdev);
384 if (r)
385 dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
386 r = r100_ib_init(rdev);
387 if (r) {
388 dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
389 return r;
390 }
391 return 0;
392}
393
394int rs600_resume(struct radeon_device *rdev)
395{
396 /* Make sure GART is not working */
397 rs600_gart_disable(rdev);
398 /* Resume clock before doing reset */
399 rv515_clock_startup(rdev);
400 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
401 if (radeon_gpu_reset(rdev)) {
402 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
403 RREG32(R_000E40_RBBM_STATUS),
404 RREG32(R_0007C0_CP_STAT));
405 }
406 /* post */
407 atom_asic_init(rdev->mode_info.atom_context);
408 /* Resume clock after posting */
409 rv515_clock_startup(rdev);
410 return rs600_startup(rdev);
411}
412
413int rs600_suspend(struct radeon_device *rdev)
414{
415 r100_cp_disable(rdev);
416 r100_wb_disable(rdev);
417 rs600_irq_disable(rdev);
418 rs600_gart_disable(rdev);
419 return 0;
420}
421
422void rs600_fini(struct radeon_device *rdev)
423{
424 rs600_suspend(rdev);
425 r100_cp_fini(rdev);
426 r100_wb_fini(rdev);
427 r100_ib_fini(rdev);
428 radeon_gem_fini(rdev);
429 rs600_gart_fini(rdev);
430 radeon_irq_kms_fini(rdev);
431 radeon_fence_driver_fini(rdev);
432 radeon_object_fini(rdev);
433 radeon_atombios_fini(rdev);
434 kfree(rdev->bios);
435 rdev->bios = NULL;
436}
437
438int rs600_init(struct radeon_device *rdev)
439{
440 int r;
441
442 /* Disable VGA */
443 rv515_vga_render_disable(rdev);
444 /* Initialize scratch registers */
445 radeon_scratch_init(rdev);
446 /* Initialize surface registers */
447 radeon_surface_init(rdev);
448 /* BIOS */
449 if (!radeon_get_bios(rdev)) {
450 if (ASIC_IS_AVIVO(rdev))
451 return -EINVAL;
452 }
453 if (rdev->is_atom_bios) {
454 r = radeon_atombios_init(rdev);
455 if (r)
456 return r;
457 } else {
458 dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
459 return -EINVAL;
460 }
461 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
462 if (radeon_gpu_reset(rdev)) {
463 dev_warn(rdev->dev,
464 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
465 RREG32(R_000E40_RBBM_STATUS),
466 RREG32(R_0007C0_CP_STAT));
467 }
468 /* check if cards are posted or not */
469 if (!radeon_card_posted(rdev) && rdev->bios) {
470 DRM_INFO("GPU not posted. posting now...\n");
471 atom_asic_init(rdev->mode_info.atom_context);
472 }
473 /* Initialize clocks */
474 radeon_get_clock_info(rdev->ddev);
475 /* Get vram information */
476 rs600_vram_info(rdev);
477 /* Initialize memory controller (also test AGP) */
478 r = r420_mc_init(rdev);
479 if (r)
480 return r;
481 rs600_debugfs(rdev);
482 /* Fence driver */
483 r = radeon_fence_driver_init(rdev);
484 if (r)
485 return r;
486 r = radeon_irq_kms_init(rdev);
487 if (r)
488 return r;
489 /* Memory manager */
490 r = radeon_object_init(rdev);
491 if (r)
492 return r;
493 r = rs600_gart_init(rdev);
494 if (r)
495 return r;
496 rs600_set_safe_registers(rdev);
497 rdev->accel_working = true;
498 r = rs600_startup(rdev);
499 if (r) {
500 /* Something went wrong with the accel init, stop accel */
501 dev_err(rdev->dev, "Disabling GPU acceleration\n");
502 rs600_suspend(rdev);
503 r100_cp_fini(rdev);
504 r100_wb_fini(rdev);
505 r100_ib_fini(rdev);
506 rs600_gart_fini(rdev);
507 radeon_irq_kms_fini(rdev);
508 rdev->accel_working = false;
509 }
423 return 0; 510 return 0;
424} 511}
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
new file mode 100644
index 000000000000..81308924859a
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -0,0 +1,470 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS600D_H__
29#define __RS600D_H__
30
31/* Registers */
32#define R_000040_GEN_INT_CNTL 0x000040
33#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
34#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
35#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
36#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
37#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
38#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
39#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
40#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
41#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
42#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
43#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
44#define C_000040_SNAPSHOT2 0xFFFFFF7F
45#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
46#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
47#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
48#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
49#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
50#define C_000040_FP2_DETECT 0xFFFFFBFF
51#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
52#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
53#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
54#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
55#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
56#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
57#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14)
58#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1)
59#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF
60#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15)
61#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1)
62#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF
63#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17)
64#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1)
65#define C_000040_I2C_INT_EN 0xFFFDFFFF
66#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19)
67#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1)
68#define C_000040_GUI_IDLE 0xFFF7FFFF
69#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24)
70#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1)
71#define C_000040_VIPH_INT_EN 0xFEFFFFFF
72#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25)
73#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1)
74#define C_000040_SW_INT_EN 0xFDFFFFFF
75#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27)
76#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1)
77#define C_000040_GEYSERVILLE 0xF7FFFFFF
78#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28)
79#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1)
80#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF
81#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29)
82#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1)
83#define C_000040_DVI_I2C_INT 0xDFFFFFFF
84#define S_000040_GUIDMA(x) (((x) & 0x1) << 30)
85#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1)
86#define C_000040_GUIDMA 0xBFFFFFFF
87#define S_000040_VIDDMA(x) (((x) & 0x1) << 31)
88#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1)
89#define C_000040_VIDDMA 0x7FFFFFFF
90#define R_000044_GEN_INT_STATUS 0x000044
91#define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0)
92#define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1)
93#define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE
94#define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1)
95#define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1)
96#define C_000044_VGA_INT_STAT 0xFFFFFFFD
97#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8)
98#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1)
99#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF
100#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12)
101#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1)
102#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF
103#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13)
104#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1)
105#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF
106#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14)
107#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1)
108#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF
109#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15)
110#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1)
111#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF
112#define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16)
113#define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1)
114#define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF
115#define S_000044_I2C_INT(x) (((x) & 0x1) << 17)
116#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1)
117#define C_000044_I2C_INT 0xFFFDFFFF
118#define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18)
119#define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1)
120#define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF
121#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19)
122#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1)
123#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF
124#define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20)
125#define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1)
126#define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF
127#define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21)
128#define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1)
129#define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF
130#define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22)
131#define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1)
132#define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF
133#define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23)
134#define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1)
135#define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF
136#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24)
137#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1)
138#define C_000044_VIPH_INT 0xFEFFFFFF
139#define S_000044_SW_INT(x) (((x) & 0x1) << 25)
140#define G_000044_SW_INT(x) (((x) >> 25) & 0x1)
141#define C_000044_SW_INT 0xFDFFFFFF
142#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26)
143#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1)
144#define C_000044_SW_INT_SET 0xFBFFFFFF
145#define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27)
146#define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 0x1)
147#define C_000044_IDCT_INT_STAT 0xF7FFFFFF
148#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30)
149#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1)
150#define C_000044_GUIDMA_STAT 0xBFFFFFFF
151#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31)
152#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1)
153#define C_000044_VIDDMA_STAT 0x7FFFFFFF
154#define R_00004C_BUS_CNTL 0x00004C
155#define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14)
156#define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1)
157#define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF
158#define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20)
159#define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1)
160#define C_00004C_BUS_MSI_REARM 0xFFEFFFFF
161#define R_000070_MC_IND_INDEX 0x000070
162#define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0)
163#define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF)
164#define C_000070_MC_IND_ADDR 0xFFFF0000
165#define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16)
166#define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1)
167#define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF
168#define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17)
169#define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1)
170#define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF
171#define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18)
172#define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1)
173#define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF
174#define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19)
175#define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1)
176#define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF
177#define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20)
178#define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1)
179#define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF
180#define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21)
181#define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1)
182#define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF
183#define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22)
184#define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1)
185#define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF
186#define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23)
187#define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1)
188#define C_000070_MC_IND_WR_EN 0xFF7FFFFF
189#define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24)
190#define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1)
191#define C_000070_MC_IND_RD_INV 0xFEFFFFFF
192#define R_000074_MC_IND_DATA 0x000074
193#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0)
194#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
195#define C_000074_MC_IND_DATA 0x00000000
196#define R_000134_HDP_FB_LOCATION 0x000134
197#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
198#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
199#define C_000134_HDP_FB_START 0xFFFF0000
200#define R_0007C0_CP_STAT 0x0007C0
201#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
202#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
203#define C_0007C0_MRU_BUSY 0xFFFFFFFE
204#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
205#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
206#define C_0007C0_MWU_BUSY 0xFFFFFFFD
207#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
208#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
209#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
210#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
211#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
212#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
213#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
214#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
215#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
216#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
217#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
218#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
219#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
220#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
221#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
222#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
223#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
224#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
225#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
226#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
227#define C_0007C0_CSI_BUSY 0xFFFFDFFF
228#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
229#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
230#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
231#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
232#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
233#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
234#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
235#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
236#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
237#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
238#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
239#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
240#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
241#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
242#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
243#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
244#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
245#define C_0007C0_CP_BUSY 0x7FFFFFFF
246#define R_000E40_RBBM_STATUS 0x000E40
247#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
248#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
249#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
250#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
251#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
252#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
253#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
254#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
255#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
256#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
257#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
258#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
259#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
260#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
261#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
262#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
263#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
264#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
265#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
266#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
267#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
268#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
269#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
270#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
271#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
272#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
273#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
274#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
275#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
276#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
277#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
278#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
279#define C_000E40_E2_BUSY 0xFFFDFFFF
280#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
281#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
282#define C_000E40_RB2D_BUSY 0xFFFBFFFF
283#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
284#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
285#define C_000E40_RB3D_BUSY 0xFFF7FFFF
286#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
287#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
288#define C_000E40_VAP_BUSY 0xFFEFFFFF
289#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
290#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
291#define C_000E40_RE_BUSY 0xFFDFFFFF
292#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
293#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
294#define C_000E40_TAM_BUSY 0xFFBFFFFF
295#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
296#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
297#define C_000E40_TDM_BUSY 0xFF7FFFFF
298#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
299#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
300#define C_000E40_PB_BUSY 0xFEFFFFFF
301#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
302#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
303#define C_000E40_TIM_BUSY 0xFDFFFFFF
304#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
305#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
306#define C_000E40_GA_BUSY 0xFBFFFFFF
307#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
308#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
309#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
310#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
311#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
312#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
313#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4
314#define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
315#define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
316#define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000
317#define R_006534_D1MODE_VBLANK_STATUS 0x006534
318#define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
319#define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
320#define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE
321#define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
322#define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
323#define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF
324#define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
325#define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
326#define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF
327#define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
328#define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
329#define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF
330#define R_006540_DxMODE_INT_MASK 0x006540
331#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0)
332#define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1)
333#define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE
334#define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4)
335#define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1)
336#define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF
337#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8)
338#define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1)
339#define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF
340#define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12)
341#define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1)
342#define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF
343#define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30)
344#define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1)
345#define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF
346#define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31)
347#define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1)
348#define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF
349#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4
350#define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0)
351#define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF)
352#define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000
353#define R_006D34_D2MODE_VBLANK_STATUS 0x006D34
354#define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0)
355#define G_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1)
356#define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE
357#define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4)
358#define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1)
359#define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF
360#define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12)
361#define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1)
362#define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF
363#define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16)
364#define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1)
365#define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF
366#define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC
367#define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4)
368#define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1)
369#define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF
370#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
371#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
372#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
373
374
375/* MC registers */
376#define R_000000_MC_STATUS 0x000000
377#define S_000000_MC_IDLE(x) (((x) & 0x1) << 0)
378#define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1)
379#define C_000000_MC_IDLE 0xFFFFFFFE
380#define R_000004_MC_FB_LOCATION 0x000004
381#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
382#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
383#define C_000004_MC_FB_START 0xFFFF0000
384#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
385#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
386#define C_000004_MC_FB_TOP 0x0000FFFF
387#define R_000005_MC_AGP_LOCATION 0x000005
388#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
389#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
390#define C_000005_MC_AGP_START 0xFFFF0000
391#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
392#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
393#define C_000005_MC_AGP_TOP 0x0000FFFF
394#define R_000006_AGP_BASE 0x000006
395#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
396#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
397#define C_000006_AGP_BASE_ADDR 0x00000000
398#define R_000007_AGP_BASE_2 0x000007
399#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
400#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
401#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
402#define R_000009_MC_CNTL1 0x000009
403#define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26)
404#define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1)
405#define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF
406/* FIXME: don't know the various field sizes, need feedback from AMD */
407#define R_000100_MC_PT0_CNTL 0x000100
408#define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0)
409#define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1)
410#define C_000100_ENABLE_PT 0xFFFFFFFE
411#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15)
412#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7)
413#define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF
414#define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21)
415#define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7)
416#define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF
417#define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28)
418#define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1)
419#define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF
420#define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29)
421#define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1)
422#define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF
423#define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102
424#define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0)
425#define G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1)
426#define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE
427#define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1)
428#define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3)
429#define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9
430#define V_000102_PAGE_TABLE_FLAT 0
431/* R600 documentation suggests that this should be a number of pages */
432#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112
433#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114
434#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C
435#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C
436#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C
437#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C
438#define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C
439#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0)
440#define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1)
441#define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE
442#define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1)
443#define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1)
444#define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD
445#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8)
446#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3)
447#define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF
448#define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0
449#define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1
450#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2
451#define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3
452#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10)
453#define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1)
454#define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF
455#define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0
456#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1
457#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11)
458#define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7)
459#define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF
460#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14)
461#define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1)
462#define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF
463#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15)
464#define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7)
465#define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF
466#define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20)
467#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
468#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
469
470#endif
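The definitions above follow the S_/G_/C_/V_ naming convention used by these rs*d.h headers: S_ shifts a field value into position, G_ extracts it from a register word, C_ is the AND-mask that clears the field, and V_ names a legal value for it. A minimal field-update sketch built only from macros defined above (illustrative, not part of the patch):

        /* Set PAGE_TABLE_DEPTH to the flat layout without disturbing other bits. */
        u32 v = RREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL);
        v &= C_000102_PAGE_TABLE_DEPTH;
        v |= S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT);
        WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, v);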
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 7a0098ddf977..025e3225346c 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -26,105 +26,29 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 29#include "radeon.h"
31#include "rs690r.h"
32#include "atom.h" 30#include "atom.h"
33#include "atom-bits.h" 31#include "rs690d.h"
34
35/* rs690,rs740 depends on : */
36void r100_hdp_reset(struct radeon_device *rdev);
37int r300_mc_wait_for_idle(struct radeon_device *rdev);
38void r420_pipes_init(struct radeon_device *rdev);
39void rs400_gart_disable(struct radeon_device *rdev);
40int rs400_gart_enable(struct radeon_device *rdev);
41void rs400_gart_adjust_size(struct radeon_device *rdev);
42void rs600_mc_disable_clients(struct radeon_device *rdev);
43
44/* This files gather functions specifics to :
45 * rs690,rs740
46 *
47 * Some of these functions might be used by newer ASICs.
48 */
49void rs690_gpu_init(struct radeon_device *rdev);
50int rs690_mc_wait_for_idle(struct radeon_device *rdev);
51
52
53/*
54 * MC functions.
55 */
56int rs690_mc_init(struct radeon_device *rdev)
57{
58 uint32_t tmp;
59 int r;
60
61 if (r100_debugfs_rbbm_init(rdev)) {
62 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
63 }
64
65 rs690_gpu_init(rdev);
66 rs400_gart_disable(rdev);
67
68 /* Setup GPU memory space */
69 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
70 rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
71 rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
72 rdev->mc.vram_location = 0xFFFFFFFFUL;
73 r = radeon_mc_setup(rdev);
74 if (r) {
75 return r;
76 }
77
78 /* Program GPU memory space */
79 rs600_mc_disable_clients(rdev);
80 if (rs690_mc_wait_for_idle(rdev)) {
81 printk(KERN_WARNING "Failed to wait MC idle while "
82 "programming pipes. Bad things might happen.\n");
83 }
84 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
85 tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
86 tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
87 WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
88 /* FIXME: Does this reg exist on RS480,RS740 ? */
89 WREG32(0x310, rdev->mc.vram_location);
90 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
91 return 0;
92}
93
94void rs690_mc_fini(struct radeon_device *rdev)
95{
96}
97
98 32
99/* 33static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
100 * Global GPU functions
101 */
102int rs690_mc_wait_for_idle(struct radeon_device *rdev)
103{ 34{
104 unsigned i; 35 unsigned i;
105 uint32_t tmp; 36 uint32_t tmp;
106 37
107 for (i = 0; i < rdev->usec_timeout; i++) { 38 for (i = 0; i < rdev->usec_timeout; i++) {
108 /* read MC_STATUS */ 39 /* read MC_STATUS */
109 tmp = RREG32_MC(RS690_MC_STATUS); 40 tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
110 if (tmp & RS690_MC_STATUS_IDLE) { 41 if (G_000090_MC_SYSTEM_IDLE(tmp))
111 return 0; 42 return 0;
112 } 43 udelay(1);
113 DRM_UDELAY(1);
114 } 44 }
115 return -1; 45 return -1;
116} 46}
117 47
118void rs690_errata(struct radeon_device *rdev) 48static void rs690_gpu_init(struct radeon_device *rdev)
119{
120 rdev->pll_errata = 0;
121}
122
123void rs690_gpu_init(struct radeon_device *rdev)
124{ 49{
125 /* FIXME: HDP same place on rs690 ? */ 50 /* FIXME: HDP same place on rs690 ? */
126 r100_hdp_reset(rdev); 51 r100_hdp_reset(rdev);
127 rv515_vga_render_disable(rdev);
128 /* FIXME: is this correct ? */ 52 /* FIXME: is this correct ? */
129 r420_pipes_init(rdev); 53 r420_pipes_init(rdev);
130 if (rs690_mc_wait_for_idle(rdev)) { 54 if (rs690_mc_wait_for_idle(rdev)) {
@@ -133,10 +57,6 @@ void rs690_gpu_init(struct radeon_device *rdev)
133 } 57 }
134} 58}
135 59
136
137/*
138 * VRAM info.
139 */
140void rs690_pm_info(struct radeon_device *rdev) 60void rs690_pm_info(struct radeon_device *rdev)
141{ 61{
142 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 62 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
@@ -250,39 +170,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
250 /* 170 /*
251 * Line Buffer Setup 171 * Line Buffer Setup
252 * There is a single line buffer shared by both display controllers. 172 * There is a single line buffer shared by both display controllers.
253 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between 173 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
254 * the display controllers. The partitioning can either be done 174 * the display controllers. The partitioning can either be done
255 * manually or via one of four preset allocations specified in bits 1:0: 175 * manually or via one of four preset allocations specified in bits 1:0:
256 * 0 - line buffer is divided in half and shared between crtc 176 * 0 - line buffer is divided in half and shared between crtc
257 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 177 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
258 * 2 - D1 gets the whole buffer 178 * 2 - D1 gets the whole buffer
259 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 179 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
260 * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual 180 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT controls switches to manual
261 * allocation mode. In manual allocation mode, D1 always starts at 0, 181 * allocation mode. In manual allocation mode, D1 always starts at 0,
262 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. 182 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
263 */ 183 */
264 tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; 184 tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
265 tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; 185 tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
266 /* auto */ 186 /* auto */
267 if (mode1 && mode2) { 187 if (mode1 && mode2) {
268 if (mode1->hdisplay > mode2->hdisplay) { 188 if (mode1->hdisplay > mode2->hdisplay) {
269 if (mode1->hdisplay > 2560) 189 if (mode1->hdisplay > 2560)
270 tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; 190 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
271 else 191 else
272 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; 192 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
273 } else if (mode2->hdisplay > mode1->hdisplay) { 193 } else if (mode2->hdisplay > mode1->hdisplay) {
274 if (mode2->hdisplay > 2560) 194 if (mode2->hdisplay > 2560)
275 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; 195 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
276 else 196 else
277 tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; 197 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
278 } else 198 } else
279 tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; 199 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
280 } else if (mode1) { 200 } else if (mode1) {
281 tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; 201 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
282 } else if (mode2) { 202 } else if (mode2) {
283 tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; 203 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
284 } 204 }
285 WREG32(DC_LB_MEMORY_SPLIT, tmp); 205 WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
286} 206}
287 207
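A manual split, described in the comment above but not used by this code, would set bit 2 and place D1's end address divided by two in bits 14:4. A hypothetical sketch using the raw bit positions from that comment; d1_end is an invented variable and no named macros for the manual fields appear in this patch:

        /* Hypothetical manual allocation: D1 gets the first d1_end words. */
        u32 tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT);
        tmp |= (1 << 2);                        /* manual allocation mode */
        tmp &= ~(0x7FF << 4);
        tmp |= ((d1_end / 2) & 0x7FF) << 4;     /* D1 end / 2 in bits 14:4 */
        WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);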
288struct rs690_watermark { 208struct rs690_watermark {
@@ -487,28 +407,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
487 * option. 407 * option.
488 */ 408 */
489 if (rdev->disp_priority == 2) { 409 if (rdev->disp_priority == 2) {
490 tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); 410 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
491 tmp &= ~MC_DISP1R_INIT_LAT_MASK; 411 tmp &= C_000104_MC_DISP0R_INIT_LAT;
492 tmp &= ~MC_DISP0R_INIT_LAT_MASK; 412 tmp &= C_000104_MC_DISP1R_INIT_LAT;
493 if (mode1)
494 tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
495 if (mode0) 413 if (mode0)
496 tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); 414 tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
497 WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); 415 if (mode1)
416 tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
417 WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
498 } 418 }
499 rs690_line_buffer_adjust(rdev, mode0, mode1); 419 rs690_line_buffer_adjust(rdev, mode0, mode1);
500 420
501 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) 421 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
502 WREG32(DCP_CONTROL, 0); 422 WREG32(R_006C9C_DCP_CONTROL, 0);
503 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) 423 if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
504 WREG32(DCP_CONTROL, 2); 424 WREG32(R_006C9C_DCP_CONTROL, 2);
505 425
506 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); 426 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
507 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); 427 rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
508 428
509 tmp = (wm0.lb_request_fifo_depth - 1); 429 tmp = (wm0.lb_request_fifo_depth - 1);
510 tmp |= (wm1.lb_request_fifo_depth - 1) << 16; 430 tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
511 WREG32(LB_MAX_REQ_OUTSTANDING, tmp); 431 WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
512 432
513 if (mode0 && mode1) { 433 if (mode0 && mode1) {
514 if (rfixed_trunc(wm0.dbpp) > 64) 434 if (rfixed_trunc(wm0.dbpp) > 64)
@@ -561,10 +481,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
561 priority_mark12.full = 0; 481 priority_mark12.full = 0;
562 if (wm1.priority_mark_max.full > priority_mark12.full) 482 if (wm1.priority_mark_max.full > priority_mark12.full)
563 priority_mark12.full = wm1.priority_mark_max.full; 483 priority_mark12.full = wm1.priority_mark_max.full;
564 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 484 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
565 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 485 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
566 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 486 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
567 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 487 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
568 } else if (mode0) { 488 } else if (mode0) {
569 if (rfixed_trunc(wm0.dbpp) > 64) 489 if (rfixed_trunc(wm0.dbpp) > 64)
570 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); 490 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -591,10 +511,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
591 priority_mark02.full = 0; 511 priority_mark02.full = 0;
592 if (wm0.priority_mark_max.full > priority_mark02.full) 512 if (wm0.priority_mark_max.full > priority_mark02.full)
593 priority_mark02.full = wm0.priority_mark_max.full; 513 priority_mark02.full = wm0.priority_mark_max.full;
594 WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 514 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
595 WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 515 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
596 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); 516 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
597 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); 517 S_006D48_D2MODE_PRIORITY_A_OFF(1));
518 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
519 S_006D4C_D2MODE_PRIORITY_B_OFF(1));
598 } else { 520 } else {
599 if (rfixed_trunc(wm1.dbpp) > 64) 521 if (rfixed_trunc(wm1.dbpp) > 64)
600 a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); 522 a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
@@ -621,30 +543,203 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
621 priority_mark12.full = 0; 543 priority_mark12.full = 0;
622 if (wm1.priority_mark_max.full > priority_mark12.full) 544 if (wm1.priority_mark_max.full > priority_mark12.full)
623 priority_mark12.full = wm1.priority_mark_max.full; 545 priority_mark12.full = wm1.priority_mark_max.full;
624 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); 546 WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
625 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); 547 S_006548_D1MODE_PRIORITY_A_OFF(1));
626 WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 548 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
627 WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 549 S_00654C_D1MODE_PRIORITY_B_OFF(1));
550 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
551 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
628 } 552 }
629} 553}
630 554
631/*
632 * Indirect registers accessor
633 */
634uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) 555uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
635{ 556{
636 uint32_t r; 557 uint32_t r;
637 558
638 WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK)); 559 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
639 r = RREG32(RS690_MC_DATA); 560 r = RREG32(R_00007C_MC_DATA);
640 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK); 561 WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
641 return r; 562 return r;
642} 563}
643 564
644void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 565void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
645{ 566{
646 WREG32(RS690_MC_INDEX, 567 WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
647 RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK)); 568 S_000078_MC_IND_WR_EN(1));
648 WREG32(RS690_MC_DATA, v); 569 WREG32(R_00007C_MC_DATA, v);
649 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); 570 WREG32(R_000078_MC_INDEX, 0x7F);
571}
572
573void rs690_mc_program(struct radeon_device *rdev)
574{
575 struct rv515_mc_save save;
576
577 /* Stops all mc clients */
578 rv515_mc_stop(rdev, &save);
579
580 /* Wait for mc idle */
581 if (rs690_mc_wait_for_idle(rdev))
582 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
583 /* Program MC, the address space should be limited to 32 bits */
584 WREG32_MC(R_000100_MCCFG_FB_LOCATION,
585 S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
586 S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
587 WREG32(R_000134_HDP_FB_LOCATION,
588 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
589
590 rv515_mc_resume(rdev, &save);
591}
592
593static int rs690_startup(struct radeon_device *rdev)
594{
595 int r;
596
597 rs690_mc_program(rdev);
598 /* Resume clock */
599 rv515_clock_startup(rdev);
600 /* Initialize GPU configuration (# pipes, ...) */
601 rs690_gpu_init(rdev);
602 /* Initialize GART (initialize after TTM so we can allocate
603 * memory through TTM but finalize after TTM) */
604 r = rs400_gart_enable(rdev);
605 if (r)
606 return r;
607 /* Enable IRQ */
608 rdev->irq.sw_int = true;
609 rs600_irq_set(rdev);
610 /* 1M ring buffer */
611 r = r100_cp_init(rdev, 1024 * 1024);
612 if (r) {
613 dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
614 return r;
615 }
616 r = r100_wb_init(rdev);
617 if (r)
618 dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
619 r = r100_ib_init(rdev);
620 if (r) {
621 dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
622 return r;
623 }
624 return 0;
625}
626
627int rs690_resume(struct radeon_device *rdev)
628{
629 /* Make sure GART is not working */
630 rs400_gart_disable(rdev);
631 /* Resume clock before doing reset */
632 rv515_clock_startup(rdev);
633 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
634 if (radeon_gpu_reset(rdev)) {
635 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
636 RREG32(R_000E40_RBBM_STATUS),
637 RREG32(R_0007C0_CP_STAT));
638 }
639 /* post */
640 atom_asic_init(rdev->mode_info.atom_context);
641 /* Resume clock after posting */
642 rv515_clock_startup(rdev);
643 return rs690_startup(rdev);
644}
645
646int rs690_suspend(struct radeon_device *rdev)
647{
648 r100_cp_disable(rdev);
649 r100_wb_disable(rdev);
650 rs600_irq_disable(rdev);
651 rs400_gart_disable(rdev);
652 return 0;
653}
654
655void rs690_fini(struct radeon_device *rdev)
656{
657 rs690_suspend(rdev);
658 r100_cp_fini(rdev);
659 r100_wb_fini(rdev);
660 r100_ib_fini(rdev);
661 radeon_gem_fini(rdev);
662 rs400_gart_fini(rdev);
663 radeon_irq_kms_fini(rdev);
664 radeon_fence_driver_fini(rdev);
665 radeon_object_fini(rdev);
666 radeon_atombios_fini(rdev);
667 kfree(rdev->bios);
668 rdev->bios = NULL;
669}
670
671int rs690_init(struct radeon_device *rdev)
672{
673 int r;
674
675 /* Disable VGA */
676 rv515_vga_render_disable(rdev);
677 /* Initialize scratch registers */
678 radeon_scratch_init(rdev);
679 /* Initialize surface registers */
680 radeon_surface_init(rdev);
681 /* TODO: disable VGA need to use VGA request */
682 /* BIOS*/
683 if (!radeon_get_bios(rdev)) {
684 if (ASIC_IS_AVIVO(rdev))
685 return -EINVAL;
686 }
687 if (rdev->is_atom_bios) {
688 r = radeon_atombios_init(rdev);
689 if (r)
690 return r;
691 } else {
692 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
693 return -EINVAL;
694 }
695 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
696 if (radeon_gpu_reset(rdev)) {
697 dev_warn(rdev->dev,
698 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
699 RREG32(R_000E40_RBBM_STATUS),
700 RREG32(R_0007C0_CP_STAT));
701 }
702 /* check if cards are posted or not */
703 if (!radeon_card_posted(rdev) && rdev->bios) {
704 DRM_INFO("GPU not posted. posting now...\n");
705 atom_asic_init(rdev->mode_info.atom_context);
706 }
707 /* Initialize clocks */
708 radeon_get_clock_info(rdev->ddev);
 709 /* Get vram information */
710 rs690_vram_info(rdev);
711 /* Initialize memory controller (also test AGP) */
712 r = r420_mc_init(rdev);
713 if (r)
714 return r;
715 rv515_debugfs(rdev);
716 /* Fence driver */
717 r = radeon_fence_driver_init(rdev);
718 if (r)
719 return r;
720 r = radeon_irq_kms_init(rdev);
721 if (r)
722 return r;
723 /* Memory manager */
724 r = radeon_object_init(rdev);
725 if (r)
726 return r;
727 r = rs400_gart_init(rdev);
728 if (r)
729 return r;
730 rs600_set_safe_registers(rdev);
731 rdev->accel_working = true;
732 r = rs690_startup(rdev);
733 if (r) {
 734 /* Something went wrong with the accel init, stop accel */
735 dev_err(rdev->dev, "Disabling GPU acceleration\n");
736 rs690_suspend(rdev);
737 r100_cp_fini(rdev);
738 r100_wb_fini(rdev);
739 r100_ib_fini(rdev);
740 rs400_gart_fini(rdev);
741 radeon_irq_kms_fini(rdev);
742 rdev->accel_working = false;
743 }
744 return 0;
650} 745}
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
new file mode 100644
index 000000000000..62d31e7a897f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -0,0 +1,307 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RS690D_H__
29#define __RS690D_H__
30
31/* Registers */
32#define R_000078_MC_INDEX 0x000078
33#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
34#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
35#define C_000078_MC_IND_ADDR 0xFFFFFE00
36#define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
37#define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1)
38#define C_000078_MC_IND_WR_EN 0xFFFFFDFF
39#define R_00007C_MC_DATA 0x00007C
40#define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0)
41#define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF)
42#define C_00007C_MC_DATA 0x00000000
43#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
44#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
45#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
46#define C_0000F8_CONFIG_MEMSIZE 0x00000000
47#define R_000134_HDP_FB_LOCATION 0x000134
48#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
49#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
50#define C_000134_HDP_FB_START 0xFFFF0000
51#define R_0007C0_CP_STAT 0x0007C0
52#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
53#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
54#define C_0007C0_MRU_BUSY 0xFFFFFFFE
55#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
56#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
57#define C_0007C0_MWU_BUSY 0xFFFFFFFD
58#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
59#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
60#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
61#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
62#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
63#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
64#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
65#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
66#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
67#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
68#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
69#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
70#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
71#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
72#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
73#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
74#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
75#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
76#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
77#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
78#define C_0007C0_CSI_BUSY 0xFFFFDFFF
79#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
80#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
81#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
82#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
83#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
84#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
85#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
86#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
87#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
88#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
89#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
90#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
91#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
92#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
93#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
94#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
95#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
96#define C_0007C0_CP_BUSY 0x7FFFFFFF
97#define R_000E40_RBBM_STATUS 0x000E40
98#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
99#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
100#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
101#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
102#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
103#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
104#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
105#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
106#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
107#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
108#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
109#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
110#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
111#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
112#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
113#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
114#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
115#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
116#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
117#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
118#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
119#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
120#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
121#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
122#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
123#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
124#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
125#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
126#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
127#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
128#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
129#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
130#define C_000E40_E2_BUSY 0xFFFDFFFF
131#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
132#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
133#define C_000E40_RB2D_BUSY 0xFFFBFFFF
134#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
135#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
136#define C_000E40_RB3D_BUSY 0xFFF7FFFF
137#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
138#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
139#define C_000E40_VAP_BUSY 0xFFEFFFFF
140#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
141#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
142#define C_000E40_RE_BUSY 0xFFDFFFFF
143#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
144#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
145#define C_000E40_TAM_BUSY 0xFFBFFFFF
146#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
147#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
148#define C_000E40_TDM_BUSY 0xFF7FFFFF
149#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
150#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
151#define C_000E40_PB_BUSY 0xFEFFFFFF
152#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
153#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
154#define C_000E40_TIM_BUSY 0xFDFFFFFF
155#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
156#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
157#define C_000E40_GA_BUSY 0xFBFFFFFF
158#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
159#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
160#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
161#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
162#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
163#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
164#define R_006520_DC_LB_MEMORY_SPLIT 0x006520
165#define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0)
166#define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3)
167#define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC
168#define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2)
169#define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1)
170#define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB
171#define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
172#define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
173#define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2
174#define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
175#define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4)
176#define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF)
177#define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F
178#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
179#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
180#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
181#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
182#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
183#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
184#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
185#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
186#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
187#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
188#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
189#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
190#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
191#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
192#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
193#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
194#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
195#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
196#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
197#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
198#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
199#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
200#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
201#define R_006C9C_DCP_CONTROL 0x006C9C
202#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
203#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
204#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
205#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
206#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
207#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
208#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
209#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
210#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
211#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
212#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
213#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
214#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
215#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
216#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
217#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
218#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
219#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
220#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
221#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
222#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
223#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
224#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
225#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
226#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
227#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
228#define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58
229#define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0)
230#define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF)
231#define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0
232#define S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16)
233#define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF)
234#define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF
235
236
237#define R_000090_MC_SYSTEM_STATUS 0x000090
238#define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0)
239#define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1)
240#define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE
241#define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1)
242#define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1)
243#define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD
244#define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2)
245#define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1)
246#define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB
247#define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3)
248#define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1)
249#define C_000090_MC_SELECT_PM 0xFFFFFFF7
250#define S_000090_RESERVED4(x) (((x) & 0xF) << 4)
251#define G_000090_RESERVED4(x) (((x) >> 4) & 0xF)
252#define C_000090_RESERVED4 0xFFFFFF0F
253#define S_000090_RESERVED8(x) (((x) & 0xF) << 8)
254#define G_000090_RESERVED8(x) (((x) >> 8) & 0xF)
255#define C_000090_RESERVED8 0xFFFFF0FF
256#define S_000090_RESERVED12(x) (((x) & 0xF) << 12)
257#define G_000090_RESERVED12(x) (((x) >> 12) & 0xF)
258#define C_000090_RESERVED12 0xFFFF0FFF
259#define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16)
260#define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1)
261#define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF
262#define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17)
263#define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1)
264#define C_000090_MCA_IDLE 0xFFFDFFFF
265#define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18)
266#define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1)
267#define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF
268#define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19)
269#define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1)
270#define C_000090_MCA_ARB_IDLE 0xFFF7FFFF
271#define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20)
272#define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF)
273#define C_000090_RESERVED20 0x000FFFFF
274#define R_000100_MCCFG_FB_LOCATION 0x000100
275#define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0)
276#define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
277#define C_000100_MC_FB_START 0xFFFF0000
278#define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
279#define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
280#define C_000100_MC_FB_TOP 0x0000FFFF
281#define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104
282#define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0)
283#define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF)
284#define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0
285#define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4)
286#define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF)
287#define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F
288#define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8)
289#define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF)
290#define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF
291#define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12)
292#define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF)
293#define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF
294#define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16)
295#define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF)
296#define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF
297#define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20)
298#define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF)
299#define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF
300#define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24)
301#define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF)
302#define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF
303#define S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28)
304#define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF)
305#define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF
306
307#endif
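The header above follows a uniform naming scheme: for every field, S_xxx(v) shifts a value into position, G_xxx(reg) extracts the field from a register word, and C_xxx is the AND-mask that clears it. A small self-contained example of the intended read-modify-write usage, with the register modelled as a plain variable instead of the driver's RREG32()/WREG32() accessors:

/* Illustrative read-modify-write using the S_/G_/C_ field macros.
 * The register value is a local variable here; in the driver it would
 * come from RREG32(R_006548_D1MODE_PRIORITY_A_CNT) and go back via WREG32(). */
#include <stdint.h>
#include <stdio.h>

#define S_006548_D1MODE_PRIORITY_MARK_A(x)  (((x) & 0x7FFF) << 0)
#define G_006548_D1MODE_PRIORITY_MARK_A(x)  (((x) >> 0) & 0x7FFF)
#define C_006548_D1MODE_PRIORITY_MARK_A     0xFFFF8000

int main(void)
{
        uint32_t reg = 0xABCD1234;                      /* pretend register read   */
        uint32_t mark = G_006548_D1MODE_PRIORITY_MARK_A(reg);   /* extract field   */

        reg &= C_006548_D1MODE_PRIORITY_MARK_A;                 /* clear the field */
        reg |= S_006548_D1MODE_PRIORITY_MARK_A(mark + 1);       /* set new value   */

        printf("new priority mark: 0x%04x\n", G_006548_D1MODE_PRIORITY_MARK_A(reg));
        return 0;                                       /* would be a WREG32()     */
}

The same three-macro pattern appears throughout the *d.h headers added in this patch, which is what lets rs690.c drop the hand-rolled shift/mask constants from rs690r.h below.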
diff --git a/drivers/gpu/drm/radeon/rs690r.h b/drivers/gpu/drm/radeon/rs690r.h
deleted file mode 100644
index c0d9faa2175b..000000000000
--- a/drivers/gpu/drm/radeon/rs690r.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef RS690R_H
29#define RS690R_H
30
31/* RS690/RS740 registers */
32#define MC_INDEX 0x0078
33# define MC_INDEX_MASK 0x1FF
34# define MC_INDEX_WR_EN (1 << 9)
35# define MC_INDEX_WR_ACK 0x7F
36#define MC_DATA 0x007C
37#define HDP_FB_LOCATION 0x0134
38#define DC_LB_MEMORY_SPLIT 0x6520
39#define DC_LB_MEMORY_SPLIT_MASK 0x00000003
40#define DC_LB_MEMORY_SPLIT_SHIFT 0
41#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0
42#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1
43#define DC_LB_MEMORY_SPLIT_D1_ONLY 2
44#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3
45#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2)
46#define DC_LB_DISP1_END_ADR_SHIFT 4
47#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0
48#define D1MODE_PRIORITY_A_CNT 0x6548
49#define MODE_PRIORITY_MARK_MASK 0x00007FFF
50#define MODE_PRIORITY_OFF (1 << 16)
51#define MODE_PRIORITY_ALWAYS_ON (1 << 20)
52#define MODE_PRIORITY_FORCE_MASK (1 << 24)
53#define D1MODE_PRIORITY_B_CNT 0x654C
54#define LB_MAX_REQ_OUTSTANDING 0x6D58
55#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F
56#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0
57#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000
58#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16
59#define DCP_CONTROL 0x6C9C
60#define D2MODE_PRIORITY_A_CNT 0x6D48
61#define D2MODE_PRIORITY_B_CNT 0x6D4C
62
63/* MC indirect registers */
64#define MC_STATUS_IDLE (1 << 0)
65#define MC_MISC_CNTL 0x18
66#define DISABLE_GTW (1 << 1)
67#define GART_INDEX_REG_EN (1 << 12)
68#define BLOCK_GFX_D3_EN (1 << 14)
69#define GART_FEATURE_ID 0x2B
70#define HANG_EN (1 << 11)
71#define TLB_ENABLE (1 << 18)
72#define P2P_ENABLE (1 << 19)
73#define GTW_LAC_EN (1 << 25)
74#define LEVEL2_GART (0 << 30)
75#define LEVEL1_GART (1 << 30)
76#define PDC_EN (1 << 31)
77#define GART_BASE 0x2C
78#define GART_CACHE_CNTRL 0x2E
79# define GART_CACHE_INVALIDATE (1 << 0)
80#define MC_STATUS 0x90
81#define MCCFG_FB_LOCATION 0x100
82#define MC_FB_START_MASK 0x0000FFFF
83#define MC_FB_START_SHIFT 0
84#define MC_FB_TOP_MASK 0xFFFF0000
85#define MC_FB_TOP_SHIFT 16
86#define MCCFG_AGP_LOCATION 0x101
87#define MC_AGP_START_MASK 0x0000FFFF
88#define MC_AGP_START_SHIFT 0
89#define MC_AGP_TOP_MASK 0xFFFF0000
90#define MC_AGP_TOP_SHIFT 16
91#define MCCFG_AGP_BASE 0x102
92#define MCCFG_AGP_BASE_2 0x103
93#define MC_INIT_MISC_LAT_TIMER 0x104
94#define MC_DISP0R_INIT_LAT_SHIFT 8
95#define MC_DISP0R_INIT_LAT_MASK 0x00000F00
96#define MC_DISP1R_INIT_LAT_SHIFT 12
97#define MC_DISP1R_INIT_LAT_MASK 0x0000F000
98
99#endif
diff --git a/drivers/gpu/drm/radeon/rv200d.h b/drivers/gpu/drm/radeon/rv200d.h
new file mode 100644
index 000000000000..c5b398330c26
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv200d.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RV200D_H__
29#define __RV200D_H__
30
31#define R_00015C_AGP_BASE_2 0x00015C
32#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
33#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
34#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0
35
36#endif
diff --git a/drivers/gpu/drm/radeon/rv250d.h b/drivers/gpu/drm/radeon/rv250d.h
new file mode 100644
index 000000000000..e5a70b06fe1f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv250d.h
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RV250D_H__
29#define __RV250D_H__
30
31#define R_00000D_SCLK_CNTL_M6 0x00000D
32#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0)
33#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7)
34#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8
35#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3)
36#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1)
37#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7
38#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4)
39#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1)
40#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF
41#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5)
42#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1)
43#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF
44#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6)
45#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1)
46#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF
47#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7)
48#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1)
49#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F
50#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8)
51#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1)
52#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF
53#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9)
54#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1)
55#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF
56#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10)
57#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1)
58#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF
59#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11)
60#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1)
61#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF
62#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12)
63#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1)
64#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF
65#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13)
66#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1)
67#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF
68#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14)
69#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1)
70#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF
71#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15)
72#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1)
73#define C_00000D_FORCE_DISP2 0xFFFF7FFF
74#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16)
75#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1)
76#define C_00000D_FORCE_CP 0xFFFEFFFF
77#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17)
78#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1)
79#define C_00000D_FORCE_HDP 0xFFFDFFFF
80#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18)
81#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1)
82#define C_00000D_FORCE_DISP1 0xFFFBFFFF
83#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19)
84#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1)
85#define C_00000D_FORCE_TOP 0xFFF7FFFF
86#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20)
87#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1)
88#define C_00000D_FORCE_E2 0xFFEFFFFF
89#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21)
90#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1)
91#define C_00000D_FORCE_SE 0xFFDFFFFF
92#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22)
93#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1)
94#define C_00000D_FORCE_IDCT 0xFFBFFFFF
95#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23)
96#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1)
97#define C_00000D_FORCE_VIP 0xFF7FFFFF
98#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24)
99#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1)
100#define C_00000D_FORCE_RE 0xFEFFFFFF
101#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25)
102#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1)
103#define C_00000D_FORCE_PB 0xFDFFFFFF
104#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26)
105#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1)
106#define C_00000D_FORCE_TAM 0xFBFFFFFF
107#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27)
108#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1)
109#define C_00000D_FORCE_TDM 0xF7FFFFFF
110#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28)
111#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1)
112#define C_00000D_FORCE_RB 0xEFFFFFFF
113#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29)
114#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1)
115#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF
116#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30)
117#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1)
118#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF
119#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31)
120#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1)
121#define C_00000D_FORCE_OV0 0x7FFFFFFF
122
123#endif
diff --git a/drivers/gpu/drm/radeon/rv350d.h b/drivers/gpu/drm/radeon/rv350d.h
new file mode 100644
index 000000000000..c75c5ed9e654
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv350d.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __RV350D_H__
29#define __RV350D_H__
30
31/* RV350, RV380 registers */
32/* #define R_00000D_SCLK_CNTL 0x00000D */
33#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21)
34#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1)
35#define C_00000D_FORCE_VAP 0xFFDFFFFF
36#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25)
37#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1)
38#define C_00000D_FORCE_SR 0xFDFFFFFF
39#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26)
40#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1)
41#define C_00000D_FORCE_PX 0xFBFFFFFF
42#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27)
43#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1)
44#define C_00000D_FORCE_TX 0xF7FFFFFF
45#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28)
46#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1)
47#define C_00000D_FORCE_US 0xEFFFFFFF
48#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30)
49#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1)
50#define C_00000D_FORCE_SU 0xBFFFFFFF
51
52#endif
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index e53b5ca7a253..41a34c23e6d8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -478,7 +478,7 @@ static int rv515_startup(struct radeon_device *rdev)
478 } 478 }
479 /* Enable IRQ */ 479 /* Enable IRQ */
480 rdev->irq.sw_int = true; 480 rdev->irq.sw_int = true;
481 r100_irq_set(rdev); 481 rs600_irq_set(rdev);
482 /* 1M ring buffer */ 482 /* 1M ring buffer */
483 r = r100_cp_init(rdev, 1024 * 1024); 483 r = r100_cp_init(rdev, 1024 * 1024);
484 if (r) { 484 if (r) {
@@ -520,7 +520,7 @@ int rv515_suspend(struct radeon_device *rdev)
520{ 520{
521 r100_cp_disable(rdev); 521 r100_cp_disable(rdev);
522 r100_wb_disable(rdev); 522 r100_wb_disable(rdev);
523 r100_irq_disable(rdev); 523 rs600_irq_disable(rdev);
524 if (rdev->flags & RADEON_IS_PCIE) 524 if (rdev->flags & RADEON_IS_PCIE)
525 rv370_pcie_gart_disable(rdev); 525 rv370_pcie_gart_disable(rdev);
526 return 0; 526 return 0;
@@ -553,7 +553,6 @@ int rv515_init(struct radeon_device *rdev)
553{ 553{
554 int r; 554 int r;
555 555
556 rdev->new_init_path = true;
557 /* Initialize scratch registers */ 556 /* Initialize scratch registers */
558 radeon_scratch_init(rdev); 557 radeon_scratch_init(rdev);
559 /* Initialize surface registers */ 558 /* Initialize surface registers */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index e0b97d161397..595ac638039d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
75 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 75 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
76 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 76 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
77 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 77 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
78 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); 78 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
79 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 79 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
80 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 80 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
81 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 81 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
@@ -126,17 +126,36 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
126} 126}
127 127
128 128
129/* 129void rv770_agp_enable(struct radeon_device *rdev)
130 * MC
131 */
132static void rv770_mc_resume(struct radeon_device *rdev)
133{ 130{
134 u32 d1vga_control, d2vga_control; 131 u32 tmp;
135 u32 vga_render_control, vga_hdp_control; 132 int i;
136 u32 d1crtc_control, d2crtc_control; 133
137 u32 new_d1grph_primary, new_d1grph_secondary; 134 /* Setup L2 cache */
138 u32 new_d2grph_primary, new_d2grph_secondary; 135 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
139 u64 old_vram_start; 136 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
137 EFFECTIVE_L2_QUEUE_SIZE(7));
138 WREG32(VM_L2_CNTL2, 0);
139 WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
140 /* Setup TLB control */
141 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
142 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
143 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
144 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
145 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
146 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
147 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
148 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
149 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
150 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
151 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
152 for (i = 0; i < 7; i++)
153 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
154}
155
156static void rv770_mc_program(struct radeon_device *rdev)
157{
158 struct rv515_mc_save save;
140 u32 tmp; 159 u32 tmp;
141 int i, j; 160 int i, j;
142 161
@@ -150,53 +169,42 @@ static void rv770_mc_resume(struct radeon_device *rdev)
150 } 169 }
151 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 170 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
152 171
153 d1vga_control = RREG32(D1VGA_CONTROL); 172 rv515_mc_stop(rdev, &save);
154 d2vga_control = RREG32(D2VGA_CONTROL);
155 vga_render_control = RREG32(VGA_RENDER_CONTROL);
156 vga_hdp_control = RREG32(VGA_HDP_CONTROL);
157 d1crtc_control = RREG32(D1CRTC_CONTROL);
158 d2crtc_control = RREG32(D2CRTC_CONTROL);
159 old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
160 new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS);
161 new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS);
162 new_d1grph_primary += rdev->mc.vram_start - old_vram_start;
163 new_d1grph_secondary += rdev->mc.vram_start - old_vram_start;
164 new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS);
165 new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS);
166 new_d2grph_primary += rdev->mc.vram_start - old_vram_start;
167 new_d2grph_secondary += rdev->mc.vram_start - old_vram_start;
168
169 /* Stop all video */
170 WREG32(D1VGA_CONTROL, 0);
171 WREG32(D2VGA_CONTROL, 0);
172 WREG32(VGA_RENDER_CONTROL, 0);
173 WREG32(D1CRTC_UPDATE_LOCK, 1);
174 WREG32(D2CRTC_UPDATE_LOCK, 1);
175 WREG32(D1CRTC_CONTROL, 0);
176 WREG32(D2CRTC_CONTROL, 0);
177 WREG32(D1CRTC_UPDATE_LOCK, 0);
178 WREG32(D2CRTC_UPDATE_LOCK, 0);
179
180 mdelay(1);
181 if (r600_mc_wait_for_idle(rdev)) { 173 if (r600_mc_wait_for_idle(rdev)) {
 182 printk(KERN_WARNING "[drm] MC not idle !\n"); 174 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
183 } 175 }
184
185 /* Lockout access through VGA aperture*/ 176 /* Lockout access through VGA aperture*/
186 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 177 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
187
188 /* Update configuration */ 178 /* Update configuration */
189 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); 179 if (rdev->flags & RADEON_IS_AGP) {
190 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); 180 if (rdev->mc.vram_start < rdev->mc.gtt_start) {
181 /* VRAM before AGP */
182 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
183 rdev->mc.vram_start >> 12);
184 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
185 rdev->mc.gtt_end >> 12);
186 } else {
187 /* VRAM after AGP */
188 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
189 rdev->mc.gtt_start >> 12);
190 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
191 rdev->mc.vram_end >> 12);
192 }
193 } else {
194 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
195 rdev->mc.vram_start >> 12);
196 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
197 rdev->mc.vram_end >> 12);
198 }
191 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 199 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
192 tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; 200 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
193 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 201 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
194 WREG32(MC_VM_FB_LOCATION, tmp); 202 WREG32(MC_VM_FB_LOCATION, tmp);
195 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 203 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
196 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 204 WREG32(HDP_NONSURFACE_INFO, (2 << 7));
197 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); 205 WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
198 if (rdev->flags & RADEON_IS_AGP) { 206 if (rdev->flags & RADEON_IS_AGP) {
199 WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); 207 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
200 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); 208 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
201 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); 209 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
202 } else { 210 } else {
@@ -204,31 +212,10 @@ static void rv770_mc_resume(struct radeon_device *rdev)
204 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 212 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
205 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 213 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
206 } 214 }
207 WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary);
208 WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary);
209 WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary);
210 WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary);
211 WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
212
213 /* Unlock host access */
214 WREG32(VGA_HDP_CONTROL, vga_hdp_control);
215
216 mdelay(1);
217 if (r600_mc_wait_for_idle(rdev)) { 215 if (r600_mc_wait_for_idle(rdev)) {
 218 printk(KERN_WARNING "[drm] MC not idle !\n"); 216 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
219 } 217 }
220 218 rv515_mc_resume(rdev, &save);
221 /* Restore video state */
222 WREG32(D1CRTC_UPDATE_LOCK, 1);
223 WREG32(D2CRTC_UPDATE_LOCK, 1);
224 WREG32(D1CRTC_CONTROL, d1crtc_control);
225 WREG32(D2CRTC_CONTROL, d2crtc_control);
226 WREG32(D1CRTC_UPDATE_LOCK, 0);
227 WREG32(D2CRTC_UPDATE_LOCK, 0);
228 WREG32(D1VGA_CONTROL, d1vga_control);
229 WREG32(D2VGA_CONTROL, d2vga_control);
230 WREG32(VGA_RENDER_CONTROL, vga_render_control);
231
232 /* we need to own VRAM, so turn off the VGA renderer here 219 /* we need to own VRAM, so turn off the VGA renderer here
233 * to stop it overwriting our objects */ 220 * to stop it overwriting our objects */
234 rv515_vga_render_disable(rdev); 221 rv515_vga_render_disable(rdev);
@@ -840,9 +827,9 @@ int rv770_mc_init(struct radeon_device *rdev)
840 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 827 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
841 } 828 }
842 rdev->mc.vram_start = rdev->mc.vram_location; 829 rdev->mc.vram_start = rdev->mc.vram_location;
843 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; 830 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
844 rdev->mc.gtt_start = rdev->mc.gtt_location; 831 rdev->mc.gtt_start = rdev->mc.gtt_location;
845 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; 832 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
846 /* FIXME: we should enforce default clock in case GPU is not in 833 /* FIXME: we should enforce default clock in case GPU is not in
847 * default setup 834 * default setup
848 */ 835 */
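With this hunk, vram_end and gtt_end become inclusive (the last valid byte) rather than one-past-the-end, which is why the earlier hunks in this file drop the explicit "- 1" before shifting the end address into page or segment registers. A short worked example with illustrative numbers:

/* Worked example of the inclusive end-address convention.
 * The VRAM size is an arbitrary illustrative value. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
        uint64_t vram_location = 0;                /* base of VRAM          */
        uint64_t mc_vram_size  = 256ull << 20;     /* 256 MiB, for example  */

        uint64_t end_exclusive = vram_location + mc_vram_size;      /* old: one past the end */
        uint64_t end_inclusive = vram_location + mc_vram_size - 1;  /* new: last valid byte  */

        /* Both conventions name the same last 4KB page ... */
        assert((end_exclusive - 1) >> 12 == end_inclusive >> 12);
        /* ... and the same last 16MB segment for FB_LOCATION-style fields. */
        assert((end_exclusive - 1) >> 24 == end_inclusive >> 24);

        printf("last page 0x%llx, last segment 0x%llx\n",
               (unsigned long long)(end_inclusive >> 12),
               (unsigned long long)(end_inclusive >> 24));
        return 0;
}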
@@ -861,11 +848,14 @@ static int rv770_startup(struct radeon_device *rdev)
861{ 848{
862 int r; 849 int r;
863 850
864 radeon_gpu_reset(rdev); 851 rv770_mc_program(rdev);
865 rv770_mc_resume(rdev); 852 if (rdev->flags & RADEON_IS_AGP) {
866 r = rv770_pcie_gart_enable(rdev); 853 rv770_agp_enable(rdev);
867 if (r) 854 } else {
868 return r; 855 r = rv770_pcie_gart_enable(rdev);
856 if (r)
857 return r;
858 }
869 rv770_gpu_init(rdev); 859 rv770_gpu_init(rdev);
870 860
871 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 861 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
@@ -884,9 +874,8 @@ static int rv770_startup(struct radeon_device *rdev)
884 r = r600_cp_resume(rdev); 874 r = r600_cp_resume(rdev);
885 if (r) 875 if (r)
886 return r; 876 return r;
 887 r = r600_wb_init(rdev); 877 /* write back buffers are not vital so don't worry about failure */
888 if (r) 878 r600_wb_enable(rdev);
889 return r;
890 return 0; 879 return 0;
891} 880}
892 881
@@ -894,15 +883,12 @@ int rv770_resume(struct radeon_device *rdev)
894{ 883{
895 int r; 884 int r;
896 885
 897 if (radeon_gpu_reset(rdev)) { 886 /* Do not reset GPU before posting; on rv770 hw, unlike r500 hw,
 898 /* FIXME: what do we want to do here ? */ 887 * posting will perform the necessary tasks to bring the GPU back into good
 899 } 888 * shape.
889 */
900 /* post card */ 890 /* post card */
901 if (rdev->is_atom_bios) { 891 atom_asic_init(rdev->mode_info.atom_context);
902 atom_asic_init(rdev->mode_info.atom_context);
903 } else {
904 radeon_combios_asic_init(rdev->ddev);
905 }
906 /* Initialize clocks */ 892 /* Initialize clocks */
907 r = radeon_clocks_init(rdev); 893 r = radeon_clocks_init(rdev);
908 if (r) { 894 if (r) {
@@ -915,7 +901,7 @@ int rv770_resume(struct radeon_device *rdev)
915 return r; 901 return r;
916 } 902 }
917 903
918 r = radeon_ib_test(rdev); 904 r = r600_ib_test(rdev);
919 if (r) { 905 if (r) {
 920 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 906 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
921 return r; 907 return r;
@@ -929,8 +915,8 @@ int rv770_suspend(struct radeon_device *rdev)
929 /* FIXME: we should wait for ring to be empty */ 915 /* FIXME: we should wait for ring to be empty */
930 r700_cp_stop(rdev); 916 r700_cp_stop(rdev);
931 rdev->cp.ready = false; 917 rdev->cp.ready = false;
918 r600_wb_disable(rdev);
932 rv770_pcie_gart_disable(rdev); 919 rv770_pcie_gart_disable(rdev);
933
934 /* unpin shaders bo */ 920 /* unpin shaders bo */
935 radeon_object_unpin(rdev->r600_blit.shader_obj); 921 radeon_object_unpin(rdev->r600_blit.shader_obj);
936 return 0; 922 return 0;
@@ -946,7 +932,6 @@ int rv770_init(struct radeon_device *rdev)
946{ 932{
947 int r; 933 int r;
948 934
949 rdev->new_init_path = true;
950 r = radeon_dummy_page_init(rdev); 935 r = radeon_dummy_page_init(rdev);
951 if (r) 936 if (r)
952 return r; 937 return r;
@@ -960,8 +945,10 @@ int rv770_init(struct radeon_device *rdev)
960 return -EINVAL; 945 return -EINVAL;
961 } 946 }
962 /* Must be an ATOMBIOS */ 947 /* Must be an ATOMBIOS */
963 if (!rdev->is_atom_bios) 948 if (!rdev->is_atom_bios) {
949 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
964 return -EINVAL; 950 return -EINVAL;
951 }
965 r = radeon_atombios_init(rdev); 952 r = radeon_atombios_init(rdev);
966 if (r) 953 if (r)
967 return r; 954 return r;
@@ -983,15 +970,8 @@ int rv770_init(struct radeon_device *rdev)
983 if (r) 970 if (r)
984 return r; 971 return r;
985 r = rv770_mc_init(rdev); 972 r = rv770_mc_init(rdev);
986 if (r) { 973 if (r)
987 if (rdev->flags & RADEON_IS_AGP) {
988 /* Retry with disabling AGP */
989 rv770_fini(rdev);
990 rdev->flags &= ~RADEON_IS_AGP;
991 return rv770_init(rdev);
992 }
993 return r; 974 return r;
994 }
995 /* Memory manager */ 975 /* Memory manager */
996 r = radeon_object_init(rdev); 976 r = radeon_object_init(rdev);
997 if (r) 977 if (r)
@@ -1020,12 +1000,10 @@ int rv770_init(struct radeon_device *rdev)
1020 1000
1021 r = rv770_startup(rdev); 1001 r = rv770_startup(rdev);
1022 if (r) { 1002 if (r) {
1023 if (rdev->flags & RADEON_IS_AGP) { 1003 rv770_suspend(rdev);
1024 /* Retry with disabling AGP */ 1004 r600_wb_fini(rdev);
1025 rv770_fini(rdev); 1005 radeon_ring_fini(rdev);
1026 rdev->flags &= ~RADEON_IS_AGP; 1006 rv770_pcie_gart_fini(rdev);
1027 return rv770_init(rdev);
1028 }
1029 rdev->accel_working = false; 1007 rdev->accel_working = false;
1030 } 1008 }
1031 if (rdev->accel_working) { 1009 if (rdev->accel_working) {
@@ -1034,7 +1012,7 @@ int rv770_init(struct radeon_device *rdev)
 1034 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); 1012 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1035 rdev->accel_working = false; 1013 rdev->accel_working = false;
1036 } 1014 }
1037 r = radeon_ib_test(rdev); 1015 r = r600_ib_test(rdev);
1038 if (r) { 1016 if (r) {
 1039 DRM_ERROR("radeon: failed testing IB (%d).\n", r); 1017 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1040 rdev->accel_working = false; 1018 rdev->accel_working = false;
@@ -1049,20 +1027,15 @@ void rv770_fini(struct radeon_device *rdev)
1049 1027
1050 r600_blit_fini(rdev); 1028 r600_blit_fini(rdev);
1051 radeon_ring_fini(rdev); 1029 radeon_ring_fini(rdev);
1030 r600_wb_fini(rdev);
1052 rv770_pcie_gart_fini(rdev); 1031 rv770_pcie_gart_fini(rdev);
1053 radeon_gem_fini(rdev); 1032 radeon_gem_fini(rdev);
1054 radeon_fence_driver_fini(rdev); 1033 radeon_fence_driver_fini(rdev);
1055 radeon_clocks_fini(rdev); 1034 radeon_clocks_fini(rdev);
1056#if __OS_HAS_AGP
1057 if (rdev->flags & RADEON_IS_AGP) 1035 if (rdev->flags & RADEON_IS_AGP)
1058 radeon_agp_fini(rdev); 1036 radeon_agp_fini(rdev);
1059#endif
1060 radeon_object_fini(rdev); 1037 radeon_object_fini(rdev);
1061 if (rdev->is_atom_bios) { 1038 radeon_atombios_fini(rdev);
1062 radeon_atombios_fini(rdev);
1063 } else {
1064 radeon_combios_fini(rdev);
1065 }
1066 kfree(rdev->bios); 1039 kfree(rdev->bios);
1067 rdev->bios = NULL; 1040 rdev->bios = NULL;
1068 radeon_dummy_page_fini(rdev); 1041 radeon_dummy_page_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
index 541744d00d3e..b17007178a36 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
82 if (unlikely(ret != 0)) 82 if (unlikely(ret != 0))
83 goto out_err; 83 goto out_err;
84 84
85 ++item->refcount;
86 } 85 }
86 ++item->refcount;
87 ref->object = item->object; 87 ref->object = item->object;
88 object = item->object; 88 object = item->object;
89 mutex_unlock(&item->mutex); 89 mutex_unlock(&item->mutex);
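The ttm_global change above moves the reference-count increment out of the branch that creates the object on first use, so that both the creating caller and every later caller take a reference; before the fix only the creation path incremented, leaving later references unbalanced against ttm_global_item_unref(). Reduced to a sketch with simplified, illustrative types (not the actual TTM structures):

/* Lookup-or-create with a shared refcount: every successful call, whether
 * it created the object or found it already initialized, must take exactly
 * one reference.  Simplified, illustrative types only. */
#include <stddef.h>
#include <pthread.h>

struct global_item {
        pthread_mutex_t mutex;
        void *object;
        int refcount;
};

static int item_ref(struct global_item *item,
                    int (*init)(struct global_item *), void **out)
{
        int ret = 0;

        pthread_mutex_lock(&item->mutex);
        if (item->refcount == 0) {      /* first user creates the object   */
                ret = init(item);
                if (ret != 0)
                        goto out_unlock;
        }
        ++item->refcount;               /* taken on every successful ref   */
        *out = item->object;
out_unlock:
        pthread_mutex_unlock(&item->mutex);
        return ret;
}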
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6639ea03dd..ba05275e5104 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -30,6 +30,7 @@
30#include <linux/major.h> 30#include <linux/major.h>
31#include <linux/hid.h> 31#include <linux/hid.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <linux/sched.h>
33#include <linux/smp_lock.h> 34#include <linux/smp_lock.h>
34 35
35#include <linux/hidraw.h> 36#include <linux/hidraw.h>
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index ea955edde87e..2a7a85a6dc36 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
915 return ret; 915 return ret;
916} 916}
917 917
918static struct file_operations watchdog_fops = { 918static const struct file_operations watchdog_fops = {
919 .owner = THIS_MODULE, 919 .owner = THIS_MODULE,
920 .llseek = no_llseek, 920 .llseek = no_llseek,
921 .open = watchdog_open, 921 .open = watchdog_open,
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index ecd739534f6a..82b16808a274 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -83,7 +83,8 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
83 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 83 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
84 lis3lv02d_joystick_disable(); 84 lis3lv02d_joystick_disable();
85 lis3lv02d_poweroff(lis3); 85 lis3lv02d_poweroff(lis3);
86 return 0; 86
87 return lis3lv02d_remove_fs(&lis3_dev);
87} 88}
88 89
89#ifdef CONFIG_PM 90#ifdef CONFIG_PM
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 6c9a04136e0a..00d975eb5b83 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -20,11 +20,6 @@
20#include <linux/hwmon.h> 20#include <linux/hwmon.h>
21#include <linux/hwmon-sysfs.h> 21#include <linux/hwmon-sysfs.h>
22 22
23static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
24
25/* Insmod parameters */
26I2C_CLIENT_INSMOD_1(ltc4215);
27
28/* Here are names of the chip's registers (a.k.a. commands) */ 23/* Here are names of the chip's registers (a.k.a. commands) */
29enum ltc4215_cmd { 24enum ltc4215_cmd {
30 LTC4215_CONTROL = 0x00, /* rw */ 25 LTC4215_CONTROL = 0x00, /* rw */
@@ -246,9 +241,13 @@ static const struct attribute_group ltc4215_group = {
246static int ltc4215_probe(struct i2c_client *client, 241static int ltc4215_probe(struct i2c_client *client,
247 const struct i2c_device_id *id) 242 const struct i2c_device_id *id)
248{ 243{
244 struct i2c_adapter *adapter = client->adapter;
249 struct ltc4215_data *data; 245 struct ltc4215_data *data;
250 int ret; 246 int ret;
251 247
248 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
249 return -ENODEV;
250
252 data = kzalloc(sizeof(*data), GFP_KERNEL); 251 data = kzalloc(sizeof(*data), GFP_KERNEL);
253 if (!data) { 252 if (!data) {
254 ret = -ENOMEM; 253 ret = -ENOMEM;
@@ -294,56 +293,20 @@ static int ltc4215_remove(struct i2c_client *client)
294 return 0; 293 return 0;
295} 294}
296 295
297static int ltc4215_detect(struct i2c_client *client,
298 int kind,
299 struct i2c_board_info *info)
300{
301 struct i2c_adapter *adapter = client->adapter;
302
303 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
304 return -ENODEV;
305
306 if (kind < 0) { /* probed detection - check the chip type */
307 s32 v; /* 8 bits from the chip, or -ERRNO */
308
309 /*
310 * Register 0x01 bit b7 is reserved, expect 0
311 * Register 0x03 bit b6 and b7 are reserved, expect 0
312 */
313 v = i2c_smbus_read_byte_data(client, LTC4215_ALERT);
314 if (v < 0 || (v & (1 << 7)) != 0)
315 return -ENODEV;
316
317 v = i2c_smbus_read_byte_data(client, LTC4215_FAULT);
318 if (v < 0 || (v & ((1 << 6) | (1 << 7))) != 0)
319 return -ENODEV;
320 }
321
322 strlcpy(info->type, "ltc4215", I2C_NAME_SIZE);
323 dev_info(&adapter->dev, "ltc4215 %s at address 0x%02x\n",
324 kind < 0 ? "probed" : "forced",
325 client->addr);
326
327 return 0;
328}
329
330static const struct i2c_device_id ltc4215_id[] = { 296static const struct i2c_device_id ltc4215_id[] = {
331 { "ltc4215", ltc4215 }, 297 { "ltc4215", 0 },
332 { } 298 { }
333}; 299};
334MODULE_DEVICE_TABLE(i2c, ltc4215_id); 300MODULE_DEVICE_TABLE(i2c, ltc4215_id);
335 301
336/* This is the driver that will be inserted */ 302/* This is the driver that will be inserted */
337static struct i2c_driver ltc4215_driver = { 303static struct i2c_driver ltc4215_driver = {
338 .class = I2C_CLASS_HWMON,
339 .driver = { 304 .driver = {
340 .name = "ltc4215", 305 .name = "ltc4215",
341 }, 306 },
342 .probe = ltc4215_probe, 307 .probe = ltc4215_probe,
343 .remove = ltc4215_remove, 308 .remove = ltc4215_remove,
344 .id_table = ltc4215_id, 309 .id_table = ltc4215_id,
345 .detect = ltc4215_detect,
346 .address_data = &addr_data,
347}; 310};
348 311
349static int __init ltc4215_init(void) 312static int __init ltc4215_init(void)
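The ltc4215 conversion above (and the matching ltc4245 one below) drops the legacy probing machinery: no normal_i2c table, no I2C_CLIENT_INSMOD, no .detect or .address_data, the id table carries a plain 0 as driver data, and the SMBus functionality check moves into probe. A skeleton of a driver in that new style is sketched below; the "exampledev" names are placeholders, not an existing driver.

/* Skeleton of a new-style (probe/id_table only) I2C driver, mirroring the
 * converted ltc4215.  Everything named "exampledev" is a placeholder. */
#include <linux/module.h>
#include <linux/i2c.h>

static int exampledev_probe(struct i2c_client *client,
                            const struct i2c_device_id *id)
{
        /* The capability check now lives in probe, not in a .detect callback. */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -ENODEV;
        /* ... allocate driver state, create sysfs attributes, register hwmon ... */
        return 0;
}

static int exampledev_remove(struct i2c_client *client)
{
        /* ... tear down whatever probe set up ... */
        return 0;
}

static const struct i2c_device_id exampledev_id[] = {
        { "exampledev", 0 },
        { }
};
MODULE_DEVICE_TABLE(i2c, exampledev_id);

static struct i2c_driver exampledev_driver = {
        .driver   = { .name = "exampledev" },
        .probe    = exampledev_probe,
        .remove   = exampledev_remove,
        .id_table = exampledev_id,
        /* no .class, .detect or .address_data: explicit instantiation only */
};

static int __init exampledev_init(void)
{
        return i2c_add_driver(&exampledev_driver);
}
module_init(exampledev_init);

static void __exit exampledev_exit(void)
{
        i2c_del_driver(&exampledev_driver);
}
module_exit(exampledev_exit);

MODULE_LICENSE("GPL");

With this shape the device is instantiated explicitly (board code, i2c_new_device() or sysfs new_device) and bound by name through the id table, rather than discovered by bus probing.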
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index e38964333612..65c232a9d0c5 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -22,15 +22,6 @@
22#include <linux/hwmon.h> 22#include <linux/hwmon.h>
23#include <linux/hwmon-sysfs.h> 23#include <linux/hwmon-sysfs.h>
24 24
25/* Valid addresses are 0x20 - 0x3f
26 *
27 * For now, we do not probe, since some of these addresses
28 * are known to be unfriendly to probing */
29static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
30
31/* Insmod parameters */
32I2C_CLIENT_INSMOD_1(ltc4245);
33
34/* Here are names of the chip's registers (a.k.a. commands) */ 25/* Here are names of the chip's registers (a.k.a. commands) */
35enum ltc4245_cmd { 26enum ltc4245_cmd {
36 LTC4245_STATUS = 0x00, /* readonly */ 27 LTC4245_STATUS = 0x00, /* readonly */
@@ -369,9 +360,13 @@ static const struct attribute_group ltc4245_group = {
369static int ltc4245_probe(struct i2c_client *client, 360static int ltc4245_probe(struct i2c_client *client,
370 const struct i2c_device_id *id) 361 const struct i2c_device_id *id)
371{ 362{
363 struct i2c_adapter *adapter = client->adapter;
372 struct ltc4245_data *data; 364 struct ltc4245_data *data;
373 int ret; 365 int ret;
374 366
367 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
368 return -ENODEV;
369
375 data = kzalloc(sizeof(*data), GFP_KERNEL); 370 data = kzalloc(sizeof(*data), GFP_KERNEL);
376 if (!data) { 371 if (!data) {
377 ret = -ENOMEM; 372 ret = -ENOMEM;
@@ -418,136 +413,20 @@ static int ltc4245_remove(struct i2c_client *client)
418 return 0; 413 return 0;
419} 414}
420 415
421/* Check that some bits in a control register appear at all possible
422 * locations without changing value
423 *
424 * @client: the i2c client to use
425 * @reg: the register to read
426 * @bits: the bits to check (0xff checks all bits,
427 * 0x03 checks only the last two bits)
428 *
429 * return -ERRNO if the register read failed
430 * return -ENODEV if the register value doesn't stay constant at all
431 * possible addresses
432 *
433 * return 0 for success
434 */
435static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits)
436{
437 int i;
438 s32 v, voff1, voff2;
439
440 /* Read register and check for error */
441 v = i2c_smbus_read_byte_data(client, reg);
442 if (v < 0)
443 return v;
444
445 v &= bits;
446
447 for (i = 0x00; i < 0xff; i += 0x20) {
448
449 voff1 = i2c_smbus_read_byte_data(client, reg + i);
450 if (voff1 < 0)
451 return voff1;
452
453 voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08);
454 if (voff2 < 0)
455 return voff2;
456
457 voff1 &= bits;
458 voff2 &= bits;
459
460 if (v != voff1 || v != voff2)
461 return -ENODEV;
462 }
463
464 return 0;
465}
466
467static int ltc4245_detect(struct i2c_client *client,
468 int kind,
469 struct i2c_board_info *info)
470{
471 struct i2c_adapter *adapter = client->adapter;
472
473 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
474 return -ENODEV;
475
476 if (kind < 0) { /* probed detection - check the chip type */
477 s32 v; /* 8 bits from the chip, or -ERRNO */
478
479 /* Chip registers 0x00-0x07 are control registers
480 * Chip registers 0x10-0x1f are data registers
481 *
482 * Address bits b7-b5 are ignored. This makes the chip "repeat"
483 * in steps of 0x20. Any control registers should appear with
484 * the same values across all duplicated addresses.
485 *
486 * Register 0x02 bit b2 is reserved, expect 0
487 * Register 0x07 bits b7 to b4 are reserved, expect 0
488 *
489 * Registers 0x01, 0x02 are control registers and should not
490 * change on their own.
491 *
492 * Register 0x06 bits b6 and b7 are control bits, and should
493 * not change on their own.
494 *
495 * Register 0x07 bits b3 to b0 are control bits, and should
496 * not change on their own.
497 */
498
499 /* read register 0x02 reserved bit, expect 0 */
500 v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL);
501 if (v < 0 || (v & 0x04) != 0)
502 return -ENODEV;
503
504 /* read register 0x07 reserved bits, expect 0 */
505 v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR);
506 if (v < 0 || (v & 0xf0) != 0)
507 return -ENODEV;
508
509 /* check that the alert register appears at all locations */
510 if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff))
511 return -ENODEV;
512
513 /* check that the control register appears at all locations */
514 if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff))
515 return -ENODEV;
516
517 /* check that register 0x06 bits b6 and b7 stay constant */
518 if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0))
519 return -ENODEV;
520
521 /* check that register 0x07 bits b3-b0 stay constant */
522 if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f))
523 return -ENODEV;
524 }
525
526 strlcpy(info->type, "ltc4245", I2C_NAME_SIZE);
527 dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n",
528 kind < 0 ? "probed" : "forced",
529 client->addr);
530
531 return 0;
532}
533
534static const struct i2c_device_id ltc4245_id[] = { 416static const struct i2c_device_id ltc4245_id[] = {
535 { "ltc4245", ltc4245 }, 417 { "ltc4245", 0 },
536 { } 418 { }
537}; 419};
538MODULE_DEVICE_TABLE(i2c, ltc4245_id); 420MODULE_DEVICE_TABLE(i2c, ltc4245_id);
539 421
540/* This is the driver that will be inserted */ 422/* This is the driver that will be inserted */
541static struct i2c_driver ltc4245_driver = { 423static struct i2c_driver ltc4245_driver = {
542 .class = I2C_CLASS_HWMON,
543 .driver = { 424 .driver = {
544 .name = "ltc4245", 425 .name = "ltc4245",
545 }, 426 },
546 .probe = ltc4245_probe, 427 .probe = ltc4245_probe,
547 .remove = ltc4245_remove, 428 .remove = ltc4245_remove,
548 .id_table = ltc4245_id, 429 .id_table = ltc4245_id,
549 .detect = ltc4245_detect,
550 .address_data = &addr_data,
551}; 430};
552 431
553static int __init ltc4245_init(void) 432static int __init ltc4245_init(void)
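As with ltc4215, the ltc4245 driver above loses its detect()/address_data plumbing and instead checks SMBus byte-data support directly in probe(). When the bus number is not known at build time, the device can be created at run time against a specific adapter instead; a sketch, with the adapter pointer and the 0x20 address purely illustrative:

#include <linux/i2c.h>

static struct i2c_client *example_add_ltc4245(struct i2c_adapter *adap)
{
	struct i2c_board_info info = {
		I2C_BOARD_INFO("ltc4245", 0x20),	/* example address */
	};

	/* returns NULL if the client could not be created */
	return i2c_new_device(adap, &info);
}

On kernels that expose the i2c new_device sysfs file, the same instantiation can also be done from user space by writing the chip name and address to that file.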
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index f7d6fe9c49ba..8f0b90ef8c76 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -364,7 +364,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
364 error = acpi_check_region(amd756_ioport, SMB_IOSIZE, 364 error = acpi_check_region(amd756_ioport, SMB_IOSIZE,
365 amd756_driver.name); 365 amd756_driver.name);
366 if (error) 366 if (error)
367 return error; 367 return -ENODEV;
368 368
369 if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { 369 if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) {
370 dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", 370 dev_err(&pdev->dev, "SMB region 0x%x already in use!\n",
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index a7c59908c457..5b4ad86ca166 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -376,8 +376,10 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
376 smbus->size = pci_resource_len(dev, 0); 376 smbus->size = pci_resource_len(dev, 0);
377 377
378 error = acpi_check_resource_conflict(&dev->resource[0]); 378 error = acpi_check_resource_conflict(&dev->resource[0]);
379 if (error) 379 if (error) {
380 error = -ENODEV;
380 goto out_kfree; 381 goto out_kfree;
382 }
381 383
382 if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { 384 if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) {
383 error = -EBUSY; 385 error = -EBUSY;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 9d2c5adf5d4f..55edcfe5b851 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -732,8 +732,10 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
732 } 732 }
733 733
734 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); 734 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
735 if (err) 735 if (err) {
736 err = -ENODEV;
736 goto exit; 737 goto exit;
738 }
737 739
738 err = pci_request_region(dev, SMBBAR, i801_driver.name); 740 err = pci_request_region(dev, SMBBAR, i801_driver.name);
739 if (err) { 741 if (err) {
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 9f6b8e0f8632..dba6eb053e2f 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -281,7 +281,7 @@ static int __devinit sch_probe(struct pci_dev *dev,
281 return -ENODEV; 281 return -ENODEV;
282 } 282 }
283 if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name)) 283 if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name))
284 return -EBUSY; 284 return -ENODEV;
285 if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) { 285 if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) {
286 dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", 286 dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
287 sch_smba); 287 sch_smba);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index a782c7a08f9e..d26a972aacaa 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -169,7 +169,7 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
169 } 169 }
170 170
171 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) 171 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
172 return -EBUSY; 172 return -ENODEV;
173 173
174 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { 174 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
175 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", 175 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
@@ -260,7 +260,7 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
260 260
261 piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; 261 piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
262 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) 262 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
263 return -EBUSY; 263 return -ENODEV;
264 264
265 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { 265 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
266 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", 266 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 8295885b2fdb..1649963b00dc 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -280,7 +280,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev,
280 280
281 retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); 281 retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]);
282 if (retval) 282 if (retval)
283 return retval; 283 return -ENODEV;
284 284
285 /* Everything is happy, let's grab the memory and set things up. */ 285 /* Everything is happy, let's grab the memory and set things up. */
286 if (!request_region(sis96x_smbus_base, SMB_IOSIZE, 286 if (!request_region(sis96x_smbus_base, SMB_IOSIZE,
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 54d810a4d00f..e4b1543015af 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -365,7 +365,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
365found: 365found:
366 error = acpi_check_region(vt596_smba, 8, vt596_driver.name); 366 error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
367 if (error) 367 if (error)
368 return error; 368 return -ENODEV;
369 369
370 if (!request_region(vt596_smba, 8, vt596_driver.name)) { 370 if (!request_region(vt596_smba, 8, vt596_driver.name)) {
371 dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", 371 dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
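The seven i2c bus-driver hunks above (amd756, amd8111, i801, isch, piix4, sis96x, viapro) converge on the same probe-time idiom: an ACPI-reported conflict means the firmware owns the I/O range, so the driver steps aside with -ENODEV instead of returning the ACPI error code or -EBUSY, while a failed request_region() remains a genuine -EBUSY. A generic sketch of that idiom (not taken from any one of these drivers):

#include <linux/acpi.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int example_claim_smbus_io(resource_size_t base, resource_size_t len,
				  const char *name)
{
	if (acpi_check_region(base, len, name))
		return -ENODEV;		/* region owned by ACPI: skip the device */
	if (!request_region(base, len, name))
		return -EBUSY;		/* genuine resource clash */
	return 0;
}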
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 28d09a5d8450..017c09540c2f 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -273,14 +273,8 @@ static const struct ide_proc_devset ide_generic_settings[] = {
273 273
274static void proc_ide_settings_warn(void) 274static void proc_ide_settings_warn(void)
275{ 275{
276 static int warned; 276 printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
277
278 if (warned)
279 return;
280
281 printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
282 "obsolete, and will be removed soon!\n"); 277 "obsolete, and will be removed soon!\n");
283 warned = 1;
284} 278}
285 279
286static int ide_settings_proc_show(struct seq_file *m, void *v) 280static int ide_settings_proc_show(struct seq_file *m, void *v)
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index afca22beaadf..3b88eba04c9c 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> 2 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
3 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer 3 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
4 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz> 4 * Copyright (C) 2003 Vojtech Pavlik <vojtech@suse.cz>
5 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 5 * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * May be copied or modified under the terms of the GNU General Public License 7 * May be copied or modified under the terms of the GNU General Public License
8 * 8 *
@@ -281,11 +281,13 @@ static void config_drive_art_rwp(ide_drive_t *drive)
281 281
282 pci_read_config_byte(dev, 0x4b, &reg4bh); 282 pci_read_config_byte(dev, 0x4b, &reg4bh);
283 283
284 rw_prefetch = reg4bh & ~(0x11 << drive->dn);
285
284 if (drive->media == ide_disk) 286 if (drive->media == ide_disk)
285 rw_prefetch = 0x11 << drive->dn; 287 rw_prefetch |= 0x11 << drive->dn;
286 288
287 if ((reg4bh & (0x11 << drive->dn)) != rw_prefetch) 289 if (reg4bh != rw_prefetch)
288 pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); 290 pci_write_config_byte(dev, 0x4b, rw_prefetch);
289} 291}
290 292
291static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) 293static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
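The sis5513 hunk turns the register 0x4b update into a proper read-modify-write: the other unit's prefetch/postwrite bits survive, the current unit's bit pair (0x11 << drive->dn) is set only for disks, and the PCI write is skipped when the value is unchanged. A small helper, for illustration only, showing just the bit arithmetic:

#include <linux/types.h>

/* dn is the drive unit (0-3); 0x11 << dn selects its bit pair */
static u8 sis_rwp_update(u8 reg4bh, unsigned int dn, bool is_disk)
{
	u8 rwp = reg4bh & ~(0x11 << dn);	/* keep the other units' bits */

	if (is_disk)
		rwp |= 0x11 << dn;		/* prefetch/postwrite for disks only */
	return rwp;				/* caller writes it back only if it differs */
}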
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 51bd9669cb1f..f504c9b00c1b 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -38,6 +38,7 @@
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/err.h> 39#include <linux/err.h>
40#include <linux/poll.h> 40#include <linux/poll.h>
41#include <linux/sched.h>
41#include <linux/file.h> 42#include <linux/file.h>
42#include <linux/mount.h> 43#include <linux/mount.h>
43#include <linux/cdev.h> 44#include <linux/cdev.h>
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8c46f2257098..7de02969ed7d 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -44,6 +44,7 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/kref.h> 45#include <linux/kref.h>
46#include <linux/compat.h> 46#include <linux/compat.h>
47#include <linux/sched.h>
47#include <linux/semaphore.h> 48#include <linux/semaphore.h>
48 49
49#include <asm/uaccess.h> 50#include <asm/uaccess.h>
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d3fff9e008a3..aec0fbdfe7f0 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -40,6 +40,7 @@
40#include <linux/err.h> 40#include <linux/err.h>
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/poll.h> 42#include <linux/poll.h>
43#include <linux/sched.h>
43#include <linux/file.h> 44#include <linux/file.h>
44#include <linux/mount.h> 45#include <linux/mount.h>
45#include <linux/cdev.h> 46#include <linux/cdev.h>
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 1148140d08a1..dee6706038aa 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -13,6 +13,7 @@
13#define EVDEV_BUFFER_SIZE 64 13#define EVDEV_BUFFER_SIZE 64
14 14
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/sched.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> 19#include <linux/init.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e828aab7dace..c6f88ebb40c7 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -17,6 +17,7 @@
17#include <linux/random.h> 17#include <linux/random.h>
18#include <linux/major.h> 18#include <linux/major.h>
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/sched.h>
20#include <linux/seq_file.h> 21#include <linux/seq_file.h>
21#include <linux/poll.h> 22#include <linux/poll.h>
22#include <linux/device.h> 23#include <linux/device.h>
@@ -1273,6 +1274,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1273 } \ 1274 } \
1274 } while (0) 1275 } while (0)
1275 1276
1277#ifdef CONFIG_PM
1276static void input_dev_reset(struct input_dev *dev, bool activate) 1278static void input_dev_reset(struct input_dev *dev, bool activate)
1277{ 1279{
1278 if (!dev->event) 1280 if (!dev->event)
@@ -1287,7 +1289,6 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
1287 } 1289 }
1288} 1290}
1289 1291
1290#ifdef CONFIG_PM
1291static int input_dev_suspend(struct device *dev) 1292static int input_dev_suspend(struct device *dev)
1292{ 1293{
1293 struct input_dev *input_dev = to_input_dev(dev); 1294 struct input_dev *input_dev = to_input_dev(dev);
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 901b2525993e..b1bd6dd32286 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -18,6 +18,7 @@
18#include <linux/input.h> 18#include <linux/input.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/major.h> 20#include <linux/major.h>
21#include <linux/sched.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/miscdevice.h> 24#include <linux/miscdevice.h>
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index c5a49aba418f..d3f57245420a 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -30,6 +30,7 @@
30 * - first public version 30 * - first public version
31 */ 31 */
32#include <linux/poll.h> 32#include <linux/poll.h>
33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/init.h> 36#include <linux/init.h>
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 966b8868f792..a13d80f7da17 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -13,6 +13,7 @@
13#define MOUSEDEV_MINORS 32 13#define MOUSEDEV_MINORS 32
14#define MOUSEDEV_MIX 31 14#define MOUSEDEV_MIX 31
15 15
16#include <linux/sched.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
18#include <linux/poll.h> 19#include <linux/poll.h>
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 2d8352419c0d..65bf91e16a42 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -603,7 +603,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
603 603
604 if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { 604 if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) {
605 u16 info = CAPIMSG_U16(skb->data, 12); // Info field 605 u16 info = CAPIMSG_U16(skb->data, 12); // Info field
606 if (info == 0) { 606 if ((info & 0xff00) == 0) {
607 mutex_lock(&cdev->ncci_list_mtx); 607 mutex_lock(&cdev->ncci_list_mtx);
608 capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); 608 capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
609 mutex_unlock(&cdev->ncci_list_mtx); 609 mutex_unlock(&cdev->ncci_list_mtx);
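The capi.c hunk above widens the success test from Info == 0 to (info & 0xff00) == 0. In CAPI 2.0 the high byte of the Info parameter encodes the error class, and the 0x00xx class means the request was accepted (possibly with an informative warning), so the NCCI should be allocated for that whole class. A one-line sketch of the predicate:

#include <linux/types.h>

/* CONNECT_B3_CONF accepted if the Info error class (high byte) is 0 */
static bool example_connect_b3_accepted(u16 info)
{
	return (info & 0xff00) == 0;
}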
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 650120261abf..3e6d17f42a98 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -40,7 +40,7 @@ static int debugmode = 0;
40MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); 40MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
41MODULE_AUTHOR("Carsten Paeth"); 41MODULE_AUTHOR("Carsten Paeth");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43module_param(debugmode, uint, 0); 43module_param(debugmode, uint, S_IRUGO|S_IWUSR);
44 44
45/* -------- type definitions ----------------------------------------- */ 45/* -------- type definitions ----------------------------------------- */
46 46
@@ -671,8 +671,8 @@ static void n0(capidrv_contr * card, capidrv_ncci * ncci)
671 NULL, /* Useruserdata */ /* $$$$ */ 671 NULL, /* Useruserdata */ /* $$$$ */
672 NULL /* Facilitydataarray */ 672 NULL /* Facilitydataarray */
673 ); 673 );
674 send_message(card, &cmsg);
675 plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); 674 plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ);
675 send_message(card, &cmsg);
676 676
677 cmd.command = ISDN_STAT_BHUP; 677 cmd.command = ISDN_STAT_BHUP;
678 cmd.driver = card->myid; 678 cmd.driver = card->myid;
@@ -924,8 +924,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
924 */ 924 */
925 capi_cmsg_answer(cmsg); 925 capi_cmsg_answer(cmsg);
926 cmsg->Reject = 1; /* ignore */ 926 cmsg->Reject = 1; /* ignore */
927 send_message(card, cmsg);
928 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); 927 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
928 send_message(card, cmsg);
929 printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", 929 printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n",
930 card->contrnr, 930 card->contrnr,
931 cmd.parm.setup.phone, 931 cmd.parm.setup.phone,
@@ -974,8 +974,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
974 case 2: /* Call will be rejected. */ 974 case 2: /* Call will be rejected. */
975 capi_cmsg_answer(cmsg); 975 capi_cmsg_answer(cmsg);
976 cmsg->Reject = 2; /* reject call, normal call clearing */ 976 cmsg->Reject = 2; /* reject call, normal call clearing */
977 send_message(card, cmsg);
978 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); 977 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
978 send_message(card, cmsg);
979 break; 979 break;
980 980
981 default: 981 default:
@@ -983,8 +983,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
983 capi_cmsg_answer(cmsg); 983 capi_cmsg_answer(cmsg);
984 cmsg->Reject = 8; /* reject call, 984 cmsg->Reject = 8; /* reject call,
985 destination out of order */ 985 destination out of order */
986 send_message(card, cmsg);
987 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); 986 plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
987 send_message(card, cmsg);
988 break; 988 break;
989 } 989 }
990 return; 990 return;
@@ -1020,8 +1020,8 @@ static void handle_plci(_cmsg * cmsg)
1020 card->bchans[plcip->chan].disconnecting = 1; 1020 card->bchans[plcip->chan].disconnecting = 1;
1021 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); 1021 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND);
1022 capi_cmsg_answer(cmsg); 1022 capi_cmsg_answer(cmsg);
1023 send_message(card, cmsg);
1024 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); 1023 plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP);
1024 send_message(card, cmsg);
1025 break; 1025 break;
1026 1026
1027 case CAPI_DISCONNECT_CONF: /* plci */ 1027 case CAPI_DISCONNECT_CONF: /* plci */
@@ -1078,8 +1078,8 @@ static void handle_plci(_cmsg * cmsg)
1078 1078
1079 if (card->bchans[plcip->chan].incoming) { 1079 if (card->bchans[plcip->chan].incoming) {
1080 capi_cmsg_answer(cmsg); 1080 capi_cmsg_answer(cmsg);
1081 send_message(card, cmsg);
1082 plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); 1081 plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
1082 send_message(card, cmsg);
1083 } else { 1083 } else {
1084 capidrv_ncci *nccip; 1084 capidrv_ncci *nccip;
1085 capi_cmsg_answer(cmsg); 1085 capi_cmsg_answer(cmsg);
@@ -1098,13 +1098,14 @@ static void handle_plci(_cmsg * cmsg)
1098 NULL /* NCPI */ 1098 NULL /* NCPI */
1099 ); 1099 );
1100 nccip->msgid = cmsg->Messagenumber; 1100 nccip->msgid = cmsg->Messagenumber;
1101 plci_change_state(card, plcip,
1102 EV_PLCI_CONNECT_ACTIVE_IND);
1103 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
1101 send_message(card, cmsg); 1104 send_message(card, cmsg);
1102 cmd.command = ISDN_STAT_DCONN; 1105 cmd.command = ISDN_STAT_DCONN;
1103 cmd.driver = card->myid; 1106 cmd.driver = card->myid;
1104 cmd.arg = plcip->chan; 1107 cmd.arg = plcip->chan;
1105 card->interface.statcallb(&cmd); 1108 card->interface.statcallb(&cmd);
1106 plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND);
1107 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ);
1108 } 1109 }
1109 break; 1110 break;
1110 1111
@@ -1193,8 +1194,8 @@ static void handle_ncci(_cmsg * cmsg)
1193 goto notfound; 1194 goto notfound;
1194 1195
1195 capi_cmsg_answer(cmsg); 1196 capi_cmsg_answer(cmsg);
1196 send_message(card, cmsg);
1197 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); 1197 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND);
1198 send_message(card, cmsg);
1198 1199
1199 cmd.command = ISDN_STAT_BCONN; 1200 cmd.command = ISDN_STAT_BCONN;
1200 cmd.driver = card->myid; 1201 cmd.driver = card->myid;
@@ -1222,8 +1223,8 @@ static void handle_ncci(_cmsg * cmsg)
1222 0, /* Reject */ 1223 0, /* Reject */
1223 NULL /* NCPI */ 1224 NULL /* NCPI */
1224 ); 1225 );
1225 send_message(card, cmsg);
1226 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); 1226 ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP);
1227 send_message(card, cmsg);
1227 break; 1228 break;
1228 } 1229 }
1229 printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); 1230 printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr);
@@ -1299,8 +1300,8 @@ static void handle_ncci(_cmsg * cmsg)
1299 card->bchans[nccip->chan].disconnecting = 1; 1300 card->bchans[nccip->chan].disconnecting = 1;
1300 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); 1301 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND);
1301 capi_cmsg_answer(cmsg); 1302 capi_cmsg_answer(cmsg);
1302 send_message(card, cmsg);
1303 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); 1303 ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP);
1304 send_message(card, cmsg);
1304 break; 1305 break;
1305 1306
1306 case CAPI_DISCONNECT_B3_CONF: /* ncci */ 1307 case CAPI_DISCONNECT_B3_CONF: /* ncci */
@@ -2014,8 +2015,8 @@ static void send_listen(capidrv_contr *card)
2014 card->cipmask, 2015 card->cipmask,
2015 card->cipmask2, 2016 card->cipmask2,
2016 NULL, NULL); 2017 NULL, NULL);
2017 send_message(card, &cmdcmsg);
2018 listen_change_state(card, EV_LISTEN_REQ); 2018 listen_change_state(card, EV_LISTEN_REQ);
2019 send_message(card, &cmdcmsg);
2019} 2020}
2020 2021
2021static void listentimerfunc(unsigned long x) 2022static void listentimerfunc(unsigned long x)
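Every capidrv hunk above applies the same reordering: the driver's CAPI state machine is advanced before send_message() hands the message to the controller, so a confirmation or indication processed right after the send already sees the post-transition state. A condensed sketch of the rule using the driver's own identifiers (not a verbatim excerpt):

/* advance the state machine first, emit the message second */
static void example_reject_call(capidrv_contr *card, capidrv_plci *plcip,
				_cmsg *cmsg)
{
	capi_cmsg_answer(cmsg);
	cmsg->Reject = 2;	/* reject call, normal call clearing */
	plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT);
	send_message(card, cmsg);
}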
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 8b256a617c8a..3697c409bec6 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -16,6 +16,7 @@
16#else 16#else
17#include <linux/fs.h> 17#include <linux/fs.h>
18#endif 18#endif
19#include <linux/sched.h>
19#include <linux/isdnif.h> 20#include <linux/isdnif.h>
20#include <net/net_namespace.h> 21#include <net/net_namespace.h>
21#include "isdn_divert.h" 22#include "isdn_divert.h"
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 234cc5d53312..44a58e6f8f65 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -334,7 +334,14 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
334 return startbytes - numbytes; 334 return startbytes - numbytes;
335} 335}
336 336
337/* process a block of data received from the device 337/**
338 * gigaset_m10x_input() - process a block of data received from the device
339 * @inbuf: received data and device descriptor structure.
340 *
341 * Called by hardware module {ser,usb}_gigaset with a block of received
342 * bytes. Separates the bytes received over the serial data channel into
343 * user data and command replies (locked/unlocked) according to the
344 * current state of the interface.
338 */ 345 */
339void gigaset_m10x_input(struct inbuf_t *inbuf) 346void gigaset_m10x_input(struct inbuf_t *inbuf)
340{ 347{
@@ -543,16 +550,17 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
543 return iraw_skb; 550 return iraw_skb;
544} 551}
545 552
546/* gigaset_send_skb 553/**
547 * called by common.c to queue an skb for sending 554 * gigaset_m10x_send_skb() - queue an skb for sending
548 * and start transmission if necessary 555 * @bcs: B channel descriptor structure.
549 * parameters: 556 * @skb: data to send.
550 * B Channel control structure 557 *
551 * skb 558 * Called by i4l.c to encode and queue an skb for sending, and start
559 * transmission if necessary.
560 *
552 * Return value: 561 * Return value:
553 * number of bytes accepted for sending 562 * number of bytes accepted for sending (skb->len) if ok,
554 * (skb->len if ok, 0 if out of buffer space) 563 * error code < 0 (eg. -ENOMEM) on error
555 * or error code (< 0, eg. -EINVAL)
556 */ 564 */
557int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) 565int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
558{ 566{
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 781c4041f7b0..5ed1d99eb9f3 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -134,6 +134,7 @@ struct bas_cardstate {
134#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ 134#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */
135#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ 135#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */
136#define BS_SUSPEND 0x100 /* USB port suspended */ 136#define BS_SUSPEND 0x100 /* USB port suspended */
137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
137 138
138 139
139static struct gigaset_driver *driver = NULL; 140static struct gigaset_driver *driver = NULL;
@@ -319,6 +320,21 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
319 return -EINVAL; 320 return -EINVAL;
320} 321}
321 322
323/* set/clear bits in base connection state, return previous state
324 */
325static inline int update_basstate(struct bas_cardstate *ucs,
326 int set, int clear)
327{
328 unsigned long flags;
329 int state;
330
331 spin_lock_irqsave(&ucs->lock, flags);
332 state = ucs->basstate;
333 ucs->basstate = (state & ~clear) | set;
334 spin_unlock_irqrestore(&ucs->lock, flags);
335 return state;
336}
337
322/* error_hangup 338/* error_hangup
323 * hang up any existing connection because of an unrecoverable error 339 * hang up any existing connection because of an unrecoverable error
324 * This function may be called from any context and takes care of scheduling 340 * This function may be called from any context and takes care of scheduling
@@ -350,12 +366,9 @@ static inline void error_hangup(struct bc_state *bcs)
350 */ 366 */
351static inline void error_reset(struct cardstate *cs) 367static inline void error_reset(struct cardstate *cs)
352{ 368{
353 /* close AT command channel to recover (ignore errors) */ 369 /* reset interrupt pipe to recover (ignore errors) */
354 req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); 370 update_basstate(cs->hw.bas, BS_RESETTING, 0);
355 371 req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT);
356 //FIXME try to recover without bothering the user
357 dev_err(cs->dev,
358 "unrecoverable error - please disconnect Gigaset base to reset\n");
359} 372}
360 373
361/* check_pending 374/* check_pending
@@ -398,8 +411,13 @@ static void check_pending(struct bas_cardstate *ucs)
398 case HD_DEVICE_INIT_ACK: /* no reply expected */ 411 case HD_DEVICE_INIT_ACK: /* no reply expected */
399 ucs->pending = 0; 412 ucs->pending = 0;
400 break; 413 break;
401 /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE 414 case HD_RESET_INTERRUPT_PIPE:
402 * are handled separately and should never end up here 415 if (!(ucs->basstate & BS_RESETTING))
416 ucs->pending = 0;
417 break;
418 /*
419 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
420 * and should never end up here
403 */ 421 */
404 default: 422 default:
405 dev_warn(&ucs->interface->dev, 423 dev_warn(&ucs->interface->dev,
@@ -449,21 +467,6 @@ static void cmd_in_timeout(unsigned long data)
449 error_reset(cs); 467 error_reset(cs);
450} 468}
451 469
452/* set/clear bits in base connection state, return previous state
453 */
454inline static int update_basstate(struct bas_cardstate *ucs,
455 int set, int clear)
456{
457 unsigned long flags;
458 int state;
459
460 spin_lock_irqsave(&ucs->lock, flags);
461 state = ucs->basstate;
462 ucs->basstate = (state & ~clear) | set;
463 spin_unlock_irqrestore(&ucs->lock, flags);
464 return state;
465}
466
467/* read_ctrl_callback 470/* read_ctrl_callback
468 * USB completion handler for control pipe input 471 * USB completion handler for control pipe input
469 * called by the USB subsystem in interrupt context 472 * called by the USB subsystem in interrupt context
@@ -762,7 +765,8 @@ static void read_int_callback(struct urb *urb)
762 break; 765 break;
763 766
764 case HD_RESET_INTERRUPT_PIPE_ACK: 767 case HD_RESET_INTERRUPT_PIPE_ACK:
765 gig_dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); 768 update_basstate(ucs, 0, BS_RESETTING);
769 dev_notice(cs->dev, "interrupt pipe reset\n");
766 break; 770 break;
767 771
768 case HD_SUSPEND_END: 772 case HD_SUSPEND_END:
@@ -1331,28 +1335,24 @@ static void read_iso_tasklet(unsigned long data)
1331 rcvbuf = urb->transfer_buffer; 1335 rcvbuf = urb->transfer_buffer;
1332 totleft = urb->actual_length; 1336 totleft = urb->actual_length;
1333 for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { 1337 for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
1334 if (unlikely(urb->iso_frame_desc[frame].status)) { 1338 numbytes = urb->iso_frame_desc[frame].actual_length;
1339 if (unlikely(urb->iso_frame_desc[frame].status))
1335 dev_warn(cs->dev, 1340 dev_warn(cs->dev,
1336 "isochronous read: frame %d: %s\n", 1341 "isochronous read: frame %d[%d]: %s\n",
1337 frame, 1342 frame, numbytes,
1338 get_usb_statmsg( 1343 get_usb_statmsg(
1339 urb->iso_frame_desc[frame].status)); 1344 urb->iso_frame_desc[frame].status));
1340 break; 1345 if (unlikely(numbytes > BAS_MAXFRAME))
1341 }
1342 numbytes = urb->iso_frame_desc[frame].actual_length;
1343 if (unlikely(numbytes > BAS_MAXFRAME)) {
1344 dev_warn(cs->dev, 1346 dev_warn(cs->dev,
1345 "isochronous read: frame %d: " 1347 "isochronous read: frame %d: "
1346 "numbytes (%d) > BAS_MAXFRAME\n", 1348 "numbytes (%d) > BAS_MAXFRAME\n",
1347 frame, numbytes); 1349 frame, numbytes);
1348 break;
1349 }
1350 if (unlikely(numbytes > totleft)) { 1350 if (unlikely(numbytes > totleft)) {
1351 dev_warn(cs->dev, 1351 dev_warn(cs->dev,
1352 "isochronous read: frame %d: " 1352 "isochronous read: frame %d: "
1353 "numbytes (%d) > totleft (%d)\n", 1353 "numbytes (%d) > totleft (%d)\n",
1354 frame, numbytes, totleft); 1354 frame, numbytes, totleft);
1355 break; 1355 numbytes = totleft;
1356 } 1356 }
1357 offset = urb->iso_frame_desc[frame].offset; 1357 offset = urb->iso_frame_desc[frame].offset;
1358 if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { 1358 if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
@@ -1361,7 +1361,7 @@ static void read_iso_tasklet(unsigned long data)
1361 "offset (%d) + numbytes (%d) " 1361 "offset (%d) + numbytes (%d) "
1362 "> BAS_INBUFSIZE\n", 1362 "> BAS_INBUFSIZE\n",
1363 frame, offset, numbytes); 1363 frame, offset, numbytes);
1364 break; 1364 numbytes = BAS_INBUFSIZE - offset;
1365 } 1365 }
1366 gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); 1366 gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
1367 totleft -= numbytes; 1367 totleft -= numbytes;
@@ -1433,6 +1433,7 @@ static void req_timeout(unsigned long data)
1433 1433
1434 case HD_CLOSE_ATCHANNEL: 1434 case HD_CLOSE_ATCHANNEL:
1435 dev_err(bcs->cs->dev, "timeout closing AT channel\n"); 1435 dev_err(bcs->cs->dev, "timeout closing AT channel\n");
1436 error_reset(bcs->cs);
1436 break; 1437 break;
1437 1438
1438 case HD_CLOSE_B2CHANNEL: 1439 case HD_CLOSE_B2CHANNEL:
@@ -1442,6 +1443,13 @@ static void req_timeout(unsigned long data)
1442 error_reset(bcs->cs); 1443 error_reset(bcs->cs);
1443 break; 1444 break;
1444 1445
1446 case HD_RESET_INTERRUPT_PIPE:
1447 /* error recovery escalation */
1448 dev_err(bcs->cs->dev,
1449 "reset interrupt pipe timeout, attempting USB reset\n");
1450 usb_queue_reset_device(bcs->cs->hw.bas->interface);
1451 break;
1452
1445 default: 1453 default:
1446 dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", 1454 dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n",
1447 pending); 1455 pending);
@@ -1934,6 +1942,15 @@ static int gigaset_write_cmd(struct cardstate *cs,
1934 goto notqueued; 1942 goto notqueued;
1935 } 1943 }
1936 1944
1945 /* translate "+++" escape sequence sent as a single separate command
1946 * into "close AT channel" command for error recovery
1947 * The next command will reopen the AT channel automatically.
1948 */
1949 if (len == 3 && !memcmp(buf, "+++", 3)) {
1950 rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
1951 goto notqueued;
1952 }
1953
1937 if (len > IF_WRITEBUF) 1954 if (len > IF_WRITEBUF)
1938 len = IF_WRITEBUF; 1955 len = IF_WRITEBUF;
1939 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 1956 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
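The bas-gigaset changes above replace the old dead-end error path ("please disconnect Gigaset base") with a two-stage recovery: error_reset() now flags BS_RESETTING and submits HD_RESET_INTERRUPT_PIPE, and only if that request itself times out does req_timeout() queue a full USB device reset. A compressed sketch of the two stages (flow only; the real code spreads this across error_reset(), check_pending() and req_timeout()):

/* stage 1: soft recovery - reset the interrupt pipe */
static void example_soft_reset(struct cardstate *cs)
{
	update_basstate(cs->hw.bas, BS_RESETTING, 0);
	req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT);
}

/* stage 2: escalate to a USB device reset if stage 1 times out */
static void example_hard_reset(struct cardstate *cs)
{
	usb_queue_reset_device(cs->hw.bas->interface);
}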
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index e4141bf8b2f3..33dcd8d72b7c 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -22,6 +22,12 @@
22#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers" 22#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers"
23#define DRIVER_DESC "Driver for Gigaset 307x" 23#define DRIVER_DESC "Driver for Gigaset 307x"
24 24
25#ifdef CONFIG_GIGASET_DEBUG
26#define DRIVER_DESC_DEBUG " (debug build)"
27#else
28#define DRIVER_DESC_DEBUG ""
29#endif
30
25/* Module parameters */ 31/* Module parameters */
26int gigaset_debuglevel = DEBUG_DEFAULT; 32int gigaset_debuglevel = DEBUG_DEFAULT;
27EXPORT_SYMBOL_GPL(gigaset_debuglevel); 33EXPORT_SYMBOL_GPL(gigaset_debuglevel);
@@ -32,6 +38,17 @@ MODULE_PARM_DESC(debug, "debug level");
32#define VALID_MINOR 0x01 38#define VALID_MINOR 0x01
33#define VALID_ID 0x02 39#define VALID_ID 0x02
34 40
41/**
42 * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging
43 * @level: debugging level.
44 * @msg: message prefix.
45 * @len: number of bytes to dump.
46 * @buf: data to dump.
47 *
48 * If the current debugging level includes one of the bits set in @level,
49 * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio,
50 * prefixed by the text @msg.
51 */
35void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, 52void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
36 size_t len, const unsigned char *buf) 53 size_t len, const unsigned char *buf)
37{ 54{
@@ -274,6 +291,20 @@ static void clear_events(struct cardstate *cs)
274 spin_unlock_irqrestore(&cs->ev_lock, flags); 291 spin_unlock_irqrestore(&cs->ev_lock, flags);
275} 292}
276 293
294/**
295 * gigaset_add_event() - add event to device event queue
296 * @cs: device descriptor structure.
297 * @at_state: connection state structure.
298 * @type: event type.
299 * @ptr: pointer parameter for event.
300 * @parameter: integer parameter for event.
301 * @arg: pointer parameter for event.
302 *
303 * Allocate an event queue entry from the device's event queue, and set it up
304 * with the parameters given.
305 *
306 * Return value: added event
307 */
277struct event_t *gigaset_add_event(struct cardstate *cs, 308struct event_t *gigaset_add_event(struct cardstate *cs,
278 struct at_state_t *at_state, int type, 309 struct at_state_t *at_state, int type,
279 void *ptr, int parameter, void *arg) 310 void *ptr, int parameter, void *arg)
@@ -398,6 +429,15 @@ static void make_invalid(struct cardstate *cs, unsigned mask)
398 spin_unlock_irqrestore(&drv->lock, flags); 429 spin_unlock_irqrestore(&drv->lock, flags);
399} 430}
400 431
432/**
 433 * gigaset_freecs() - free all associated resources of a device
434 * @cs: device descriptor structure.
435 *
436 * Stops all tasklets and timers, unregisters the device from all
437 * subsystems it was registered to, deallocates the device structure
438 * @cs and all structures referenced from it.
439 * Operations on the device should be stopped before calling this.
440 */
401void gigaset_freecs(struct cardstate *cs) 441void gigaset_freecs(struct cardstate *cs)
402{ 442{
403 int i; 443 int i;
@@ -506,7 +546,12 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
506 inbuf->inputstate = inputstate; 546 inbuf->inputstate = inputstate;
507} 547}
508 548
509/* append received bytes to inbuf */ 549/**
550 * gigaset_fill_inbuf() - append received data to input buffer
551 * @inbuf: buffer structure.
552 * @src: received data.
553 * @numbytes: number of bytes received.
554 */
510int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, 555int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
511 unsigned numbytes) 556 unsigned numbytes)
512{ 557{
@@ -606,20 +651,22 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
606 return NULL; 651 return NULL;
607} 652}
608 653
609/* gigaset_initcs 654/**
655 * gigaset_initcs() - initialize device structure
656 * @drv: hardware driver the device belongs to
657 * @channels: number of B channels supported by device
658 * @onechannel: !=0 if B channel data and AT commands share one
659 * communication channel (M10x),
660 * ==0 if B channels have separate communication channels (base)
661 * @ignoreframes: number of frames to ignore after setting up B channel
662 * @cidmode: !=0: start in CallID mode
663 * @modulename: name of driver module for LL registration
664 *
610 * Allocate and initialize cardstate structure for Gigaset driver 665 * Allocate and initialize cardstate structure for Gigaset driver
611 * Calls hardware dependent gigaset_initcshw() function 666 * Calls hardware dependent gigaset_initcshw() function
612 * Calls B channel initialization function gigaset_initbcs() for each B channel 667 * Calls B channel initialization function gigaset_initbcs() for each B channel
613 * parameters: 668 *
614 * drv hardware driver the device belongs to 669 * Return value:
615 * channels number of B channels supported by device
616 * onechannel !=0: B channel data and AT commands share one
617 * communication channel
618 * ==0: B channels have separate communication channels
619 * ignoreframes number of frames to ignore after setting up B channel
620 * cidmode !=0: start in CallID mode
621 * modulename name of driver module (used for I4L registration)
622 * return value:
623 * pointer to cardstate structure 670 * pointer to cardstate structure
624 */ 671 */
625struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, 672struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
@@ -837,6 +884,17 @@ static void cleanup_cs(struct cardstate *cs)
837} 884}
838 885
839 886
887/**
888 * gigaset_start() - start device operations
889 * @cs: device descriptor structure.
890 *
891 * Prepares the device for use by setting up communication parameters,
892 * scheduling an EV_START event to initiate device initialization, and
893 * waiting for completion of the initialization.
894 *
895 * Return value:
896 * 1 - success, 0 - error
897 */
840int gigaset_start(struct cardstate *cs) 898int gigaset_start(struct cardstate *cs)
841{ 899{
842 unsigned long flags; 900 unsigned long flags;
@@ -879,9 +937,15 @@ error:
879} 937}
880EXPORT_SYMBOL_GPL(gigaset_start); 938EXPORT_SYMBOL_GPL(gigaset_start);
881 939
882/* gigaset_shutdown 940/**
883 * check if a device is associated to the cardstate structure and stop it 941 * gigaset_shutdown() - shut down device operations
884 * return value: 0 if ok, -1 if no device was associated 942 * @cs: device descriptor structure.
943 *
944 * Deactivates the device by scheduling an EV_SHUTDOWN event and
945 * waiting for completion of the shutdown.
946 *
947 * Return value:
948 * 0 - success, -1 - error (no device associated)
885 */ 949 */
886int gigaset_shutdown(struct cardstate *cs) 950int gigaset_shutdown(struct cardstate *cs)
887{ 951{
@@ -912,6 +976,13 @@ exit:
912} 976}
913EXPORT_SYMBOL_GPL(gigaset_shutdown); 977EXPORT_SYMBOL_GPL(gigaset_shutdown);
914 978
979/**
980 * gigaset_stop() - stop device operations
981 * @cs: device descriptor structure.
982 *
983 * Stops operations on the device by scheduling an EV_STOP event and
984 * waiting for completion of the shutdown.
985 */
915void gigaset_stop(struct cardstate *cs) 986void gigaset_stop(struct cardstate *cs)
916{ 987{
917 mutex_lock(&cs->mutex); 988 mutex_lock(&cs->mutex);
@@ -1020,6 +1091,14 @@ struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
1020 return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); 1091 return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
1021} 1092}
1022 1093
1094/**
 1095 * gigaset_freedriver() - free all associated resources of a driver
1096 * @drv: driver descriptor structure.
1097 *
1098 * Unregisters the driver from the system and deallocates the driver
1099 * structure @drv and all structures referenced from it.
1100 * All devices should be shut down before calling this.
1101 */
1023void gigaset_freedriver(struct gigaset_driver *drv) 1102void gigaset_freedriver(struct gigaset_driver *drv)
1024{ 1103{
1025 unsigned long flags; 1104 unsigned long flags;
@@ -1035,14 +1114,16 @@ void gigaset_freedriver(struct gigaset_driver *drv)
1035} 1114}
1036EXPORT_SYMBOL_GPL(gigaset_freedriver); 1115EXPORT_SYMBOL_GPL(gigaset_freedriver);
1037 1116
1038/* gigaset_initdriver 1117/**
1118 * gigaset_initdriver() - initialize driver structure
1119 * @minor: First minor number
1120 * @minors: Number of minors this driver can handle
1121 * @procname: Name of the driver
1122 * @devname: Name of the device files (prefix without minor number)
1123 *
1039 * Allocate and initialize gigaset_driver structure. Initialize interface. 1124 * Allocate and initialize gigaset_driver structure. Initialize interface.
1040 * parameters: 1125 *
1041 * minor First minor number 1126 * Return value:
1042 * minors Number of minors this driver can handle
1043 * procname Name of the driver
1044 * devname Name of the device files (prefix without minor number)
1045 * return value:
1046 * Pointer to the gigaset_driver structure on success, NULL on failure. 1127 * Pointer to the gigaset_driver structure on success, NULL on failure.
1047 */ 1128 */
1048struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, 1129struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
@@ -1095,6 +1176,13 @@ error:
1095} 1176}
1096EXPORT_SYMBOL_GPL(gigaset_initdriver); 1177EXPORT_SYMBOL_GPL(gigaset_initdriver);
1097 1178
1179/**
1180 * gigaset_blockdriver() - block driver
1181 * @drv: driver descriptor structure.
1182 *
1183 * Prevents the driver from attaching new devices, in preparation for
1184 * deregistration.
1185 */
1098void gigaset_blockdriver(struct gigaset_driver *drv) 1186void gigaset_blockdriver(struct gigaset_driver *drv)
1099{ 1187{
1100 drv->blocked = 1; 1188 drv->blocked = 1;
@@ -1110,7 +1198,7 @@ static int __init gigaset_init_module(void)
1110 if (gigaset_debuglevel == 1) 1198 if (gigaset_debuglevel == 1)
1111 gigaset_debuglevel = DEBUG_DEFAULT; 1199 gigaset_debuglevel = DEBUG_DEFAULT;
1112 1200
1113 pr_info(DRIVER_DESC "\n"); 1201 pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n");
1114 return 0; 1202 return 0;
1115} 1203}
1116 1204
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 2d91049571a4..cc768caa38f5 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -207,7 +207,6 @@ struct reply_t gigaset_tab_nocid[] =
207 /* leave dle mode */ 207 /* leave dle mode */
208 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, 208 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
209 {RSP_OK, 201,201, -1, 202,-1}, 209 {RSP_OK, 201,201, -1, 202,-1},
210 //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE
211 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, 210 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}},
212 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, 211 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}},
213 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 212 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}},
@@ -265,6 +264,7 @@ struct reply_t gigaset_tab_nocid[] =
265 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME 264 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME
266 265
267 /* misc. */ 266 /* misc. */
267 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
268 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 268 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
269 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 269 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
270 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 270 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
@@ -328,10 +328,9 @@ struct reply_t gigaset_tab_cid[] =
328 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? 328 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1?
329 {RSP_OK, 401,401, -1, 402, 5}, 329 {RSP_OK, 401,401, -1, 402, 5},
330 {RSP_ZVLS, 402,402, 0, 403, 5}, 330 {RSP_ZVLS, 402,402, 0, 403, 5},
331 {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ 331 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
332 //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? 332 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
333 {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? 333 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
334 {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
335 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, 334 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}},
336 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, 335 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}},
337 336
@@ -474,8 +473,13 @@ static int cid_of_response(char *s)
474 //FIXME is ;<digit>+ at end of non-CID response really impossible? 473 //FIXME is ;<digit>+ at end of non-CID response really impossible?
475} 474}
476 475
477/* This function will be called via task queue from the callback handler. 476/**
478 * We received a modem response and have to handle it.. 477 * gigaset_handle_modem_response() - process received modem response
478 * @cs: device descriptor structure.
479 *
480 * Called by asyncdata/isocdata if a block of data received from the
481 * device must be processed as a modem command response. The data is
482 * already in the cs structure.
479 */ 483 */
480void gigaset_handle_modem_response(struct cardstate *cs) 484void gigaset_handle_modem_response(struct cardstate *cs)
481{ 485{
@@ -707,6 +711,11 @@ static void disconnect(struct at_state_t **at_state_p)
707 if (bcs) { 711 if (bcs) {
708 /* B channel assigned: invoke hardware specific handler */ 712 /* B channel assigned: invoke hardware specific handler */
709 cs->ops->close_bchannel(bcs); 713 cs->ops->close_bchannel(bcs);
714 /* notify LL */
715 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
716 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
717 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
718 }
710 } else { 719 } else {
711 /* no B channel assigned: just deallocate */ 720 /* no B channel assigned: just deallocate */
712 spin_lock_irqsave(&cs->lock, flags); 721 spin_lock_irqsave(&cs->lock, flags);
@@ -1429,11 +1438,12 @@ static void do_action(int action, struct cardstate *cs,
1429 cs->gotfwver = -1; 1438 cs->gotfwver = -1;
1430 dev_err(cs->dev, "could not read firmware version.\n"); 1439 dev_err(cs->dev, "could not read firmware version.\n");
1431 break; 1440 break;
1432#ifdef CONFIG_GIGASET_DEBUG
1433 case ACT_ERROR: 1441 case ACT_ERROR:
1434 *p_genresp = 1; 1442 gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
1435 *p_resp_code = RSP_ERROR; 1443 __func__, at_state->ConState);
1444 cs->cur_at_seq = SEQ_NONE;
1436 break; 1445 break;
1446#ifdef CONFIG_GIGASET_DEBUG
1437 case ACT_TEST: 1447 case ACT_TEST:
1438 { 1448 {
1439 static int count = 3; //2; //1; 1449 static int count = 3; //2; //1;
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 9b22f9cf2f33..654489d836cd 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -51,6 +51,12 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
51 return -ENODEV; 51 return -ENODEV;
52 } 52 }
53 bcs = &cs->bcs[channel]; 53 bcs = &cs->bcs[channel];
54
55 /* can only handle linear sk_buffs */
56 if (skb_linearize(skb) < 0) {
57 dev_err(cs->dev, "%s: skb_linearize failed\n", __func__);
58 return -ENOMEM;
59 }
54 len = skb->len; 60 len = skb->len;
55 61
56 gig_dbg(DEBUG_LLDATA, 62 gig_dbg(DEBUG_LLDATA,
@@ -79,6 +85,14 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
79 return cs->ops->send_skb(bcs, skb); 85 return cs->ops->send_skb(bcs, skb);
80} 86}
81 87
88/**
89 * gigaset_skb_sent() - acknowledge sending an skb
90 * @bcs: B channel descriptor structure.
91 * @skb: sent data.
92 *
93 * Called by hardware module {bas,ser,usb}_gigaset when the data in a
94 * skb has been successfully sent, for signalling completion to the LL.
95 */
82void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) 96void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
83{ 97{
84 unsigned len; 98 unsigned len;
@@ -455,6 +469,15 @@ int gigaset_isdn_setup_accept(struct at_state_t *at_state)
455 return 0; 469 return 0;
456} 470}
457 471
472/**
473 * gigaset_isdn_icall() - signal incoming call
474 * @at_state: connection state structure.
475 *
476 * Called by main module to notify the LL that an incoming call has been
477 * received. @at_state contains the parameters of the call.
478 *
479 * Return value: call disposition (ICALL_*)
480 */
458int gigaset_isdn_icall(struct at_state_t *at_state) 481int gigaset_isdn_icall(struct at_state_t *at_state)
459{ 482{
460 struct cardstate *cs = at_state->cs; 483 struct cardstate *cs = at_state->cs;
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index f33ac27de643..6a8e1384e7bd 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -616,6 +616,15 @@ void gigaset_if_free(struct cardstate *cs)
616 tty_unregister_device(drv->tty, cs->minor_index); 616 tty_unregister_device(drv->tty, cs->minor_index);
617} 617}
618 618
619/**
620 * gigaset_if_receive() - pass a received block of data to the tty device
621 * @cs: device descriptor structure.
622 * @buffer: received data.
623 * @len: number of bytes received.
624 *
625 * Called by asyncdata/isocdata if a block of data received from the
626 * device must be sent to userspace through the ttyG* device.
627 */
619void gigaset_if_receive(struct cardstate *cs, 628void gigaset_if_receive(struct cardstate *cs,
620 unsigned char *buffer, size_t len) 629 unsigned char *buffer, size_t len)
621{ 630{
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index bed38fcc432b..9f3ef7b4248c 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -429,7 +429,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
429 return -EAGAIN; 429 return -EAGAIN;
430 } 430 }
431 431
432 dump_bytes(DEBUG_STREAM, "snd data", in, count); 432 dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
433 433
434 /* bitstuff and checksum input data */ 434 /* bitstuff and checksum input data */
435 fcs = PPP_INITFCS; 435 fcs = PPP_INITFCS;
@@ -448,7 +448,6 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
448 /* put closing flag and repeat byte for flag idle */ 448 /* put closing flag and repeat byte for flag idle */
449 isowbuf_putflag(iwb); 449 isowbuf_putflag(iwb);
450 end = isowbuf_donewrite(iwb); 450 end = isowbuf_donewrite(iwb);
451 dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1);
452 return end; 451 return end;
453} 452}
454 453
@@ -482,6 +481,8 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
482 } 481 }
483 482
484 gig_dbg(DEBUG_STREAM, "put %d bytes", count); 483 gig_dbg(DEBUG_STREAM, "put %d bytes", count);
484 dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count);
485
485 write = iwb->write; 486 write = iwb->write;
486 do { 487 do {
487 c = bitrev8(*in++); 488 c = bitrev8(*in++);
@@ -583,7 +584,7 @@ static inline void hdlc_done(struct bc_state *bcs)
583 procskb->tail -= 2; 584 procskb->tail -= 2;
584 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", 585 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)",
585 __func__, procskb->len); 586 __func__, procskb->len);
586 dump_bytes(DEBUG_STREAM, 587 dump_bytes(DEBUG_STREAM_DUMP,
587 "rcv data", procskb->data, procskb->len); 588 "rcv data", procskb->data, procskb->len);
588 bcs->hw.bas->goodbytes += procskb->len; 589 bcs->hw.bas->goodbytes += procskb->len;
589 gigaset_rcv_skb(procskb, bcs->cs, bcs); 590 gigaset_rcv_skb(procskb, bcs->cs, bcs);
@@ -878,6 +879,8 @@ static inline void trans_receive(unsigned char *src, unsigned count,
878 dobytes--; 879 dobytes--;
879 } 880 }
880 if (dobytes == 0) { 881 if (dobytes == 0) {
882 dump_bytes(DEBUG_STREAM_DUMP,
883 "rcv data", skb->data, skb->len);
881 gigaset_rcv_skb(skb, bcs->cs, bcs); 884 gigaset_rcv_skb(skb, bcs->cs, bcs);
882 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); 885 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
883 if (!skb) { 886 if (!skb) {
@@ -973,16 +976,17 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
973 976
974/* == data output ========================================================== */ 977/* == data output ========================================================== */
975 978
976/* gigaset_send_skb 979/**
977 * called by common.c to queue an skb for sending 980 * gigaset_isoc_send_skb() - queue an skb for sending
978 * and start transmission if necessary 981 * @bcs: B channel descriptor structure.
979 * parameters: 982 * @skb: data to send.
980 * B Channel control structure 983 *
981 * skb 984 * Called by i4l.c to queue an skb for sending, and start transmission if
982 * return value: 985 * necessary.
983 * number of bytes accepted for sending 986 *
984 * (skb->len if ok, 0 if out of buffer space) 987 * Return value:
985 * or error code (< 0, eg. -EINVAL) 988 * number of bytes accepted for sending (skb->len) if ok,
989 * error code < 0 (eg. -ENODEV) on error
986 */ 990 */
987int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) 991int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
988{ 992{
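
The rewritten kernel-doc above pins down the return convention for gigaset_isoc_send_skb(): the number of bytes accepted for sending (skb->len), or a negative errno. A minimal sketch of a caller honouring that convention; the wrapper name is hypothetical:

	/* hypothetical caller; only the return-value handling matters here */
	static int queue_isoc_skb(struct bc_state *bcs, struct sk_buff *skb)
	{
		int sent = gigaset_isoc_send_skb(bcs, skb);

		if (sent < 0)
			pr_err("gigaset: queueing skb failed: %d\n", sent);

		return sent;	/* bytes accepted (skb->len) on success */
	}
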
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index c36f52137456..feb0fa45b664 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -415,7 +415,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
415} 415}
416 416
417static int data_sock_setsockopt(struct socket *sock, int level, int optname, 417static int data_sock_setsockopt(struct socket *sock, int level, int optname,
418 char __user *optval, int len) 418 char __user *optval, unsigned int len)
419{ 419{
420 struct sock *sk = sock->sk; 420 struct sock *sk = sock->sk;
421 int err = 0, opt = 0; 421 int err = 0, opt = 0;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 708a8017c21d..adc561eb59d2 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -19,9 +19,6 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/leds-pca9532.h> 20#include <linux/leds-pca9532.h>
21 21
22static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
23I2C_CLIENT_INSMOD_1(pca9532);
24
25#define PCA9532_REG_PSC(i) (0x2+(i)*2) 22#define PCA9532_REG_PSC(i) (0x2+(i)*2)
26#define PCA9532_REG_PWM(i) (0x3+(i)*2) 23#define PCA9532_REG_PWM(i) (0x3+(i)*2)
27#define PCA9532_REG_LS0 0x6 24#define PCA9532_REG_LS0 0x6
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index b4d3f7ca554f..bd1632388e4a 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -508,7 +508,7 @@ static int close(struct inode *inode, struct file *file)
508 * uses: reading and writing a character device called /dev/lguest. All the 508 * uses: reading and writing a character device called /dev/lguest. All the
509 * work happens in the read(), write() and close() routines: 509 * work happens in the read(), write() and close() routines:
510 */ 510 */
511static struct file_operations lguest_fops = { 511static const struct file_operations lguest_fops = {
512 .owner = THIS_MODULE, 512 .owner = THIS_MODULE,
513 .release = close, 513 .release = close,
514 .write = write, 514 .write = write,
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index fde377c60cca..556f0feaa4df 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -124,6 +124,8 @@ read_reg(struct thermostat* th, int reg)
124 return data; 124 return data;
125} 125}
126 126
127static struct i2c_driver thermostat_driver;
128
127static int 129static int
128attach_thermostat(struct i2c_adapter *adapter) 130attach_thermostat(struct i2c_adapter *adapter)
129{ 131{
@@ -148,7 +150,7 @@ attach_thermostat(struct i2c_adapter *adapter)
148 * Let i2c-core delete that device on driver removal. 150 * Let i2c-core delete that device on driver removal.
149 * This is safe because i2c-core holds the core_lock mutex for us. 151 * This is safe because i2c-core holds the core_lock mutex for us.
150 */ 152 */
151 list_add_tail(&client->detected, &client->driver->clients); 153 list_add_tail(&client->detected, &thermostat_driver.clients);
152 return 0; 154 return 0;
153} 155}
154 156
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index a028598af2d3..ea32c7e5a9af 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -286,6 +286,8 @@ struct fcu_fan_table fcu_fans[] = {
286 }, 286 },
287}; 287};
288 288
289static struct i2c_driver therm_pm72_driver;
290
289/* 291/*
290 * Utility function to create an i2c_client structure and 292 * Utility function to create an i2c_client structure and
291 * attach it to one of u3 adapters 293 * attach it to one of u3 adapters
@@ -318,7 +320,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name)
318 * Let i2c-core delete that device on driver removal. 320 * Let i2c-core delete that device on driver removal.
319 * This is safe because i2c-core holds the core_lock mutex for us. 321 * This is safe because i2c-core holds the core_lock mutex for us.
320 */ 322 */
321 list_add_tail(&clt->detected, &clt->driver->clients); 323 list_add_tail(&clt->detected, &therm_pm72_driver.clients);
322 return clt; 324 return clt;
323} 325}
324 326
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 529886c7a826..ed6426a10773 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -115,6 +115,8 @@ static int wf_lm75_probe(struct i2c_client *client,
115 return rc; 115 return rc;
116} 116}
117 117
118static struct i2c_driver wf_lm75_driver;
119
118static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, 120static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
119 u8 addr, int ds1775, 121 u8 addr, int ds1775,
120 const char *loc) 122 const char *loc)
@@ -157,7 +159,7 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
157 * Let i2c-core delete that device on driver removal. 159 * Let i2c-core delete that device on driver removal.
158 * This is safe because i2c-core holds the core_lock mutex for us. 160 * This is safe because i2c-core holds the core_lock mutex for us.
159 */ 161 */
160 list_add_tail(&client->detected, &client->driver->clients); 162 list_add_tail(&client->detected, &wf_lm75_driver.clients);
161 return client; 163 return client;
162 fail: 164 fail:
163 return NULL; 165 return NULL;
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index e2a55ecda2b2..a67b349319e9 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -88,6 +88,8 @@ static int wf_max6690_probe(struct i2c_client *client,
88 return rc; 88 return rc;
89} 89}
90 90
91static struct i2c_driver wf_max6690_driver;
92
91static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, 93static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
92 u8 addr, const char *loc) 94 u8 addr, const char *loc)
93{ 95{
@@ -119,7 +121,7 @@ static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
119 * Let i2c-core delete that device on driver removal. 121 * Let i2c-core delete that device on driver removal.
120 * This is safe because i2c-core holds the core_lock mutex for us. 122 * This is safe because i2c-core holds the core_lock mutex for us.
121 */ 123 */
122 list_add_tail(&client->detected, &client->driver->clients); 124 list_add_tail(&client->detected, &wf_max6690_driver.clients);
123 return client; 125 return client;
124 126
125 fail: 127 fail:
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 5da729e58f99..e20330a28959 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -194,6 +194,8 @@ static struct wf_sensor_ops wf_sat_ops = {
194 .owner = THIS_MODULE, 194 .owner = THIS_MODULE,
195}; 195};
196 196
197static struct i2c_driver wf_sat_driver;
198
197static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) 199static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
198{ 200{
199 struct i2c_board_info info; 201 struct i2c_board_info info;
@@ -222,7 +224,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
222 * Let i2c-core delete that device on driver removal. 224 * Let i2c-core delete that device on driver removal.
223 * This is safe because i2c-core holds the core_lock mutex for us. 225 * This is safe because i2c-core holds the core_lock mutex for us.
224 */ 226 */
225 list_add_tail(&client->detected, &client->driver->clients); 227 list_add_tail(&client->detected, &wf_sat_driver.clients);
226} 228}
227 229
228static int wf_sat_probe(struct i2c_client *client, 230static int wf_sat_probe(struct i2c_client *client,
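
The five macintosh hunks above all apply the same idiom: the driver's own struct i2c_driver is forward-declared so that manually instantiated clients can be added to its clients list directly, instead of going through client->driver, which i2c-core has not filled in at this point. A minimal sketch of the idiom, using a hypothetical "foo" driver:

	#include <linux/i2c.h>
	#include <linux/list.h>
	#include <linux/string.h>

	/* forward declaration: the clients list must be reachable before
	 * i2c-core has set up client->driver */
	static struct i2c_driver foo_driver;

	static struct i2c_client *foo_attach(struct i2c_adapter *adapter, u8 addr)
	{
		struct i2c_board_info info;
		struct i2c_client *client;

		memset(&info, 0, sizeof(info));
		info.addr = addr;
		strlcpy(info.type, "foo", I2C_NAME_SIZE);

		client = i2c_new_device(adapter, &info);
		if (!client)
			return NULL;

		/* let i2c-core delete the device on driver removal */
		list_add_tail(&client->detected, &foo_driver.clients);
		return client;
	}

	static struct i2c_driver foo_driver = {
		.driver = { .name = "foo" },
	};
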
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index ba0edad2d048..54abf9e303b7 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -129,11 +129,13 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
129 * This is the connector callback that delivers data 129 * This is the connector callback that delivers data
130 * that was sent from userspace. 130 * that was sent from userspace.
131 */ 131 */
132static void cn_ulog_callback(void *data) 132static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
133{ 133{
134 struct cn_msg *msg = (struct cn_msg *)data;
135 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); 134 struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
136 135
136 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
137 return;
138
137 spin_lock(&receiving_list_lock); 139 spin_lock(&receiving_list_lock);
138 if (msg->len == 0) 140 if (msg->len == 0)
139 fill_pkg(msg, NULL); 141 fill_pkg(msg, NULL);
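
This hunk adapts cn_ulog_callback() to the connector callback signature that passes the message and the sender's netlink_skb_parms directly, so each callback can do its own permission check. A stripped-down sketch of a callback written against that signature; the callback itself is hypothetical:

	#include <linux/kernel.h>
	#include <linux/capability.h>
	#include <linux/connector.h>

	/* hypothetical callback using the two-argument connector signature */
	static void example_cn_callback(struct cn_msg *msg,
					struct netlink_skb_parms *nsp)
	{
		/* drop requests from senders without CAP_SYS_ADMIN */
		if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
			return;

		pr_info("connector: seq %u ack %u, %u byte payload\n",
			msg->seq, msg->ack, msg->len);
	}
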
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 376f1ab48a24..23e76fe0d359 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -130,7 +130,7 @@ struct mapped_device {
130 /* 130 /*
131 * A list of ios that arrived while we were suspended. 131 * A list of ios that arrived while we were suspended.
132 */ 132 */
133 atomic_t pending[2]; 133 atomic_t pending;
134 wait_queue_head_t wait; 134 wait_queue_head_t wait;
135 struct work_struct work; 135 struct work_struct work;
136 struct bio_list deferred; 136 struct bio_list deferred;
@@ -453,14 +453,13 @@ static void start_io_acct(struct dm_io *io)
453{ 453{
454 struct mapped_device *md = io->md; 454 struct mapped_device *md = io->md;
455 int cpu; 455 int cpu;
456 int rw = bio_data_dir(io->bio);
457 456
458 io->start_time = jiffies; 457 io->start_time = jiffies;
459 458
460 cpu = part_stat_lock(); 459 cpu = part_stat_lock();
461 part_round_stats(cpu, &dm_disk(md)->part0); 460 part_round_stats(cpu, &dm_disk(md)->part0);
462 part_stat_unlock(); 461 part_stat_unlock();
463 dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); 462 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
464} 463}
465 464
466static void end_io_acct(struct dm_io *io) 465static void end_io_acct(struct dm_io *io)
@@ -480,9 +479,8 @@ static void end_io_acct(struct dm_io *io)
480 * After this is decremented the bio must not be touched if it is 479 * After this is decremented the bio must not be touched if it is
481 * a barrier. 480 * a barrier.
482 */ 481 */
483 dm_disk(md)->part0.in_flight[rw] = pending = 482 dm_disk(md)->part0.in_flight = pending =
484 atomic_dec_return(&md->pending[rw]); 483 atomic_dec_return(&md->pending);
485 pending += atomic_read(&md->pending[rw^0x1]);
486 484
487 /* nudge anyone waiting on suspend queue */ 485 /* nudge anyone waiting on suspend queue */
488 if (!pending) 486 if (!pending)
@@ -1787,8 +1785,7 @@ static struct mapped_device *alloc_dev(int minor)
1787 if (!md->disk) 1785 if (!md->disk)
1788 goto bad_disk; 1786 goto bad_disk;
1789 1787
1790 atomic_set(&md->pending[0], 0); 1788 atomic_set(&md->pending, 0);
1791 atomic_set(&md->pending[1], 0);
1792 init_waitqueue_head(&md->wait); 1789 init_waitqueue_head(&md->wait);
1793 INIT_WORK(&md->work, dm_wq_work); 1790 INIT_WORK(&md->work, dm_wq_work);
1794 init_waitqueue_head(&md->eventq); 1791 init_waitqueue_head(&md->eventq);
@@ -2091,8 +2088,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2091 break; 2088 break;
2092 } 2089 }
2093 spin_unlock_irqrestore(q->queue_lock, flags); 2090 spin_unlock_irqrestore(q->queue_lock, flags);
2094 } else if (!atomic_read(&md->pending[0]) && 2091 } else if (!atomic_read(&md->pending))
2095 !atomic_read(&md->pending[1]))
2096 break; 2092 break;
2097 2093
2098 if (interruptible == TASK_INTERRUPTIBLE && 2094 if (interruptible == TASK_INTERRUPTIBLE &&
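
The dm.c hunks revert the per-direction pending[2] counters to a single atomic count mirrored into part0.in_flight. Condensed to its essentials, the accounting being restored looks like the sketch below; it reuses the field names from the hunks and is not a drop-in replacement:

	/* I/O start: bump the pending count and publish it as in_flight */
	static void sketch_start_io(struct mapped_device *md)
	{
		dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
	}

	/* I/O end: drop the count and wake suspend waiters when it hits zero */
	static void sketch_end_io(struct mapped_device *md)
	{
		int pending;

		dm_disk(md)->part0.in_flight = pending =
			atomic_dec_return(&md->pending);

		if (!pending)
			wake_up(&md->wait);
	}
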
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 3750ff48cba1..c37790ad92d0 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -20,6 +20,7 @@
20 * 20 *
21 */ 21 */
22 22
23#include <linux/sched.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
@@ -1203,7 +1204,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
1203 return mask; 1204 return mask;
1204} 1205}
1205 1206
1206static struct file_operations dvb_dvr_fops = { 1207static const struct file_operations dvb_dvr_fops = {
1207 .owner = THIS_MODULE, 1208 .owner = THIS_MODULE,
1208 .read = dvb_dvr_read, 1209 .read = dvb_dvr_read,
1209 .write = dvb_dvr_write, 1210 .write = dvb_dvr_write,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index eef6d3616626..91c537bca8ad 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -21,6 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/sched.h>
24#include <linux/spinlock.h> 25#include <linux/spinlock.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index eeb80d0ea3ff..853e04b7cb36 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -215,7 +215,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
215 return POLLIN; 215 return POLLIN;
216} 216}
217 217
218static struct file_operations fdtv_ca_fops = { 218static const struct file_operations fdtv_ca_fops = {
219 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
220 .ioctl = dvb_generic_ioctl, 220 .ioctl = dvb_generic_ioctl,
221 .open = dvb_generic_open, 221 .open = dvb_generic_open,
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 8b1440136c45..482d0f3be5ff 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -38,6 +38,7 @@
38#include <linux/videodev2.h> /* V4L2 API defs */ 38#include <linux/videodev2.h> /* V4L2 API defs */
39#include <linux/param.h> 39#include <linux/param.h>
40#include <linux/pnp.h> 40#include <linux/pnp.h>
41#include <linux/sched.h>
41#include <linux/io.h> /* outb, outb_p */ 42#include <linux/io.h> /* outb, outb_p */
42#include <media/v4l2-device.h> 43#include <media/v4l2-device.h>
43#include <media/v4l2-ioctl.h> 44#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 43ab0adf3b61..2377313c041a 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/sched.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
35#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
36#include <linux/ctype.h> 37#include <linux/ctype.h>
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 5447da16a170..613481028272 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -57,8 +57,6 @@
57 * The AB3100 is usually assigned address 0x48 (7-bit) 57 * The AB3100 is usually assigned address 0x48 (7-bit)
58 * The chip is defined in the platform i2c_board_data section. 58 * The chip is defined in the platform i2c_board_data section.
59 */ 59 */
60static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };
61I2C_CLIENT_INSMOD_1(ab3100);
62 60
63u8 ab3100_get_chip_type(struct ab3100 *ab3100) 61u8 ab3100_get_chip_type(struct ab3100 *ab3100)
64{ 62{
@@ -966,7 +964,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
966} 964}
967 965
968static const struct i2c_device_id ab3100_id[] = { 966static const struct i2c_device_id ab3100_id[] = {
969 { "ab3100", ab3100 }, 967 { "ab3100", 0 },
970 { } 968 { }
971}; 969};
972MODULE_DEVICE_TABLE(i2c, ab3100_id); 970MODULE_DEVICE_TABLE(i2c, ab3100_id);
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index 2afc08006e6d..fa294b6d600a 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/sched.h>
24#include <linux/ucb1400.h> 25#include <linux/ucb1400.h>
25 26
26unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, 27unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 3c0c58eed347..5a6b2bce8ad5 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -33,12 +33,6 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35 35
36/* Do not scan - the MAX6875 access method will write to some EEPROM chips */
37static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
38
39/* Insmod parameters */
40I2C_CLIENT_INSMOD_1(max6875);
41
42/* The MAX6875 can only read/write 16 bytes at a time */ 36/* The MAX6875 can only read/write 16 bytes at a time */
43#define SLICE_SIZE 16 37#define SLICE_SIZE 16
44#define SLICE_BITS 4 38#define SLICE_BITS 4
@@ -146,31 +140,21 @@ static struct bin_attribute user_eeprom_attr = {
146 .read = max6875_read, 140 .read = max6875_read,
147}; 141};
148 142
149/* Return 0 if detection is successful, -ENODEV otherwise */ 143static int max6875_probe(struct i2c_client *client,
150static int max6875_detect(struct i2c_client *client, int kind, 144 const struct i2c_device_id *id)
151 struct i2c_board_info *info)
152{ 145{
153 struct i2c_adapter *adapter = client->adapter; 146 struct i2c_adapter *adapter = client->adapter;
147 struct max6875_data *data;
148 int err;
154 149
155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA 150 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
156 | I2C_FUNC_SMBUS_READ_BYTE)) 151 | I2C_FUNC_SMBUS_READ_BYTE))
157 return -ENODEV; 152 return -ENODEV;
158 153
159 /* Only check even addresses */ 154 /* Only bind to even addresses */
160 if (client->addr & 1) 155 if (client->addr & 1)
161 return -ENODEV; 156 return -ENODEV;
162 157
163 strlcpy(info->type, "max6875", I2C_NAME_SIZE);
164
165 return 0;
166}
167
168static int max6875_probe(struct i2c_client *client,
169 const struct i2c_device_id *id)
170{
171 struct max6875_data *data;
172 int err;
173
174 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) 158 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
175 return -ENOMEM; 159 return -ENOMEM;
176 160
@@ -222,9 +206,6 @@ static struct i2c_driver max6875_driver = {
222 .probe = max6875_probe, 206 .probe = max6875_probe,
223 .remove = max6875_remove, 207 .remove = max6875_remove,
224 .id_table = max6875_id, 208 .id_table = max6875_id,
225
226 .detect = max6875_detect,
227 .address_data = &addr_data,
228}; 209};
229 210
230static int __init max6875_init(void) 211static int __init max6875_init(void)
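
With the legacy I2C_CLIENT_INSMOD autodetection gone, max6875 is left as a plain probe-style i2c driver. For reference, the resulting skeleton of such a driver looks roughly like this; all "foo" names are placeholders:

	#include <linux/module.h>
	#include <linux/i2c.h>

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		/* allocate per-device state, create sysfs files, ... */
		return 0;
	}

	static int foo_remove(struct i2c_client *client)
	{
		/* undo everything done in probe */
		return 0;
	}

	static const struct i2c_device_id foo_id[] = {
		{ "foo", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_id);

	static struct i2c_driver foo_driver = {
		.driver   = { .name = "foo" },
		.probe    = foo_probe,
		.remove   = foo_remove,
		.id_table = foo_id,
	};

	static int __init foo_init(void)
	{
		return i2c_add_driver(&foo_driver);
	}
	module_init(foo_init);

	static void __exit foo_exit(void)
	{
		i2c_del_driver(&foo_driver);
	}
	module_exit(foo_exit);

	MODULE_LICENSE("GPL");
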
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index fa57b67593ae..90a95ce8dc34 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -271,7 +271,7 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
271 return mask; 271 return mask;
272} 272}
273 273
274static struct file_operations phantom_file_ops = { 274static const struct file_operations phantom_file_ops = {
275 .open = phantom_open, 275 .open = phantom_open,
276 .release = phantom_release, 276 .release = phantom_release,
277 .unlocked_ioctl = phantom_ioctl, 277 .unlocked_ioctl = phantom_ioctl,
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 300e7ba391a0..41c8fe2a928c 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -53,7 +53,6 @@ struct gru_stats_s gru_stats;
53/* Guaranteed user available resources on each node */ 53/* Guaranteed user available resources on each node */
54static int max_user_cbrs, max_user_dsr_bytes; 54static int max_user_cbrs, max_user_dsr_bytes;
55 55
56static struct file_operations gru_fops;
57static struct miscdevice gru_miscdev; 56static struct miscdevice gru_miscdev;
58 57
59 58
@@ -426,7 +425,7 @@ static void __exit gru_exit(void)
426 gru_proc_exit(); 425 gru_proc_exit();
427} 426}
428 427
429static struct file_operations gru_fops = { 428static const struct file_operations gru_fops = {
430 .owner = THIS_MODULE, 429 .owner = THIS_MODULE,
431 .unlocked_ioctl = gru_file_unlocked_ioctl, 430 .unlocked_ioctl = gru_file_unlocked_ioctl,
432 .mmap = gru_file_mmap, 431 .mmap = gru_file_mmap,
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 610dbd1fcc82..96d10f40fb23 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct inode *inode, struct file *file)
240 return 0; 240 return 0;
241} 241}
242 242
243static struct file_operations mmc_dbg_ext_csd_fops = { 243static const struct file_operations mmc_dbg_ext_csd_fops = {
244 .open = mmc_ext_csd_open, 244 .open = mmc_ext_csd_open,
245 .read = mmc_ext_csd_read, 245 .read = mmc_ext_csd_read,
246 .release = mmc_ext_csd_release, 246 .release = mmc_ext_csd_release,
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 6636354b48ce..f85dcd536508 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
29 unsigned i, nr_strings; 29 unsigned i, nr_strings;
30 char **buffer, *string; 30 char **buffer, *string;
31 31
32 /* Find all null-terminated (including zero length) strings in
33 the TPLLV1_INFO field. Trailing garbage is ignored. */
32 buf += 2; 34 buf += 2;
33 size -= 2; 35 size -= 2;
34 36
@@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
39 if (buf[i] == 0) 41 if (buf[i] == 0)
40 nr_strings++; 42 nr_strings++;
41 } 43 }
42 44 if (nr_strings == 0)
43 if (nr_strings < 4) {
44 printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n");
45 return 0; 45 return 0;
46 }
47 46
48 size = i; 47 size = i;
49 48
@@ -98,6 +97,22 @@ static const unsigned char speed_val[16] =
98static const unsigned int speed_unit[8] = 97static const unsigned int speed_unit[8] =
99 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; 98 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
100 99
100/* FUNCE tuples with these types get passed to SDIO drivers */
101static const unsigned char funce_type_whitelist[] = {
102 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
103};
104
105static int cistpl_funce_whitelisted(unsigned char type)
106{
107 int i;
108
109 for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
110 if (funce_type_whitelist[i] == type)
111 return 1;
112 }
113 return 0;
114}
115
101static int cistpl_funce_common(struct mmc_card *card, 116static int cistpl_funce_common(struct mmc_card *card,
102 const unsigned char *buf, unsigned size) 117 const unsigned char *buf, unsigned size)
103{ 118{
@@ -120,6 +135,10 @@ static int cistpl_funce_func(struct sdio_func *func,
120 unsigned vsn; 135 unsigned vsn;
121 unsigned min_size; 136 unsigned min_size;
122 137
138 /* let SDIO drivers take care of whitelisted FUNCE tuples */
139 if (cistpl_funce_whitelisted(buf[0]))
140 return -EILSEQ;
141
123 vsn = func->card->cccr.sdio_vsn; 142 vsn = func->card->cccr.sdio_vsn;
124 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42; 143 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
125 144
@@ -154,13 +173,12 @@ static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
154 else 173 else
155 ret = cistpl_funce_common(card, buf, size); 174 ret = cistpl_funce_common(card, buf, size);
156 175
157 if (ret) { 176 if (ret && ret != -EILSEQ) {
158 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " 177 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
159 "type %u\n", mmc_hostname(card->host), size, buf[0]); 178 "type %u\n", mmc_hostname(card->host), size, buf[0]);
160 return ret;
161 } 179 }
162 180
163 return 0; 181 return ret;
164} 182}
165 183
166typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, 184typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
@@ -253,21 +271,12 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
253 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) 271 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
254 if (cis_tpl_list[i].code == tpl_code) 272 if (cis_tpl_list[i].code == tpl_code)
255 break; 273 break;
256 if (i >= ARRAY_SIZE(cis_tpl_list)) { 274 if (i < ARRAY_SIZE(cis_tpl_list)) {
257 /* this tuple is unknown to the core */
258 this->next = NULL;
259 this->code = tpl_code;
260 this->size = tpl_link;
261 *prev = this;
262 prev = &this->next;
263 printk(KERN_DEBUG
264 "%s: queuing CIS tuple 0x%02x length %u\n",
265 mmc_hostname(card->host), tpl_code, tpl_link);
266 } else {
267 const struct cis_tpl *tpl = cis_tpl_list + i; 275 const struct cis_tpl *tpl = cis_tpl_list + i;
268 if (tpl_link < tpl->min_size) { 276 if (tpl_link < tpl->min_size) {
269 printk(KERN_ERR 277 printk(KERN_ERR
270 "%s: bad CIS tuple 0x%02x (length = %u, expected >= %u)\n", 278 "%s: bad CIS tuple 0x%02x"
279 " (length = %u, expected >= %u)\n",
271 mmc_hostname(card->host), 280 mmc_hostname(card->host),
272 tpl_code, tpl_link, tpl->min_size); 281 tpl_code, tpl_link, tpl->min_size);
273 ret = -EINVAL; 282 ret = -EINVAL;
@@ -275,7 +284,30 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
275 ret = tpl->parse(card, func, 284 ret = tpl->parse(card, func,
276 this->data, tpl_link); 285 this->data, tpl_link);
277 } 286 }
278 kfree(this); 287 /*
288 * We don't need the tuple anymore if it was
289 * successfully parsed by the SDIO core or if it is
290 * not going to be parsed by SDIO drivers.
291 */
292 if (!ret || ret != -EILSEQ)
293 kfree(this);
294 } else {
295 /* unknown tuple */
296 ret = -EILSEQ;
297 }
298
299 if (ret == -EILSEQ) {
300 /* this tuple is unknown to the core or whitelisted */
301 this->next = NULL;
302 this->code = tpl_code;
303 this->size = tpl_link;
304 *prev = this;
305 prev = &this->next;
306 printk(KERN_DEBUG
307 "%s: queuing CIS tuple 0x%02x length %u\n",
308 mmc_hostname(card->host), tpl_code, tpl_link);
309 /* keep on analyzing tuples */
310 ret = 0;
279 } 311 }
280 312
281 ptr += tpl_link; 313 ptr += tpl_link;
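
After this rework, tuples the core does not parse itself (unknown codes and the whitelisted FUNCE types) stay queued on the function so SDIO function drivers can interpret them. A sketch of a driver walking that queue, assuming the sdio_func_tuple list the core links onto func->tuples:

	#include <linux/kernel.h>
	#include <linux/mmc/sdio_func.h>

	/* iterate the CIS tuples the core queued for this function */
	static void dump_queued_tuples(struct sdio_func *func)
	{
		struct sdio_func_tuple *tpl;

		for (tpl = func->tuples; tpl; tpl = tpl->next)
			pr_info("%s: tuple 0x%02x, %u bytes\n",
				sdio_func_id(func), tpl->code, tpl->size);
	}
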
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7cb057f3f883..432ae8358c86 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -276,6 +276,47 @@ config MMC_S3C
276 276
277 If unsure, say N. 277 If unsure, say N.
278 278
279config MMC_S3C_HW_SDIO_IRQ
280 bool "Hardware support for SDIO IRQ"
281 depends on MMC_S3C
282 help
283 Enable the hardware support for SDIO interrupts instead of using
284 the generic polling code.
285
286choice
287 prompt "Samsung S3C SD/MMC transfer code"
288 depends on MMC_S3C
289
290config MMC_S3C_PIO
291 bool "Use PIO transfers only"
292 help
293 Use PIO to transfer data between memory and the hardware.
294
295 PIO is slower than DMA as it requires CPU instructions to
296 move the data. This has been the traditional default for
297 the S3C MCI driver.
298
299config MMC_S3C_DMA
300 bool "Use DMA transfers only (EXPERIMENTAL)"
301 depends on EXPERIMENTAL
302 help
 303 Use DMA to transfer data between memory and the hardware.
304
305 Currently, the DMA support in this driver seems to not be
306 working properly and needs to be debugged before this
307 option is useful.
308
309config MMC_S3C_PIODMA
310 bool "Support for both PIO and DMA (EXPERIMENTAL)"
311 help
312 Compile both the PIO and DMA transfer routines into the
313 driver and let the platform select at run-time which one
314 is best.
315
316 See notes for the DMA option.
317
318endchoice
319
279config MMC_SDRICOH_CS 320config MMC_SDRICOH_CS
280 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" 321 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
281 depends on EXPERIMENTAL && PCI && PCMCIA 322 depends on EXPERIMENTAL && PCI && PCMCIA
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 3d1e5329da12..705a5894a6bb 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -678,7 +678,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
678 writel(0, host->base + MMCIMASK1); 678 writel(0, host->base + MMCIMASK1);
679 writel(0xfff, host->base + MMCICLEAR); 679 writel(0xfff, host->base + MMCICLEAR);
680 680
681#ifdef CONFIG_GPIOLIB
682 if (gpio_is_valid(plat->gpio_cd)) { 681 if (gpio_is_valid(plat->gpio_cd)) {
683 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 682 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
684 if (ret == 0) 683 if (ret == 0)
@@ -697,7 +696,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
697 else if (ret != -ENOSYS) 696 else if (ret != -ENOSYS)
698 goto err_gpio_wp; 697 goto err_gpio_wp;
699 } 698 }
700#endif
701 699
702 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 700 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
703 if (ret) 701 if (ret)
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 8c08cd7efa7f..99b74a351020 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -17,6 +17,8 @@
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/cpufreq.h> 19#include <linux/cpufreq.h>
20#include <linux/debugfs.h>
21#include <linux/seq_file.h>
20#include <linux/gpio.h> 22#include <linux/gpio.h>
21#include <linux/irq.h> 23#include <linux/irq.h>
22#include <linux/io.h> 24#include <linux/io.h>
@@ -58,8 +60,6 @@ static const int dbgmap_debug = dbg_err | dbg_debug;
58 dev_dbg(&host->pdev->dev, args); \ 60 dev_dbg(&host->pdev->dev, args); \
59 } while (0) 61 } while (0)
60 62
61#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)
62
63static struct s3c2410_dma_client s3cmci_dma_client = { 63static struct s3c2410_dma_client s3cmci_dma_client = {
64 .name = "s3c-mci", 64 .name = "s3c-mci",
65}; 65};
@@ -164,6 +164,40 @@ static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
164 164
165#endif /* CONFIG_MMC_DEBUG */ 165#endif /* CONFIG_MMC_DEBUG */
166 166
167/**
168 * s3cmci_host_usedma - return whether the host is using dma or pio
169 * @host: The host state
170 *
 171 * Return true if the host transfers data using DMA, or false if it
 172 * uses PIO. The result may be a compile-time constant, depending on
 173 * the driver configuration.
174 */
175static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
176{
177#ifdef CONFIG_MMC_S3C_PIO
178 return false;
179#elif defined(CONFIG_MMC_S3C_DMA)
180 return true;
181#else
182 return host->dodma;
183#endif
184}
185
186/**
187 * s3cmci_host_canpio - return true if host has pio code available
188 *
189 * Return true if the driver has been compiled with the PIO support code
190 * available.
191 */
192static inline bool s3cmci_host_canpio(void)
193{
194#ifdef CONFIG_MMC_S3C_PIO
195 return true;
196#else
197 return false;
198#endif
199}
200
167static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) 201static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
168{ 202{
169 u32 newmask; 203 u32 newmask;
@@ -190,7 +224,33 @@ static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
190 224
191static inline void clear_imask(struct s3cmci_host *host) 225static inline void clear_imask(struct s3cmci_host *host)
192{ 226{
193 writel(0, host->base + host->sdiimsk); 227 u32 mask = readl(host->base + host->sdiimsk);
228
229 /* preserve the SDIO IRQ mask state */
230 mask &= S3C2410_SDIIMSK_SDIOIRQ;
231 writel(mask, host->base + host->sdiimsk);
232}
233
234/**
235 * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
236 * @host: The host to check.
237 *
238 * Test to see if the SDIO interrupt is being signalled in case the
 239 * controller has failed to re-detect a card interrupt. Read GPE8 and,
 240 * if it is low, signal an SDIO interrupt.
241 *
242 * This is currently called if a request is finished (we assume that the
243 * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
244 * already being indicated.
245*/
246static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
247{
248 if (host->sdio_irqen) {
249 if (gpio_get_value(S3C2410_GPE(8)) == 0) {
250 printk(KERN_DEBUG "%s: signalling irq\n", __func__);
251 mmc_signal_sdio_irq(host->mmc);
252 }
253 }
194} 254}
195 255
196static inline int get_data_buffer(struct s3cmci_host *host, 256static inline int get_data_buffer(struct s3cmci_host *host,
@@ -238,6 +298,64 @@ static inline u32 fifo_free(struct s3cmci_host *host)
238 return 63 - fifostat; 298 return 63 - fifostat;
239} 299}
240 300
301/**
302 * s3cmci_enable_irq - enable IRQ, after having disabled it.
303 * @host: The device state.
304 * @more: True if more IRQs are expected from transfer.
305 *
306 * Enable the main IRQ if needed after it has been disabled.
307 *
308 * The IRQ can be one of the following states:
309 * - disabled during IDLE
310 * - disabled whilst processing data
311 * - enabled during transfer
312 * - enabled whilst awaiting SDIO interrupt detection
313 */
314static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
315{
316 unsigned long flags;
317 bool enable = false;
318
319 local_irq_save(flags);
320
321 host->irq_enabled = more;
322 host->irq_disabled = false;
323
324 enable = more | host->sdio_irqen;
325
326 if (host->irq_state != enable) {
327 host->irq_state = enable;
328
329 if (enable)
330 enable_irq(host->irq);
331 else
332 disable_irq(host->irq);
333 }
334
335 local_irq_restore(flags);
336}
337
338/**
 339 * s3cmci_disable_irq - disable the main IRQ while a transfer is in progress
340 */
341static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
342{
343 unsigned long flags;
344
345 local_irq_save(flags);
346
347 //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer);
348
349 host->irq_disabled = transfer;
350
351 if (transfer && host->irq_state) {
352 host->irq_state = false;
353 disable_irq(host->irq);
354 }
355
356 local_irq_restore(flags);
357}
358
241static void do_pio_read(struct s3cmci_host *host) 359static void do_pio_read(struct s3cmci_host *host)
242{ 360{
243 int res; 361 int res;
@@ -374,8 +492,7 @@ static void pio_tasklet(unsigned long data)
374{ 492{
375 struct s3cmci_host *host = (struct s3cmci_host *) data; 493 struct s3cmci_host *host = (struct s3cmci_host *) data;
376 494
377 495 s3cmci_disable_irq(host, true);
378 disable_irq(host->irq);
379 496
380 if (host->pio_active == XFER_WRITE) 497 if (host->pio_active == XFER_WRITE)
381 do_pio_write(host); 498 do_pio_write(host);
@@ -395,9 +512,10 @@ static void pio_tasklet(unsigned long data)
395 host->mrq->data->error = -EINVAL; 512 host->mrq->data->error = -EINVAL;
396 } 513 }
397 514
515 s3cmci_enable_irq(host, false);
398 finalize_request(host); 516 finalize_request(host);
399 } else 517 } else
400 enable_irq(host->irq); 518 s3cmci_enable_irq(host, true);
401} 519}
402 520
403/* 521/*
@@ -432,17 +550,27 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
432 struct s3cmci_host *host = dev_id; 550 struct s3cmci_host *host = dev_id;
433 struct mmc_command *cmd; 551 struct mmc_command *cmd;
434 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; 552 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
435 u32 mci_cclear, mci_dclear; 553 u32 mci_cclear = 0, mci_dclear;
436 unsigned long iflags; 554 unsigned long iflags;
437 555
556 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
557 mci_imsk = readl(host->base + host->sdiimsk);
558
559 if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
560 if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
561 mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
562 writel(mci_dclear, host->base + S3C2410_SDIDSTA);
563
564 mmc_signal_sdio_irq(host->mmc);
565 return IRQ_HANDLED;
566 }
567 }
568
438 spin_lock_irqsave(&host->complete_lock, iflags); 569 spin_lock_irqsave(&host->complete_lock, iflags);
439 570
440 mci_csta = readl(host->base + S3C2410_SDICMDSTAT); 571 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
441 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
442 mci_dcnt = readl(host->base + S3C2410_SDIDCNT); 572 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
443 mci_fsta = readl(host->base + S3C2410_SDIFSTA); 573 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
444 mci_imsk = readl(host->base + host->sdiimsk);
445 mci_cclear = 0;
446 mci_dclear = 0; 574 mci_dclear = 0;
447 575
448 if ((host->complete_what == COMPLETION_NONE) || 576 if ((host->complete_what == COMPLETION_NONE) ||
@@ -466,7 +594,7 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
466 goto irq_out; 594 goto irq_out;
467 } 595 }
468 596
469 if (!host->dodma) { 597 if (!s3cmci_host_usedma(host)) {
470 if ((host->pio_active == XFER_WRITE) && 598 if ((host->pio_active == XFER_WRITE) &&
471 (mci_fsta & S3C2410_SDIFSTA_TFDET)) { 599 (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
472 600
@@ -673,6 +801,7 @@ static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
673 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", 801 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
674 size, mci_dsta, mci_dcnt); 802 size, mci_dsta, mci_dcnt);
675 803
804 host->dma_complete = 1;
676 host->complete_what = COMPLETION_FINALIZE; 805 host->complete_what = COMPLETION_FINALIZE;
677 806
678out: 807out:
@@ -683,9 +812,9 @@ out:
683fail_request: 812fail_request:
684 host->mrq->data->error = -EINVAL; 813 host->mrq->data->error = -EINVAL;
685 host->complete_what = COMPLETION_FINALIZE; 814 host->complete_what = COMPLETION_FINALIZE;
686 writel(0, host->base + host->sdiimsk); 815 clear_imask(host);
687 goto out;
688 816
817 goto out;
689} 818}
690 819
691static void finalize_request(struct s3cmci_host *host) 820static void finalize_request(struct s3cmci_host *host)
@@ -702,8 +831,9 @@ static void finalize_request(struct s3cmci_host *host)
702 831
703 if (cmd->data && (cmd->error == 0) && 832 if (cmd->data && (cmd->error == 0) &&
704 (cmd->data->error == 0)) { 833 (cmd->data->error == 0)) {
705 if (host->dodma && (!host->dma_complete)) { 834 if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
706 dbg(host, dbg_dma, "DMA Missing!\n"); 835 dbg(host, dbg_dma, "DMA Missing (%d)!\n",
836 host->dma_complete);
707 return; 837 return;
708 } 838 }
709 } 839 }
@@ -728,7 +858,7 @@ static void finalize_request(struct s3cmci_host *host)
728 writel(0, host->base + S3C2410_SDICMDARG); 858 writel(0, host->base + S3C2410_SDICMDARG);
729 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); 859 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
730 writel(0, host->base + S3C2410_SDICMDCON); 860 writel(0, host->base + S3C2410_SDICMDCON);
731 writel(0, host->base + host->sdiimsk); 861 clear_imask(host);
732 862
733 if (cmd->data && cmd->error) 863 if (cmd->data && cmd->error)
734 cmd->data->error = cmd->error; 864 cmd->data->error = cmd->error;
@@ -754,7 +884,7 @@ static void finalize_request(struct s3cmci_host *host)
754 /* If we had an error while transfering data we flush the 884 /* If we had an error while transfering data we flush the
755 * DMA channel and the fifo to clear out any garbage. */ 885 * DMA channel and the fifo to clear out any garbage. */
756 if (mrq->data->error != 0) { 886 if (mrq->data->error != 0) {
757 if (host->dodma) 887 if (s3cmci_host_usedma(host))
758 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 888 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
759 889
760 if (host->is2440) { 890 if (host->is2440) {
@@ -776,6 +906,8 @@ static void finalize_request(struct s3cmci_host *host)
776request_done: 906request_done:
777 host->complete_what = COMPLETION_NONE; 907 host->complete_what = COMPLETION_NONE;
778 host->mrq = NULL; 908 host->mrq = NULL;
909
910 s3cmci_check_sdio_irq(host);
779 mmc_request_done(host->mmc, mrq); 911 mmc_request_done(host->mmc, mrq);
780} 912}
781 913
@@ -872,7 +1004,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
872 1004
873 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; 1005 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
874 1006
875 if (host->dodma) 1007 if (s3cmci_host_usedma(host))
876 dcon |= S3C2410_SDIDCON_DMAEN; 1008 dcon |= S3C2410_SDIDCON_DMAEN;
877 1009
878 if (host->bus_width == MMC_BUS_WIDTH_4) 1010 if (host->bus_width == MMC_BUS_WIDTH_4)
@@ -950,7 +1082,7 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
950static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) 1082static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
951{ 1083{
952 int dma_len, i; 1084 int dma_len, i;
953 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; 1085 int rw = data->flags & MMC_DATA_WRITE;
954 1086
955 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 1087 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
956 1088
@@ -958,7 +1090,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
958 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 1090 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
959 1091
960 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1092 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
961 (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1093 rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
962 1094
963 if (dma_len == 0) 1095 if (dma_len == 0)
964 return -ENOMEM; 1096 return -ENOMEM;
@@ -969,11 +1101,11 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
969 for (i = 0; i < dma_len; i++) { 1101 for (i = 0; i < dma_len; i++) {
970 int res; 1102 int res;
971 1103
972 dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, 1104 dbg(host, dbg_dma, "enqueue %i: %08x@%u\n", i,
973 sg_dma_address(&data->sg[i]), 1105 sg_dma_address(&data->sg[i]),
974 sg_dma_len(&data->sg[i])); 1106 sg_dma_len(&data->sg[i]));
975 1107
976 res = s3c2410_dma_enqueue(host->dma, (void *) host, 1108 res = s3c2410_dma_enqueue(host->dma, host,
977 sg_dma_address(&data->sg[i]), 1109 sg_dma_address(&data->sg[i]),
978 sg_dma_len(&data->sg[i])); 1110 sg_dma_len(&data->sg[i]));
979 1111
@@ -1018,7 +1150,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1018 return; 1150 return;
1019 } 1151 }
1020 1152
1021 if (host->dodma) 1153 if (s3cmci_host_usedma(host))
1022 res = s3cmci_prepare_dma(host, cmd->data); 1154 res = s3cmci_prepare_dma(host, cmd->data);
1023 else 1155 else
1024 res = s3cmci_prepare_pio(host, cmd->data); 1156 res = s3cmci_prepare_pio(host, cmd->data);
@@ -1037,7 +1169,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1037 s3cmci_send_command(host, cmd); 1169 s3cmci_send_command(host, cmd);
1038 1170
1039 /* Enable Interrupt */ 1171 /* Enable Interrupt */
1040 enable_irq(host->irq); 1172 s3cmci_enable_irq(host, true);
1041} 1173}
1042 1174
1043static int s3cmci_card_present(struct mmc_host *mmc) 1175static int s3cmci_card_present(struct mmc_host *mmc)
@@ -1049,7 +1181,7 @@ static int s3cmci_card_present(struct mmc_host *mmc)
1049 if (pdata->gpio_detect == 0) 1181 if (pdata->gpio_detect == 0)
1050 return -ENOSYS; 1182 return -ENOSYS;
1051 1183
1052 ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; 1184 ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
1053 return ret ^ pdata->detect_invert; 1185 return ret ^ pdata->detect_invert;
1054} 1186}
1055 1187
@@ -1104,12 +1236,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1104 switch (ios->power_mode) { 1236 switch (ios->power_mode) {
1105 case MMC_POWER_ON: 1237 case MMC_POWER_ON:
1106 case MMC_POWER_UP: 1238 case MMC_POWER_UP:
1107 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); 1239 s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK);
1108 s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); 1240 s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD);
1109 s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); 1241 s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0);
1110 s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); 1242 s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
1111 s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); 1243 s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2);
1112 s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); 1244 s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3);
1113 1245
1114 if (host->pdata->set_power) 1246 if (host->pdata->set_power)
1115 host->pdata->set_power(ios->power_mode, ios->vdd); 1247 host->pdata->set_power(ios->power_mode, ios->vdd);
@@ -1121,8 +1253,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1121 1253
1122 case MMC_POWER_OFF: 1254 case MMC_POWER_OFF:
1123 default: 1255 default:
1124 s3c2410_gpio_setpin(S3C2410_GPE5, 0); 1256 gpio_direction_output(S3C2410_GPE(5), 0);
1125 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT);
1126 1257
1127 if (host->is2440) 1258 if (host->is2440)
1128 mci_con |= S3C2440_SDICON_SDRESET; 1259 mci_con |= S3C2440_SDICON_SDRESET;
@@ -1168,7 +1299,7 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1168 struct s3c24xx_mci_pdata *pdata = host->pdata; 1299 struct s3c24xx_mci_pdata *pdata = host->pdata;
1169 int ret; 1300 int ret;
1170 1301
1171 if (pdata->gpio_wprotect == 0) 1302 if (pdata->no_wprotect)
1172 return 0; 1303 return 0;
1173 1304
1174 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); 1305 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
@@ -1179,11 +1310,52 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1179 return ret; 1310 return ret;
1180} 1311}
1181 1312
1313static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1314{
1315 struct s3cmci_host *host = mmc_priv(mmc);
1316 unsigned long flags;
1317 u32 con;
1318
1319 local_irq_save(flags);
1320
1321 con = readl(host->base + S3C2410_SDICON);
1322 host->sdio_irqen = enable;
1323
1324 if (enable == host->sdio_irqen)
1325 goto same_state;
1326
1327 if (enable) {
1328 con |= S3C2410_SDICON_SDIOIRQ;
1329 enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1330
1331 if (!host->irq_state && !host->irq_disabled) {
1332 host->irq_state = true;
1333 enable_irq(host->irq);
1334 }
1335 } else {
1336 disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1337 con &= ~S3C2410_SDICON_SDIOIRQ;
1338
1339 if (!host->irq_enabled && host->irq_state) {
1340 disable_irq_nosync(host->irq);
1341 host->irq_state = false;
1342 }
1343 }
1344
1345 writel(con, host->base + S3C2410_SDICON);
1346
1347 same_state:
1348 local_irq_restore(flags);
1349
1350 s3cmci_check_sdio_irq(host);
1351}
1352
1182static struct mmc_host_ops s3cmci_ops = { 1353static struct mmc_host_ops s3cmci_ops = {
1183 .request = s3cmci_request, 1354 .request = s3cmci_request,
1184 .set_ios = s3cmci_set_ios, 1355 .set_ios = s3cmci_set_ios,
1185 .get_ro = s3cmci_get_ro, 1356 .get_ro = s3cmci_get_ro,
1186 .get_cd = s3cmci_card_present, 1357 .get_cd = s3cmci_card_present,
1358 .enable_sdio_irq = s3cmci_enable_sdio_irq,
1187}; 1359};
1188 1360
1189static struct s3c24xx_mci_pdata s3cmci_def_pdata = { 1361static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
@@ -1246,11 +1418,140 @@ static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
1246} 1418}
1247#endif 1419#endif
1248 1420
1249static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) 1421
1422#ifdef CONFIG_DEBUG_FS
1423
1424static int s3cmci_state_show(struct seq_file *seq, void *v)
1425{
1426 struct s3cmci_host *host = seq->private;
1427
1428 seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
1429 seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
1430 seq_printf(seq, "Prescale = %d\n", host->prescaler);
1431 seq_printf(seq, "is2440 = %d\n", host->is2440);
1432 seq_printf(seq, "IRQ = %d\n", host->irq);
1433 seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
1434 seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
1435 seq_printf(seq, "IRQ state = %d\n", host->irq_state);
1436 seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
1437 seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
1438 seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
1439 seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
1440
1441 return 0;
1442}
1443
1444static int s3cmci_state_open(struct inode *inode, struct file *file)
1445{
1446 return single_open(file, s3cmci_state_show, inode->i_private);
1447}
1448
1449static const struct file_operations s3cmci_fops_state = {
1450 .owner = THIS_MODULE,
1451 .open = s3cmci_state_open,
1452 .read = seq_read,
1453 .llseek = seq_lseek,
1454 .release = single_release,
1455};
1456
1457#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
1458
1459struct s3cmci_reg {
1460 unsigned short addr;
1461 unsigned char *name;
1462} debug_regs[] = {
1463 DBG_REG(CON),
1464 DBG_REG(PRE),
1465 DBG_REG(CMDARG),
1466 DBG_REG(CMDCON),
1467 DBG_REG(CMDSTAT),
1468 DBG_REG(RSP0),
1469 DBG_REG(RSP1),
1470 DBG_REG(RSP2),
1471 DBG_REG(RSP3),
1472 DBG_REG(TIMER),
1473 DBG_REG(BSIZE),
1474 DBG_REG(DCON),
1475 DBG_REG(DCNT),
1476 DBG_REG(DSTA),
1477 DBG_REG(FSTA),
1478 {}
1479};
1480
1481static int s3cmci_regs_show(struct seq_file *seq, void *v)
1482{
1483 struct s3cmci_host *host = seq->private;
1484 struct s3cmci_reg *rptr = debug_regs;
1485
1486 for (; rptr->name; rptr++)
1487 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
1488 readl(host->base + rptr->addr));
1489
1490 seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
1491
1492 return 0;
1493}
1494
1495static int s3cmci_regs_open(struct inode *inode, struct file *file)
1496{
1497 return single_open(file, s3cmci_regs_show, inode->i_private);
1498}
1499
1500static const struct file_operations s3cmci_fops_regs = {
1501 .owner = THIS_MODULE,
1502 .open = s3cmci_regs_open,
1503 .read = seq_read,
1504 .llseek = seq_lseek,
1505 .release = single_release,
1506};
1507
1508static void s3cmci_debugfs_attach(struct s3cmci_host *host)
1509{
1510 struct device *dev = &host->pdev->dev;
1511
1512 host->debug_root = debugfs_create_dir(dev_name(dev), NULL);
1513 if (IS_ERR(host->debug_root)) {
1514 dev_err(dev, "failed to create debugfs root\n");
1515 return;
1516 }
1517
1518 host->debug_state = debugfs_create_file("state", 0444,
1519 host->debug_root, host,
1520 &s3cmci_fops_state);
1521
1522 if (IS_ERR(host->debug_state))
1523 dev_err(dev, "failed to create debug state file\n");
1524
1525 host->debug_regs = debugfs_create_file("regs", 0444,
1526 host->debug_root, host,
1527 &s3cmci_fops_regs);
1528
1529 if (IS_ERR(host->debug_regs))
1530 dev_err(dev, "failed to create debug regs file\n");
1531}
1532
1533static void s3cmci_debugfs_remove(struct s3cmci_host *host)
1534{
1535 debugfs_remove(host->debug_regs);
1536 debugfs_remove(host->debug_state);
1537 debugfs_remove(host->debug_root);
1538}
1539
1540#else
1541static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
1542static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
1543
1544#endif /* CONFIG_DEBUG_FS */
1545
1546static int __devinit s3cmci_probe(struct platform_device *pdev)
1250{ 1547{
1251 struct s3cmci_host *host; 1548 struct s3cmci_host *host;
1252 struct mmc_host *mmc; 1549 struct mmc_host *mmc;
1253 int ret; 1550 int ret;
1551 int is2440;
1552 int i;
1553
1554 is2440 = platform_get_device_id(pdev)->driver_data;
1254 1555
1255 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); 1556 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1256 if (!mmc) { 1557 if (!mmc) {
@@ -1258,6 +1559,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1258 goto probe_out; 1559 goto probe_out;
1259 } 1560 }
1260 1561
1562 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
1563 ret = gpio_request(i, dev_name(&pdev->dev));
1564 if (ret) {
1565 dev_err(&pdev->dev, "failed to get gpio %d\n", i);
1566
1567 for (i--; i >= S3C2410_GPE(5); i--)
1568 gpio_free(i);
1569
1570 goto probe_free_host;
1571 }
1572 }
1573
1261 host = mmc_priv(mmc); 1574 host = mmc_priv(mmc);
1262 host->mmc = mmc; 1575 host->mmc = mmc;
1263 host->pdev = pdev; 1576 host->pdev = pdev;
@@ -1282,11 +1595,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1282 host->clk_div = 2; 1595 host->clk_div = 2;
1283 } 1596 }
1284 1597
1285 host->dodma = 0;
1286 host->complete_what = COMPLETION_NONE; 1598 host->complete_what = COMPLETION_NONE;
1287 host->pio_active = XFER_NONE; 1599 host->pio_active = XFER_NONE;
1288 1600
1289 host->dma = S3CMCI_DMA; 1601#ifdef CONFIG_MMC_S3C_PIODMA
1602 host->dodma = host->pdata->dma;
1603#endif
1290 1604
1291 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1605 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1292 if (!host->mem) { 1606 if (!host->mem) {
@@ -1294,19 +1608,19 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1294 "failed to get io memory region resouce.\n"); 1608 "failed to get io memory region resouce.\n");
1295 1609
1296 ret = -ENOENT; 1610 ret = -ENOENT;
1297 goto probe_free_host; 1611 goto probe_free_gpio;
1298 } 1612 }
1299 1613
1300 host->mem = request_mem_region(host->mem->start, 1614 host->mem = request_mem_region(host->mem->start,
1301 RESSIZE(host->mem), pdev->name); 1615 resource_size(host->mem), pdev->name);
1302 1616
1303 if (!host->mem) { 1617 if (!host->mem) {
1304 dev_err(&pdev->dev, "failed to request io memory region.\n"); 1618 dev_err(&pdev->dev, "failed to request io memory region.\n");
1305 ret = -ENOENT; 1619 ret = -ENOENT;
1306 goto probe_free_host; 1620 goto probe_free_gpio;
1307 } 1621 }
1308 1622
1309 host->base = ioremap(host->mem->start, RESSIZE(host->mem)); 1623 host->base = ioremap(host->mem->start, resource_size(host->mem));
1310 if (!host->base) { 1624 if (!host->base) {
1311 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); 1625 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
1312 ret = -EINVAL; 1626 ret = -EINVAL;
@@ -1331,31 +1645,60 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1331 * ensure we don't lock the system with un-serviceable requests. */ 1645 * ensure we don't lock the system with un-serviceable requests. */
1332 1646
1333 disable_irq(host->irq); 1647 disable_irq(host->irq);
1648 host->irq_state = false;
1334 1649
1335 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); 1650 if (!host->pdata->no_detect) {
1336 1651 ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
1337 if (host->irq_cd >= 0) { 1652 if (ret) {
1338 if (request_irq(host->irq_cd, s3cmci_irq_cd, 1653 dev_err(&pdev->dev, "failed to get detect gpio\n");
1339 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1340 DRIVER_NAME, host)) {
1341 dev_err(&pdev->dev, "can't get card detect irq.\n");
1342 ret = -ENOENT;
1343 goto probe_free_irq; 1654 goto probe_free_irq;
1344 } 1655 }
1345 } else { 1656
1346 dev_warn(&pdev->dev, "host detect has no irq available\n"); 1657 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
1347 s3c2410_gpio_cfgpin(host->pdata->gpio_detect, 1658
1348 S3C2410_GPIO_INPUT); 1659 if (host->irq_cd >= 0) {
1660 if (request_irq(host->irq_cd, s3cmci_irq_cd,
1661 IRQF_TRIGGER_RISING |
1662 IRQF_TRIGGER_FALLING,
1663 DRIVER_NAME, host)) {
1664 dev_err(&pdev->dev,
1665 "can't get card detect irq.\n");
1666 ret = -ENOENT;
1667 goto probe_free_gpio_cd;
1668 }
1669 } else {
1670 dev_warn(&pdev->dev,
1671 "host detect has no irq available\n");
1672 gpio_direction_input(host->pdata->gpio_detect);
1673 }
1674 } else
1675 host->irq_cd = -1;
1676
1677 if (!host->pdata->no_wprotect) {
1678 ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
1679 if (ret) {
1680 dev_err(&pdev->dev, "failed to get writeprotect\n");
1681 goto probe_free_irq_cd;
1682 }
1683
1684 gpio_direction_input(host->pdata->gpio_wprotect);
1349 } 1685 }
1350 1686
1351 if (host->pdata->gpio_wprotect) 1687 /* depending on the dma state, get a dma channel to use. */
1352 s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
1353 S3C2410_GPIO_INPUT);
1354 1688
1355 if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { 1689 if (s3cmci_host_usedma(host)) {
1356 dev_err(&pdev->dev, "unable to get DMA channel.\n"); 1690 host->dma = s3c2410_dma_request(DMACH_SDI, &s3cmci_dma_client,
1357 ret = -EBUSY; 1691 host);
1358 goto probe_free_irq_cd; 1692 if (host->dma < 0) {
1693 dev_err(&pdev->dev, "cannot get DMA channel.\n");
1694 if (!s3cmci_host_canpio()) {
1695 ret = -EBUSY;
1696 goto probe_free_gpio_wp;
1697 } else {
1698 dev_warn(&pdev->dev, "falling back to PIO.\n");
1699 host->dodma = 0;
1700 }
1701 }
1359 } 1702 }
1360 1703
1361 host->clk = clk_get(&pdev->dev, "sdi"); 1704 host->clk = clk_get(&pdev->dev, "sdi");
@@ -1363,7 +1706,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1363 dev_err(&pdev->dev, "failed to find clock source.\n"); 1706 dev_err(&pdev->dev, "failed to find clock source.\n");
1364 ret = PTR_ERR(host->clk); 1707 ret = PTR_ERR(host->clk);
1365 host->clk = NULL; 1708 host->clk = NULL;
1366 goto probe_free_host; 1709 goto probe_free_dma;
1367 } 1710 }
1368 1711
1369 ret = clk_enable(host->clk); 1712 ret = clk_enable(host->clk);
@@ -1376,7 +1719,11 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1376 1719
1377 mmc->ops = &s3cmci_ops; 1720 mmc->ops = &s3cmci_ops;
1378 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1721 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1722#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
1723 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1724#else
1379 mmc->caps = MMC_CAP_4_BIT_DATA; 1725 mmc->caps = MMC_CAP_4_BIT_DATA;
1726#endif
1380 mmc->f_min = host->clk_rate / (host->clk_div * 256); 1727 mmc->f_min = host->clk_rate / (host->clk_div * 256);
1381 mmc->f_max = host->clk_rate / host->clk_div; 1728 mmc->f_max = host->clk_rate / host->clk_div;
1382 1729
@@ -1408,8 +1755,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1408 goto free_cpufreq; 1755 goto free_cpufreq;
1409 } 1756 }
1410 1757
1758 s3cmci_debugfs_attach(host);
1759
1411 platform_set_drvdata(pdev, mmc); 1760 platform_set_drvdata(pdev, mmc);
1412 dev_info(&pdev->dev, "initialisation done.\n"); 1761 dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
1762 s3cmci_host_usedma(host) ? "dma" : "pio",
1763 mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
1413 1764
1414 return 0; 1765 return 0;
1415 1766
@@ -1422,6 +1773,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1422 clk_free: 1773 clk_free:
1423 clk_put(host->clk); 1774 clk_put(host->clk);
1424 1775
1776 probe_free_dma:
1777 if (s3cmci_host_usedma(host))
1778 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1779
1780 probe_free_gpio_wp:
1781 if (!host->pdata->no_wprotect)
1782 gpio_free(host->pdata->gpio_wprotect);
1783
1784 probe_free_gpio_cd:
1785 if (!host->pdata->no_detect)
1786 gpio_free(host->pdata->gpio_detect);
1787
1425 probe_free_irq_cd: 1788 probe_free_irq_cd:
1426 if (host->irq_cd >= 0) 1789 if (host->irq_cd >= 0)
1427 free_irq(host->irq_cd, host); 1790 free_irq(host->irq_cd, host);
@@ -1433,10 +1796,15 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1433 iounmap(host->base); 1796 iounmap(host->base);
1434 1797
1435 probe_free_mem_region: 1798 probe_free_mem_region:
1436 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1799 release_mem_region(host->mem->start, resource_size(host->mem));
1800
1801 probe_free_gpio:
1802 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1803 gpio_free(i);
1437 1804
1438 probe_free_host: 1805 probe_free_host:
1439 mmc_free_host(mmc); 1806 mmc_free_host(mmc);
1807
1440 probe_out: 1808 probe_out:
1441 return ret; 1809 return ret;
1442} 1810}
@@ -1449,6 +1817,7 @@ static void s3cmci_shutdown(struct platform_device *pdev)
1449 if (host->irq_cd >= 0) 1817 if (host->irq_cd >= 0)
1450 free_irq(host->irq_cd, host); 1818 free_irq(host->irq_cd, host);
1451 1819
1820 s3cmci_debugfs_remove(host);
1452 s3cmci_cpufreq_deregister(host); 1821 s3cmci_cpufreq_deregister(host);
1453 mmc_remove_host(mmc); 1822 mmc_remove_host(mmc);
1454 clk_disable(host->clk); 1823 clk_disable(host->clk);
@@ -1458,104 +1827,102 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
1458{ 1827{
1459 struct mmc_host *mmc = platform_get_drvdata(pdev); 1828 struct mmc_host *mmc = platform_get_drvdata(pdev);
1460 struct s3cmci_host *host = mmc_priv(mmc); 1829 struct s3cmci_host *host = mmc_priv(mmc);
1830 struct s3c24xx_mci_pdata *pd = host->pdata;
1831 int i;
1461 1832
1462 s3cmci_shutdown(pdev); 1833 s3cmci_shutdown(pdev);
1463 1834
1464 clk_put(host->clk); 1835 clk_put(host->clk);
1465 1836
1466 tasklet_disable(&host->pio_tasklet); 1837 tasklet_disable(&host->pio_tasklet);
1467 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1838
1839 if (s3cmci_host_usedma(host))
1840 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1468 1841
1469 free_irq(host->irq, host); 1842 free_irq(host->irq, host);
1470 1843
1844 if (!pd->no_wprotect)
1845 gpio_free(pd->gpio_wprotect);
1846
1847 if (!pd->no_detect)
1848 gpio_free(pd->gpio_detect);
1849
1850 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1851 gpio_free(i);
1852
1853
1471 iounmap(host->base); 1854 iounmap(host->base);
1472 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1855 release_mem_region(host->mem->start, resource_size(host->mem));
1473 1856
1474 mmc_free_host(mmc); 1857 mmc_free_host(mmc);
1475 return 0; 1858 return 0;
1476} 1859}
1477 1860
1478static int __devinit s3cmci_2410_probe(struct platform_device *dev) 1861static struct platform_device_id s3cmci_driver_ids[] = {
1479{ 1862 {
1480 return s3cmci_probe(dev, 0); 1863 .name = "s3c2410-sdi",
1481} 1864 .driver_data = 0,
1865 }, {
1866 .name = "s3c2412-sdi",
1867 .driver_data = 1,
1868 }, {
1869 .name = "s3c2440-sdi",
1870 .driver_data = 1,
1871 },
1872 { }
1873};
1482 1874
1483static int __devinit s3cmci_2412_probe(struct platform_device *dev) 1875MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1484{
1485 return s3cmci_probe(dev, 1);
1486}
1487 1876
1488static int __devinit s3cmci_2440_probe(struct platform_device *dev)
1489{
1490 return s3cmci_probe(dev, 1);
1491}
1492 1877
1493#ifdef CONFIG_PM 1878#ifdef CONFIG_PM
1494 1879
1495static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) 1880static int s3cmci_suspend(struct device *dev)
1496{ 1881{
1497 struct mmc_host *mmc = platform_get_drvdata(dev); 1882 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1883 struct pm_message event = { PM_EVENT_SUSPEND };
1498 1884
1499 return mmc_suspend_host(mmc, state); 1885 return mmc_suspend_host(mmc, event);
1500} 1886}
1501 1887
1502static int s3cmci_resume(struct platform_device *dev) 1888static int s3cmci_resume(struct device *dev)
1503{ 1889{
1504 struct mmc_host *mmc = platform_get_drvdata(dev); 1890 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1505 1891
1506 return mmc_resume_host(mmc); 1892 return mmc_resume_host(mmc);
1507} 1893}
1508 1894
1509#else /* CONFIG_PM */ 1895static struct dev_pm_ops s3cmci_pm = {
1510#define s3cmci_suspend NULL
1511#define s3cmci_resume NULL
1512#endif /* CONFIG_PM */
1513
1514
1515static struct platform_driver s3cmci_2410_driver = {
1516 .driver.name = "s3c2410-sdi",
1517 .driver.owner = THIS_MODULE,
1518 .probe = s3cmci_2410_probe,
1519 .remove = __devexit_p(s3cmci_remove),
1520 .shutdown = s3cmci_shutdown,
1521 .suspend = s3cmci_suspend, 1896 .suspend = s3cmci_suspend,
1522 .resume = s3cmci_resume, 1897 .resume = s3cmci_resume,
1523}; 1898};
1524 1899
1525static struct platform_driver s3cmci_2412_driver = { 1900#define s3cmci_pm_ops &s3cmci_pm
1526 .driver.name = "s3c2412-sdi", 1901#else /* CONFIG_PM */
1527 .driver.owner = THIS_MODULE, 1902#define s3cmci_pm_ops NULL
1528 .probe = s3cmci_2412_probe, 1903#endif /* CONFIG_PM */
1529 .remove = __devexit_p(s3cmci_remove),
1530 .shutdown = s3cmci_shutdown,
1531 .suspend = s3cmci_suspend,
1532 .resume = s3cmci_resume,
1533};
1534 1904
1535static struct platform_driver s3cmci_2440_driver = { 1905
1536 .driver.name = "s3c2440-sdi", 1906static struct platform_driver s3cmci_driver = {
1537 .driver.owner = THIS_MODULE, 1907 .driver = {
1538 .probe = s3cmci_2440_probe, 1908 .name = "s3c-sdi",
1909 .owner = THIS_MODULE,
1910 .pm = s3cmci_pm_ops,
1911 },
1912 .id_table = s3cmci_driver_ids,
1913 .probe = s3cmci_probe,
1539 .remove = __devexit_p(s3cmci_remove), 1914 .remove = __devexit_p(s3cmci_remove),
1540 .shutdown = s3cmci_shutdown, 1915 .shutdown = s3cmci_shutdown,
1541 .suspend = s3cmci_suspend,
1542 .resume = s3cmci_resume,
1543}; 1916};
1544 1917
1545
1546static int __init s3cmci_init(void) 1918static int __init s3cmci_init(void)
1547{ 1919{
1548 platform_driver_register(&s3cmci_2410_driver); 1920 return platform_driver_register(&s3cmci_driver);
1549 platform_driver_register(&s3cmci_2412_driver);
1550 platform_driver_register(&s3cmci_2440_driver);
1551 return 0;
1552} 1921}
1553 1922
1554static void __exit s3cmci_exit(void) 1923static void __exit s3cmci_exit(void)
1555{ 1924{
1556 platform_driver_unregister(&s3cmci_2410_driver); 1925 platform_driver_unregister(&s3cmci_driver);
1557 platform_driver_unregister(&s3cmci_2412_driver);
1558 platform_driver_unregister(&s3cmci_2440_driver);
1559} 1926}
1560 1927
1561module_init(s3cmci_init); 1928module_init(s3cmci_init);
@@ -1564,6 +1931,3 @@ module_exit(s3cmci_exit);
1564MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); 1931MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
1565MODULE_LICENSE("GPL v2"); 1932MODULE_LICENSE("GPL v2");
1566MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>"); 1933MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
1567MODULE_ALIAS("platform:s3c2410-sdi");
1568MODULE_ALIAS("platform:s3c2412-sdi");
1569MODULE_ALIAS("platform:s3c2440-sdi");
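
The s3cmci hunks above fold three near-identical per-SoC platform drivers into a single driver matched through a platform_device_id table (with driver_data telling the 2410-style and 2440-style controllers apart) and move suspend/resume into dev_pm_ops hung off .driver.pm. The following is only a minimal sketch of that pattern for a hypothetical "foo" driver; every name here (foo_probe, foo_ids, foo_pm, ...) is illustrative and not part of the patch:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit foo_probe(struct platform_device *pdev)
{
	/* driver_data of the matched id distinguishes hardware variants */
	const struct platform_device_id *id = platform_get_device_id(pdev);

	dev_info(&pdev->dev, "probed, hardware variant %lu\n",
		 (unsigned long)id->driver_data);
	return 0;
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_device_id foo_ids[] = {
	{ .name = "foo-old", .driver_data = 0 },
	{ .name = "foo-new", .driver_data = 1 },
	{ }
};
MODULE_DEVICE_TABLE(platform, foo_ids);

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

static struct dev_pm_ops foo_pm = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};
#define FOO_PM_OPS (&foo_pm)
#else
#define FOO_PM_OPS NULL
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name  = "foo",
		.owner = THIS_MODULE,
		.pm    = FOO_PM_OPS,
	},
	.id_table = foo_ids,
	.probe    = foo_probe,
	.remove   = __devexit_p(foo_remove),
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");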
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index ca1ba3d58cfd..c76b53dbeb61 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -8,9 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11/* FIXME: DMA Resource management ?! */
12#define S3CMCI_DMA 0
13
14enum s3cmci_waitfor { 11enum s3cmci_waitfor {
15 COMPLETION_NONE, 12 COMPLETION_NONE,
16 COMPLETION_FINALIZE, 13 COMPLETION_FINALIZE,
@@ -42,6 +39,11 @@ struct s3cmci_host {
42 int dodma; 39 int dodma;
43 int dmatogo; 40 int dmatogo;
44 41
42 bool irq_disabled;
43 bool irq_enabled;
44 bool irq_state;
45 int sdio_irqen;
46
45 struct mmc_request *mrq; 47 struct mmc_request *mrq;
46 int cmd_is_stop; 48 int cmd_is_stop;
47 49
@@ -68,6 +70,12 @@ struct s3cmci_host {
68 unsigned int ccnt, dcnt; 70 unsigned int ccnt, dcnt;
69 struct tasklet_struct pio_tasklet; 71 struct tasklet_struct pio_tasklet;
70 72
73#ifdef CONFIG_DEBUG_FS
74 struct dentry *debug_root;
75 struct dentry *debug_state;
76 struct dentry *debug_regs;
77#endif
78
71#ifdef CONFIG_CPU_FREQ 79#ifdef CONFIG_CPU_FREQ
72 struct notifier_block freq_transition; 80 struct notifier_block freq_transition;
73#endif 81#endif
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0acbf4f5be50..8ca17a3e96ea 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,14 +32,6 @@ struct mtd_blkcore_priv {
32 spinlock_t queue_lock; 32 spinlock_t queue_lock;
33}; 33};
34 34
35static int blktrans_discard_request(struct request_queue *q,
36 struct request *req)
37{
38 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
39 req->cmd[0] = REQ_LB_OP_DISCARD;
40 return 0;
41}
42
43static int do_blktrans_request(struct mtd_blktrans_ops *tr, 35static int do_blktrans_request(struct mtd_blktrans_ops *tr,
44 struct mtd_blktrans_dev *dev, 36 struct mtd_blktrans_dev *dev,
45 struct request *req) 37 struct request *req)
@@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
52 44
53 buf = req->buffer; 45 buf = req->buffer;
54 46
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return tr->discard(dev, block, nsect);
58
59 if (!blk_fs_request(req)) 47 if (!blk_fs_request(req))
60 return -EIO; 48 return -EIO;
61 49
@@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
63 get_capacity(req->rq_disk)) 51 get_capacity(req->rq_disk))
64 return -EIO; 52 return -EIO;
65 53
54 if (blk_discard_rq(req))
55 return tr->discard(dev, block, nsect);
56
66 switch(rq_data_dir(req)) { 57 switch(rq_data_dir(req)) {
67 case READ: 58 case READ:
68 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 59 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
@@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
380 tr->blkcore_priv->rq->queuedata = tr; 371 tr->blkcore_priv->rq->queuedata = tr;
381 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); 372 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
382 if (tr->discard) 373 if (tr->discard)
383 blk_queue_set_discard(tr->blkcore_priv->rq, 374 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
384 blktrans_discard_request); 375 tr->blkcore_priv->rq);
385 376
386 tr->blkshift = ffs(tr->blksize) - 1; 377 tr->blkshift = ffs(tr->blksize) - 1;
387 378
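
The mtd_blkdevs hunk drops the private REQ_LB_OP_DISCARD plumbing in favour of the generic discard support: the queue is flagged with QUEUE_FLAG_DISCARD at setup time and the request loop simply tests blk_discard_rq(). A minimal sketch of that shape for a hypothetical block-translation driver (the "bar_*" names and struct bar_blkdev are assumptions, not the real mtd code):

#include <linux/blkdev.h>

struct bar_blkdev {
	struct request_queue *rq;
	int blkshift;			/* log2 of the device block size */
	int can_discard;
};

static int bar_discard(struct bar_blkdev *dev, unsigned long block,
		       unsigned long nsect)
{
	/* hand the range to the underlying device to erase/trim */
	return 0;
}

static int bar_do_request(struct bar_blkdev *dev, struct request *req)
{
	unsigned long block = blk_rq_pos(req) << 9 >> dev->blkshift;
	unsigned long nsect = blk_rq_cur_bytes(req) >> dev->blkshift;

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_discard_rq(req))	/* generic discard request */
		return bar_discard(dev, block, nsect);

	/* ...normal READ/WRITE handling... */
	return 0;
}

static void bar_setup_queue(struct bar_blkdev *dev)
{
	if (dev->can_discard)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, dev->rq);
}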
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index b9eeadf01b74..975e25b19ebe 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -805,52 +805,54 @@ static void poll_vortex(struct net_device *dev)
805 805
806#ifdef CONFIG_PM 806#ifdef CONFIG_PM
807 807
808static int vortex_suspend(struct pci_dev *pdev, pm_message_t state) 808static int vortex_suspend(struct device *dev)
809{ 809{
810 struct net_device *dev = pci_get_drvdata(pdev); 810 struct pci_dev *pdev = to_pci_dev(dev);
811 struct net_device *ndev = pci_get_drvdata(pdev);
812
813 if (!ndev || !netif_running(ndev))
814 return 0;
815
816 netif_device_detach(ndev);
817 vortex_down(ndev, 1);
811 818
812 if (dev && netdev_priv(dev)) {
813 if (netif_running(dev)) {
814 netif_device_detach(dev);
815 vortex_down(dev, 1);
816 disable_irq(dev->irq);
817 }
818 pci_save_state(pdev);
819 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
820 pci_disable_device(pdev);
821 pci_set_power_state(pdev, pci_choose_state(pdev, state));
822 }
823 return 0; 819 return 0;
824} 820}
825 821
826static int vortex_resume(struct pci_dev *pdev) 822static int vortex_resume(struct device *dev)
827{ 823{
828 struct net_device *dev = pci_get_drvdata(pdev); 824 struct pci_dev *pdev = to_pci_dev(dev);
829 struct vortex_private *vp = netdev_priv(dev); 825 struct net_device *ndev = pci_get_drvdata(pdev);
830 int err; 826 int err;
831 827
832 if (dev && vp) { 828 if (!ndev || !netif_running(ndev))
833 pci_set_power_state(pdev, PCI_D0); 829 return 0;
834 pci_restore_state(pdev); 830
835 err = pci_enable_device(pdev); 831 err = vortex_up(ndev);
836 if (err) { 832 if (err)
837 pr_warning("%s: Could not enable device\n", 833 return err;
838 dev->name); 834
839 return err; 835 netif_device_attach(ndev);
840 } 836
841 pci_set_master(pdev);
842 if (netif_running(dev)) {
843 err = vortex_up(dev);
844 if (err)
845 return err;
846 enable_irq(dev->irq);
847 netif_device_attach(dev);
848 }
849 }
850 return 0; 837 return 0;
851} 838}
852 839
853#endif /* CONFIG_PM */ 840static struct dev_pm_ops vortex_pm_ops = {
841 .suspend = vortex_suspend,
842 .resume = vortex_resume,
843 .freeze = vortex_suspend,
844 .thaw = vortex_resume,
845 .poweroff = vortex_suspend,
846 .restore = vortex_resume,
847};
848
849#define VORTEX_PM_OPS (&vortex_pm_ops)
850
851#else /* !CONFIG_PM */
852
853#define VORTEX_PM_OPS NULL
854
855#endif /* !CONFIG_PM */
854 856
855#ifdef CONFIG_EISA 857#ifdef CONFIG_EISA
856static struct eisa_device_id vortex_eisa_ids[] = { 858static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3199,10 +3201,7 @@ static struct pci_driver vortex_driver = {
3199 .probe = vortex_init_one, 3201 .probe = vortex_init_one,
3200 .remove = __devexit_p(vortex_remove_one), 3202 .remove = __devexit_p(vortex_remove_one),
3201 .id_table = vortex_pci_tbl, 3203 .id_table = vortex_pci_tbl,
3202#ifdef CONFIG_PM 3204 .driver.pm = VORTEX_PM_OPS,
3203 .suspend = vortex_suspend,
3204 .resume = vortex_resume,
3205#endif
3206}; 3205};
3207 3206
3208 3207
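
The 3c59x hunk is the common legacy-PM-to-dev_pm_ops conversion: the pm_message_t based .suspend/.resume hooks, together with their manual pci_save_state()/pci_set_power_state() calls, are replaced by dev_pm_ops wired up through .driver.pm, and the PCI state bookkeeping is left to the PCI core. A minimal sketch of the same conversion for a hypothetical PCI network driver (all "baz_*" names and the dummy id table are illustrative, and a real driver would wrap the PM code in CONFIG_PM as the hunk does):

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

static int __devinit baz_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	return 0;
}

static void __devexit baz_remove(struct pci_dev *pdev)
{
}

static struct pci_device_id baz_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
	{ }
};

static int baz_suspend(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));

	if (ndev && netif_running(ndev))
		netif_device_detach(ndev);
	return 0;
}

static int baz_resume(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));

	if (ndev && netif_running(ndev))
		netif_device_attach(ndev);
	return 0;
}

/* the PCI core saves/restores config space and power state for drivers
 * that use dev_pm_ops, so none of that appears here */
static struct dev_pm_ops baz_pm_ops = {
	.suspend  = baz_suspend,
	.resume   = baz_resume,
	.freeze   = baz_suspend,
	.thaw     = baz_resume,
	.poweroff = baz_suspend,
	.restore  = baz_resume,
};

static struct pci_driver baz_driver = {
	.name      = "baz",
	.id_table  = baz_pci_tbl,
	.probe     = baz_probe,
	.remove    = __devexit_p(baz_remove),
	.driver.pm = &baz_pm_ops,	/* replaces legacy .suspend/.resume */
};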
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2bea67c134f0..712776089b46 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1738,6 +1738,13 @@ config KS8851
1738 help 1738 help
1739 SPI driver for Micrel KS8851 SPI attached network chip. 1739 SPI driver for Micrel KS8851 SPI attached network chip.
1740 1740
1741config KS8851_MLL
1742 tristate "Micrel KS8851 MLL"
1743 depends on HAS_IOMEM
1744 help
1745 This platform driver is for Micrel KS8851 Address/data bus
1746 multiplexed network chip.
1747
1741config VIA_RHINE 1748config VIA_RHINE
1742 tristate "VIA Rhine support" 1749 tristate "VIA Rhine support"
1743 depends on NET_PCI && PCI 1750 depends on NET_PCI && PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ae8cd30f13d6..d866b8cf65d1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SKY2) += sky2.o
89obj-$(CONFIG_SKFP) += skfp/ 89obj-$(CONFIG_SKFP) += skfp/
90obj-$(CONFIG_KS8842) += ks8842.o 90obj-$(CONFIG_KS8842) += ks8842.o
91obj-$(CONFIG_KS8851) += ks8851.o 91obj-$(CONFIG_KS8851) += ks8851.o
92obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
92obj-$(CONFIG_VIA_RHINE) += via-rhine.o 93obj-$(CONFIG_VIA_RHINE) += via-rhine.o
93obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 94obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
94obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 95obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index fdf5937233fc..04f63c77071d 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
721 ps->rx_errors++; 721 ps->rx_errors++;
722 if (status & RX_MISSED_FRAME) 722 if (status & RX_MISSED_FRAME)
723 ps->rx_missed_errors++; 723 ps->rx_missed_errors++;
724 if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) 724 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
725 ps->rx_length_errors++; 725 ps->rx_length_errors++;
726 if (status & RX_CRC_ERROR) 726 if (status & RX_CRC_ERROR)
727 ps->rx_crc_errors++; 727 ps->rx_crc_errors++;
@@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev)
794 printk("rx len error\n"); 794 printk("rx len error\n");
795 if (status & RX_U_CNTRL_FRAME) 795 if (status & RX_U_CNTRL_FRAME)
796 printk("rx u control frame\n"); 796 printk("rx u control frame\n");
797 if (status & RX_MISSED_FRAME)
798 printk("rx miss\n");
799 } 797 }
800 } 798 }
801 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); 799 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 09d270913c50..ba29dc319b34 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) 90 if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
91 break; 91 break;
92 udelay(1); 92 udelay(1);
93 } while (limit-- >= 0); 93 } while (limit-- > 0);
94 94
95 return (limit < 0) ? 1 : 0; 95 return (limit < 0) ? 1 : 0;
96} 96}
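
The bcm63xx_enet change is a one-character off-by-one in a countdown poll: with "limit-- >= 0" the loop runs one extra pass with limit already at -1, so a success on that pass is reported as a timeout by the "(limit < 0)" check. A small self-contained illustration of the corrected pattern (plain user-space C, not the driver code):

#include <stdio.h>

static int poll_ready(int ready_after, int budget)
{
	int limit = budget;

	do {
		if (ready_after-- == 0)	/* "hardware" becomes ready */
			break;
	} while (limit-- > 0);		/* was: limit-- >= 0 */

	/* with "> 0", limit is only negative if the budget ran out */
	return (limit < 0) ? 1 : 0;	/* 1 = timed out */
}

int main(void)
{
	printf("ready in time: timeout=%d\n", poll_ready(3, 10));
	printf("never ready:   timeout=%d\n", poll_ready(1000, 10));
	return 0;
}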
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 684c6fe24c8d..a80da0e14a52 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -258,6 +258,7 @@ struct be_adapter {
258 bool link_up; 258 bool link_up;
259 u32 port_num; 259 u32 port_num;
260 bool promiscuous; 260 bool promiscuous;
261 u32 cap;
261}; 262};
262 263
263extern const struct ethtool_ops be_ethtool_ops; 264extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3dd76c4170bf..89876ade5e33 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1068} 1068}
1069 1069
1070/* Uses mbox */ 1070/* Uses mbox */
1071int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) 1071int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
1072{ 1072{
1073 struct be_mcc_wrb *wrb; 1073 struct be_mcc_wrb *wrb;
1074 struct be_cmd_req_query_fw_cfg *req; 1074 struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
1088 if (!status) { 1088 if (!status) {
1089 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1089 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1090 *port_num = le32_to_cpu(resp->phys_port); 1090 *port_num = le32_to_cpu(resp->phys_port);
1091 *cap = le32_to_cpu(resp->function_cap);
1091 } 1092 }
1092 1093
1093 spin_unlock(&adapter->mbox_lock); 1094 spin_unlock(&adapter->mbox_lock);
@@ -1128,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1128 spin_lock_bh(&adapter->mcc_lock); 1129 spin_lock_bh(&adapter->mcc_lock);
1129 1130
1130 wrb = wrb_from_mccq(adapter); 1131 wrb = wrb_from_mccq(adapter);
1131 req = embedded_payload(wrb);
1132 sge = nonembedded_sgl(wrb); 1132 sge = nonembedded_sgl(wrb);
1133 1133
1134 be_wrb_hdr_prepare(wrb, cmd->size, false, 1); 1134 be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e432f3d926..a86f917f85f4 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -62,7 +62,7 @@ enum {
62 MCC_STATUS_QUEUE_FLUSHING = 0x4, 62 MCC_STATUS_QUEUE_FLUSHING = 0x4,
63/* The command is completing with a DMA error */ 63/* The command is completing with a DMA error */
64 MCC_STATUS_DMA_FAILED = 0x5, 64 MCC_STATUS_DMA_FAILED = 0x5,
65 MCC_STATUS_NOT_SUPPORTED = 0x66 65 MCC_STATUS_NOT_SUPPORTED = 66
66}; 66};
67 67
68#define CQE_STATUS_COMPL_MASK 0xFFFF 68#define CQE_STATUS_COMPL_MASK 0xFFFF
@@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
760 u32 tx_fc, u32 rx_fc); 760 u32 tx_fc, u32 rx_fc);
761extern int be_cmd_get_flow_control(struct be_adapter *adapter, 761extern int be_cmd_get_flow_control(struct be_adapter *adapter,
762 u32 *tx_fc, u32 *rx_fc); 762 u32 *tx_fc, u32 *rx_fc);
763extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num); 763extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
764 u32 *port_num, u32 *cap);
764extern int be_cmd_reset_function(struct be_adapter *adapter); 765extern int be_cmd_reset_function(struct be_adapter *adapter);
765extern int be_process_mcc(struct be_adapter *adapter); 766extern int be_process_mcc(struct be_adapter *adapter);
766extern int be_cmd_write_flashrom(struct be_adapter *adapter, 767extern int be_cmd_write_flashrom(struct be_adapter *adapter,
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 11445df3dbc0..cda5bf2fc50a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = {
358 .get_rx_csum = be_get_rx_csum, 358 .get_rx_csum = be_get_rx_csum,
359 .set_rx_csum = be_set_rx_csum, 359 .set_rx_csum = be_set_rx_csum,
360 .get_tx_csum = ethtool_op_get_tx_csum, 360 .get_tx_csum = ethtool_op_get_tx_csum,
361 .set_tx_csum = ethtool_op_set_tx_csum, 361 .set_tx_csum = ethtool_op_set_tx_hw_csum,
362 .get_sg = ethtool_op_get_sg, 362 .get_sg = ethtool_op_get_sg,
363 .set_sg = ethtool_op_set_sg, 363 .set_sg = ethtool_op_set_sg,
364 .get_tso = ethtool_op_get_tso, 364 .get_tso = ethtool_op_get_tso,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 409cf0595903..6d5e81f7046f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter)
197 /* no space available in linux */ 197 /* no space available in linux */
198 dev_stats->tx_dropped = 0; 198 dev_stats->tx_dropped = 0;
199 199
200 dev_stats->multicast = port_stats->tx_multicastframes; 200 dev_stats->multicast = port_stats->rx_multicast_frames;
201 dev_stats->collisions = 0; 201 dev_stats->collisions = 0;
202 202
203 /* detailed tx_errors */ 203 /* detailed tx_errors */
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
747 struct be_eth_rx_compl *rxcp) 747 struct be_eth_rx_compl *rxcp)
748{ 748{
749 struct sk_buff *skb; 749 struct sk_buff *skb;
750 u32 vtp, vid; 750 u32 vlanf, vid;
751 u8 vtm;
751 752
752 vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 753 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
754 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
755
756 /* vlanf could be wrongly set in some cards.
757 * ignore if vtm is not set */
758 if ((adapter->cap == 0x400) && !vtm)
759 vlanf = 0;
753 760
754 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); 761 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
755 if (!skb) { 762 if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
772 skb->protocol = eth_type_trans(skb, adapter->netdev); 779 skb->protocol = eth_type_trans(skb, adapter->netdev);
773 skb->dev = adapter->netdev; 780 skb->dev = adapter->netdev;
774 781
775 if (vtp) { 782 if (vlanf) {
776 if (!adapter->vlan_grp || adapter->num_vlans == 0) { 783 if (!adapter->vlan_grp || adapter->num_vlans == 0) {
777 kfree_skb(skb); 784 kfree_skb(skb);
778 return; 785 return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
797 struct be_eq_obj *eq_obj = &adapter->rx_eq; 804 struct be_eq_obj *eq_obj = &adapter->rx_eq;
798 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 805 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
799 u16 i, rxq_idx = 0, vid, j; 806 u16 i, rxq_idx = 0, vid, j;
807 u8 vtm;
800 808
801 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 809 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
802 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 810 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
803 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 811 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
804 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 812 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
813 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
814
815 /* vlanf could be wrongly set in some cards.
816 * ignore if vtm is not set */
817 if ((adapter->cap == 0x400) && !vtm)
818 vlanf = 0;
805 819
806 skb = napi_get_frags(&eq_obj->napi); 820 skb = napi_get_frags(&eq_obj->napi);
807 if (!skb) { 821 if (!skb) {
@@ -1885,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev)
1885 struct be_adapter *adapter = netdev_priv(netdev); 1899 struct be_adapter *adapter = netdev_priv(netdev);
1886 1900
1887 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 1901 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
1888 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | 1902 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
1889 NETIF_F_IPV6_CSUM | NETIF_F_GRO; 1903 NETIF_F_GRO;
1890 1904
1891 netdev->flags |= IFF_MULTICAST; 1905 netdev->flags |= IFF_MULTICAST;
1892 1906
@@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter)
2045 if (status) 2059 if (status)
2046 return status; 2060 return status;
2047 2061
2048 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num); 2062 status = be_cmd_query_fw_cfg(adapter,
2063 &adapter->port_num, &adapter->cap);
2049 return status; 2064 return status;
2050} 2065}
2051 2066
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6044e12ff9fc..ff449de6f3c0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1182,6 +1182,7 @@ static ssize_t bonding_store_primary(struct device *d,
1182 ": %s: Setting %s as primary slave.\n", 1182 ": %s: Setting %s as primary slave.\n",
1183 bond->dev->name, slave->dev->name); 1183 bond->dev->name, slave->dev->name);
1184 bond->primary_slave = slave; 1184 bond->primary_slave = slave;
1185 strcpy(bond->params.primary, slave->dev->name);
1185 bond_select_active_slave(bond); 1186 bond_select_active_slave(bond);
1186 goto out; 1187 goto out;
1187 } 1188 }
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 211c8e9182fc..46c87ec7960c 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2733 cnic_ulp_init(dev); 2733 cnic_ulp_init(dev);
2734 else if (event == NETDEV_UNREGISTER) 2734 else if (event == NETDEV_UNREGISTER)
2735 cnic_ulp_exit(dev); 2735 cnic_ulp_exit(dev);
2736 else if (event == NETDEV_UP) { 2736
2737 if (event == NETDEV_UP) {
2737 if (cnic_register_netdev(dev) != 0) { 2738 if (cnic_register_netdev(dev) != 0) {
2738 cnic_put(dev); 2739 cnic_put(dev);
2739 goto done; 2740 goto done;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index a49235739eef..d8b09efdcb52 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.0.0" 15#define CNIC_MODULE_VERSION "2.0.1"
16#define CNIC_MODULE_RELDATE "May 21, 2009" 16#define CNIC_MODULE_RELDATE "Oct 01, 2009"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b53b40ba88a8..d1e0563a67df 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = {
1803 | FLAG_HAS_AMT 1803 | FLAG_HAS_AMT
1804 | FLAG_HAS_CTRLEXT_ON_LOAD, 1804 | FLAG_HAS_CTRLEXT_ON_LOAD,
1805 .pba = 20, 1805 .pba = 20,
1806 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1806 .max_hw_frame_size = DEFAULT_JUMBO,
1807 .get_variants = e1000_get_variants_82571, 1807 .get_variants = e1000_get_variants_82571,
1808 .mac_ops = &e82571_mac_ops, 1808 .mac_ops = &e82571_mac_ops,
1809 .phy_ops = &e82_phy_ops_bm, 1809 .phy_ops = &e82_phy_ops_bm,
@@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = {
1820 | FLAG_HAS_AMT 1820 | FLAG_HAS_AMT
1821 | FLAG_HAS_CTRLEXT_ON_LOAD, 1821 | FLAG_HAS_CTRLEXT_ON_LOAD,
1822 .pba = 20, 1822 .pba = 20,
1823 .max_hw_frame_size = DEFAULT_JUMBO, 1823 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1824 .get_variants = e1000_get_variants_82571, 1824 .get_variants = e1000_get_variants_82571,
1825 .mac_ops = &e82571_mac_ops, 1825 .mac_ops = &e82571_mac_ops,
1826 .phy_ops = &e82_phy_ops_bm, 1826 .phy_ops = &e82_phy_ops_bm,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 16c193a6c95c..0687c6aa4e46 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4982 goto err_pci_reg; 4982 goto err_pci_reg;
4983 4983
4984 /* AER (Advanced Error Reporting) hooks */ 4984 /* AER (Advanced Error Reporting) hooks */
4985 err = pci_enable_pcie_error_reporting(pdev); 4985 pci_enable_pcie_error_reporting(pdev);
4986 if (err) {
4987 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4988 "0x%x\n", err);
4989 /* non-fatal, continue */
4990 }
4991 4986
4992 pci_set_master(pdev); 4987 pci_set_master(pdev);
4993 /* PCI config space info */ 4988 /* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5263{ 5258{
5264 struct net_device *netdev = pci_get_drvdata(pdev); 5259 struct net_device *netdev = pci_get_drvdata(pdev);
5265 struct e1000_adapter *adapter = netdev_priv(netdev); 5260 struct e1000_adapter *adapter = netdev_priv(netdev);
5266 int err;
5267 5261
5268 /* 5262 /*
5269 * flush_scheduled work may reschedule our watchdog task, so 5263 * flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5299 free_netdev(netdev); 5293 free_netdev(netdev);
5300 5294
5301 /* AER disable */ 5295 /* AER disable */
5302 err = pci_disable_pcie_error_reporting(pdev); 5296 pci_disable_pcie_error_reporting(pdev);
5303 if (err)
5304 dev_err(&pdev->dev,
5305 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5306 5297
5307 pci_disable_device(pdev); 5298 pci_disable_device(pdev);
5308} 5299}
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b7311bc00258..34d0c69e67f7 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,10 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <net/ethoc.h> 20#include <net/ethoc.h>
21 21
22static int buffer_size = 0x8000; /* 32 KBytes */
23module_param(buffer_size, int, 0);
24MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
25
22/* register offsets */ 26/* register offsets */
23#define MODER 0x00 27#define MODER 0x00
24#define INT_SOURCE 0x04 28#define INT_SOURCE 0x04
@@ -167,6 +171,7 @@
167 * struct ethoc - driver-private device structure 171 * struct ethoc - driver-private device structure
168 * @iobase: pointer to I/O memory region 172 * @iobase: pointer to I/O memory region
169 * @membase: pointer to buffer memory region 173 * @membase: pointer to buffer memory region
174 * @dma_alloc: dma allocated buffer size
170 * @num_tx: number of send buffers 175 * @num_tx: number of send buffers
171 * @cur_tx: last send buffer written 176 * @cur_tx: last send buffer written
172 * @dty_tx: last buffer actually sent 177 * @dty_tx: last buffer actually sent
@@ -185,6 +190,7 @@
185struct ethoc { 190struct ethoc {
186 void __iomem *iobase; 191 void __iomem *iobase;
187 void __iomem *membase; 192 void __iomem *membase;
193 int dma_alloc;
188 194
189 unsigned int num_tx; 195 unsigned int num_tx;
190 unsigned int cur_tx; 196 unsigned int cur_tx;
@@ -284,7 +290,7 @@ static int ethoc_init_ring(struct ethoc *dev)
284 dev->cur_rx = 0; 290 dev->cur_rx = 0;
285 291
286 /* setup transmission buffers */ 292 /* setup transmission buffers */
287 bd.addr = 0; 293 bd.addr = virt_to_phys(dev->membase);
288 bd.stat = TX_BD_IRQ | TX_BD_CRC; 294 bd.stat = TX_BD_IRQ | TX_BD_CRC;
289 295
290 for (i = 0; i < dev->num_tx; i++) { 296 for (i = 0; i < dev->num_tx; i++) {
@@ -295,7 +301,6 @@ static int ethoc_init_ring(struct ethoc *dev)
295 bd.addr += ETHOC_BUFSIZ; 301 bd.addr += ETHOC_BUFSIZ;
296 } 302 }
297 303
298 bd.addr = dev->num_tx * ETHOC_BUFSIZ;
299 bd.stat = RX_BD_EMPTY | RX_BD_IRQ; 304 bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
300 305
301 for (i = 0; i < dev->num_rx; i++) { 306 for (i = 0; i < dev->num_rx; i++) {
@@ -400,8 +405,12 @@ static int ethoc_rx(struct net_device *dev, int limit)
400 if (ethoc_update_rx_stats(priv, &bd) == 0) { 405 if (ethoc_update_rx_stats(priv, &bd) == 0) {
401 int size = bd.stat >> 16; 406 int size = bd.stat >> 16;
402 struct sk_buff *skb = netdev_alloc_skb(dev, size); 407 struct sk_buff *skb = netdev_alloc_skb(dev, size);
408
409 size -= 4; /* strip the CRC */
410 skb_reserve(skb, 2); /* align TCP/IP header */
411
403 if (likely(skb)) { 412 if (likely(skb)) {
404 void *src = priv->membase + bd.addr; 413 void *src = phys_to_virt(bd.addr);
405 memcpy_fromio(skb_put(skb, size), src, size); 414 memcpy_fromio(skb_put(skb, size), src, size);
406 skb->protocol = eth_type_trans(skb, dev); 415 skb->protocol = eth_type_trans(skb, dev);
407 priv->stats.rx_packets++; 416 priv->stats.rx_packets++;
@@ -653,9 +662,9 @@ static int ethoc_open(struct net_device *dev)
653 if (ret) 662 if (ret)
654 return ret; 663 return ret;
655 664
656 /* calculate the number of TX/RX buffers */ 665 /* calculate the number of TX/RX buffers, maximum 128 supported */
657 num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; 666 num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
658 priv->num_tx = min(min_tx, num_bd / 4); 667 priv->num_tx = max(min_tx, num_bd / 4);
659 priv->num_rx = num_bd - priv->num_tx; 668 priv->num_rx = num_bd - priv->num_tx;
660 ethoc_write(priv, TX_BD_NUM, priv->num_tx); 669 ethoc_write(priv, TX_BD_NUM, priv->num_tx);
661 670
@@ -823,7 +832,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
823 else 832 else
824 bd.stat &= ~TX_BD_PAD; 833 bd.stat &= ~TX_BD_PAD;
825 834
826 dest = priv->membase + bd.addr; 835 dest = phys_to_virt(bd.addr);
827 memcpy_toio(dest, skb->data, skb->len); 836 memcpy_toio(dest, skb->data, skb->len);
828 837
829 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); 838 bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -903,22 +912,19 @@ static int ethoc_probe(struct platform_device *pdev)
903 912
904 /* obtain buffer memory space */ 913 /* obtain buffer memory space */
905 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 914 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
906 if (!res) { 915 if (res) {
907 dev_err(&pdev->dev, "cannot obtain memory space\n"); 916 mem = devm_request_mem_region(&pdev->dev, res->start,
908 ret = -ENXIO;
909 goto free;
910 }
911
912 mem = devm_request_mem_region(&pdev->dev, res->start,
913 res->end - res->start + 1, res->name); 917 res->end - res->start + 1, res->name);
914 if (!mem) { 918 if (!mem) {
915 dev_err(&pdev->dev, "cannot request memory space\n"); 919 dev_err(&pdev->dev, "cannot request memory space\n");
916 ret = -ENXIO; 920 ret = -ENXIO;
917 goto free; 921 goto free;
922 }
923
924 netdev->mem_start = mem->start;
925 netdev->mem_end = mem->end;
918 } 926 }
919 927
920 netdev->mem_start = mem->start;
921 netdev->mem_end = mem->end;
922 928
923 /* obtain device IRQ number */ 929 /* obtain device IRQ number */
924 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 930 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -933,6 +939,7 @@ static int ethoc_probe(struct platform_device *pdev)
933 /* setup driver-private data */ 939 /* setup driver-private data */
934 priv = netdev_priv(netdev); 940 priv = netdev_priv(netdev);
935 priv->netdev = netdev; 941 priv->netdev = netdev;
942 priv->dma_alloc = 0;
936 943
937 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, 944 priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
938 mmio->end - mmio->start + 1); 945 mmio->end - mmio->start + 1);
@@ -942,12 +949,27 @@ static int ethoc_probe(struct platform_device *pdev)
942 goto error; 949 goto error;
943 } 950 }
944 951
945 priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, 952 if (netdev->mem_end) {
946 mem->end - mem->start + 1); 953 priv->membase = devm_ioremap_nocache(&pdev->dev,
947 if (!priv->membase) { 954 netdev->mem_start, mem->end - mem->start + 1);
948 dev_err(&pdev->dev, "cannot remap memory space\n"); 955 if (!priv->membase) {
949 ret = -ENXIO; 956 dev_err(&pdev->dev, "cannot remap memory space\n");
950 goto error; 957 ret = -ENXIO;
958 goto error;
959 }
960 } else {
961 /* Allocate buffer memory */
962 priv->membase = dma_alloc_coherent(NULL,
963 buffer_size, (void *)&netdev->mem_start,
964 GFP_KERNEL);
965 if (!priv->membase) {
966 dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
967 buffer_size);
968 ret = -ENOMEM;
969 goto error;
970 }
971 netdev->mem_end = netdev->mem_start + buffer_size;
972 priv->dma_alloc = buffer_size;
951 } 973 }
952 974
953 /* Allow the platform setup code to pass in a MAC address. */ 975 /* Allow the platform setup code to pass in a MAC address. */
@@ -1034,6 +1056,9 @@ free_mdio:
1034 kfree(priv->mdio->irq); 1056 kfree(priv->mdio->irq);
1035 mdiobus_free(priv->mdio); 1057 mdiobus_free(priv->mdio);
1036free: 1058free:
1059 if (priv->dma_alloc)
1060 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1061 netdev->mem_start);
1037 free_netdev(netdev); 1062 free_netdev(netdev);
1038out: 1063out:
1039 return ret; 1064 return ret;
@@ -1059,7 +1084,9 @@ static int ethoc_remove(struct platform_device *pdev)
1059 kfree(priv->mdio->irq); 1084 kfree(priv->mdio->irq);
1060 mdiobus_free(priv->mdio); 1085 mdiobus_free(priv->mdio);
1061 } 1086 }
1062 1087 if (priv->dma_alloc)
1088 dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
1089 netdev->mem_start);
1063 unregister_netdev(netdev); 1090 unregister_netdev(netdev);
1064 free_netdev(netdev); 1091 free_netdev(netdev);
1065 } 1092 }
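
The ethoc hunk makes the second memory resource optional: if the platform describes dedicated buffer RAM it is ioremapped as before, otherwise the driver now falls back to dma_alloc_coherent() and records the allocation size so the error and remove paths can free it. A minimal sketch of that probe-time decision for a hypothetical driver (struct qux_priv, its fields and QUX_BUFSIZ are assumptions, not the ethoc code):

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#define QUX_BUFSIZ	0x8000		/* 32 KiB packet buffer */

struct qux_priv {
	void		*membase;	/* CPU view of the packet buffer */
	dma_addr_t	buf_phys;	/* bus address when DMA-allocated */
	int		dma_alloc;	/* non-zero: must be freed on remove */
};

static int qux_setup_buffer(struct platform_device *pdev,
			    struct qux_priv *priv)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (res) {
		/* dedicated buffer RAM described by the platform */
		priv->membase = devm_ioremap_nocache(&pdev->dev, res->start,
						     resource_size(res));
		return priv->membase ? 0 : -ENXIO;
	}

	/* no buffer resource: fall back to ordinary coherent DMA memory */
	priv->membase = dma_alloc_coherent(&pdev->dev, QUX_BUFSIZ,
					   &priv->buf_phys, GFP_KERNEL);
	if (!priv->membase)
		return -ENOMEM;

	priv->dma_alloc = QUX_BUFSIZ;	/* remove() must dma_free_coherent() */
	return 0;
}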
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 33b55f729742..db4b7f1603f6 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
258 } 258 }
259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) { 259 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
260 printk(KERN_INFO 260 printk(KERN_INFO
261 "mkiss: %s: Switchting to crc-smack\n", 261 "mkiss: %s: Switching to crc-smack\n",
262 ax->dev->name); 262 ax->dev->name);
263 ax->crcmode = CRC_MODE_SMACK; 263 ax->crcmode = CRC_MODE_SMACK;
264 } 264 }
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
272 } 272 }
273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) { 273 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
274 printk(KERN_INFO 274 printk(KERN_INFO
275 "mkiss: %s: Switchting to crc-flexnet\n", 275 "mkiss: %s: Switching to crc-flexnet\n",
276 ax->dev->name); 276 ax->dev->name);
277 ax->crcmode = CRC_MODE_FLEX; 277 ax->crcmode = CRC_MODE_FLEX;
278 } 278 }
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5d6c1530a8c0..714c3a4a44ef 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1246 if (err) 1246 if (err)
1247 goto err_pci_reg; 1247 goto err_pci_reg;
1248 1248
1249 err = pci_enable_pcie_error_reporting(pdev); 1249 pci_enable_pcie_error_reporting(pdev);
1250 if (err) {
1251 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
1252 "0x%x\n", err);
1253 /* non-fatal, continue */
1254 }
1255 1250
1256 pci_set_master(pdev); 1251 pci_set_master(pdev);
1257 pci_save_state(pdev); 1252 pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1628 struct net_device *netdev = pci_get_drvdata(pdev); 1623 struct net_device *netdev = pci_get_drvdata(pdev);
1629 struct igb_adapter *adapter = netdev_priv(netdev); 1624 struct igb_adapter *adapter = netdev_priv(netdev);
1630 struct e1000_hw *hw = &adapter->hw; 1625 struct e1000_hw *hw = &adapter->hw;
1631 int err;
1632 1626
1633 /* flush_scheduled work may reschedule our watchdog task, so 1627 /* flush_scheduled work may reschedule our watchdog task, so
1634 * explicitly disable watchdog tasks from being rescheduled */ 1628 * explicitly disable watchdog tasks from being rescheduled */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682 1676
1683 free_netdev(netdev); 1677 free_netdev(netdev);
1684 1678
1685 err = pci_disable_pcie_error_reporting(pdev); 1679 pci_disable_pcie_error_reporting(pdev);
1686 if (err)
1687 dev_err(&pdev->dev,
1688 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
1689 1680
1690 pci_disable_device(pdev); 1681 pci_disable_device(pdev);
1691} 1682}
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index e36e951cbc65..aa7286bc4364 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
495 cnx->remote_lp); 495 cnx->remote_lp);
496 } else { 496 } else {
497 memcpy(&cnx->cap_ack_event, event, 497 memcpy(&cnx->cap_ack_event, event,
498 sizeof(&cnx->cap_ack_event)); 498 sizeof(cnx->cap_ack_event));
499 cnx->state |= VETH_STATE_GOTCAPACK; 499 cnx->state |= VETH_STATE_GOTCAPACK;
500 veth_kick_statemachine(cnx); 500 veth_kick_statemachine(cnx);
501 } 501 }
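
The iseries_veth fix is the classic sizeof-of-a-pointer bug: sizeof(&cnx->cap_ack_event) is the size of a pointer, so the memcpy() copied only 4 or 8 bytes of the capability-ack event instead of the whole structure. A tiny stand-alone illustration:

#include <stdio.h>
#include <string.h>

struct event {
	char payload[64];
};

int main(void)
{
	struct event src = { "hello" }, dst;

	printf("sizeof(&src) = %zu, sizeof(src) = %zu\n",
	       sizeof(&src), sizeof(src));

	memcpy(&dst, &src, sizeof(src));	/* correct: whole struct */
	/* memcpy(&dst, &src, sizeof(&src));	bug: pointer-sized copy */
	printf("copied: %s\n", dst.payload);
	return 0;
}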
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 56b12f3192f1..e2d5343f1275 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
425#endif /* CONFIG_DCB */ 425#endif /* CONFIG_DCB */
426 default: 426 default:
427 hw_dbg(hw, "Flow control param set incorrectly\n"); 427 hw_dbg(hw, "Flow control param set incorrectly\n");
428 ret_val = -IXGBE_ERR_CONFIG; 428 ret_val = IXGBE_ERR_CONFIG;
429 goto out; 429 goto out;
430 break; 430 break;
431 } 431 }
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 2ec58dcdb82b..34b04924c8a1 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
330 330
331 switch (hw->device_id) { 331 switch (hw->device_id) {
332 case IXGBE_DEV_ID_82599_KX4: 332 case IXGBE_DEV_ID_82599_KX4:
333 case IXGBE_DEV_ID_82599_KX4_MEZZ:
334 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
333 case IXGBE_DEV_ID_82599_XAUI_LOM: 335 case IXGBE_DEV_ID_82599_XAUI_LOM:
334 /* Default device ID is mezzanine card KX/KX4 */ 336 /* Default device ID is mezzanine card KX/KX4 */
335 media_type = ixgbe_media_type_backplane; 337 media_type = ixgbe_media_type_backplane;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 6621e172df3d..40ff120a9ad4 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1355/** 1355/**
1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1356 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1357 * @hw: pointer to hardware structure 1357 * @hw: pointer to hardware structure
1358 * @addr_list: the list of new addresses 1358 * @uc_list: the list of new addresses
1359 * @addr_count: number of addresses
1360 * @next: iterator function to walk the address list
1361 * 1359 *
1362 * The given list replaces any existing list. Clears the secondary addrs from 1360 * The given list replaces any existing list. Clears the secondary addrs from
1363 * receive address registers. Uses unused receive address registers for the 1361 * receive address registers. Uses unused receive address registers for the
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1663#endif /* CONFIG_DCB */ 1661#endif /* CONFIG_DCB */
1664 default: 1662 default:
1665 hw_dbg(hw, "Flow control param set incorrectly\n"); 1663 hw_dbg(hw, "Flow control param set incorrectly\n");
1666 ret_val = -IXGBE_ERR_CONFIG; 1664 ret_val = IXGBE_ERR_CONFIG;
1667 goto out; 1665 goto out;
1668 break; 1666 break;
1669 } 1667 }
@@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1734 s32 ret_val = 0; 1732 s32 ret_val = 0;
1735 ixgbe_link_speed speed; 1733 ixgbe_link_speed speed;
1736 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 1734 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
1735 u32 links2, anlp1_reg, autoc_reg, links;
1737 bool link_up; 1736 bool link_up;
1738 1737
1739 /* 1738 /*
1740 * AN should have completed when the cable was plugged in. 1739 * AN should have completed when the cable was plugged in.
1741 * Look for reasons to bail out. Bail out if: 1740 * Look for reasons to bail out. Bail out if:
1742 * - FC autoneg is disabled, or if 1741 * - FC autoneg is disabled, or if
1743 * - we don't have multispeed fiber, or if 1742 * - link is not up.
1744 * - we're not running at 1G, or if
1745 * - link is not up, or if
1746 * - link is up but AN did not complete, or if
1747 * - link is up and AN completed but timed out
1748 * 1743 *
1749 * Since we're being called from an LSC, link is already know to be up. 1744 * Since we're being called from an LSC, link is already known to be up.
1750 * So use link_up_wait_to_complete=false. 1745 * So use link_up_wait_to_complete=false.
1751 */ 1746 */
1752 hw->mac.ops.check_link(hw, &speed, &link_up, false); 1747 hw->mac.ops.check_link(hw, &speed, &link_up, false);
1753 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 1748
1754 1749 if (hw->fc.disable_fc_autoneg || (!link_up)) {
1755 if (hw->fc.disable_fc_autoneg ||
1756 !hw->phy.multispeed_fiber ||
1757 (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
1758 !link_up ||
1759 ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1760 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1761 hw->fc.fc_was_autonegged = false; 1750 hw->fc.fc_was_autonegged = false;
1762 hw->fc.current_mode = hw->fc.requested_mode; 1751 hw->fc.current_mode = hw->fc.requested_mode;
1763 hw_dbg(hw, "Autoneg FC was skipped.\n");
1764 goto out; 1752 goto out;
1765 } 1753 }
1766 1754
1767 /* 1755 /*
1756 * On backplane, bail out if
1757 * - backplane autoneg was not completed, or if
1758 * - link partner is not AN enabled
1759 */
1760 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1761 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1762 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1763 if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
1764 ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
1765 hw->fc.fc_was_autonegged = false;
1766 hw->fc.current_mode = hw->fc.requested_mode;
1767 goto out;
1768 }
1769 }
1770
1771 /*
1772 * On multispeed fiber at 1g, bail out if
1773 * - link is up but AN did not complete, or if
1774 * - link is up and AN completed but timed out
1775 */
1776 if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
1777 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
1778 if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
1779 ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
1780 hw->fc.fc_was_autonegged = false;
1781 hw->fc.current_mode = hw->fc.requested_mode;
1782 goto out;
1783 }
1784 }
1785
1786 /*
1768 * Read the AN advertisement and LP ability registers and resolve 1787 * Read the AN advertisement and LP ability registers and resolve
1769 * local flow control settings accordingly 1788 * local flow control settings accordingly
1770 */ 1789 */
1771 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 1790 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
1772 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 1791 (hw->phy.media_type != ixgbe_media_type_backplane)) {
1773 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && 1792 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
1774 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { 1793 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
1794 if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1795 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
1796 /*
1797 * Now we need to check if the user selected Rx ONLY
1798 * of pause frames. In this case, we had to advertise
1799 * FULL flow control because we could not advertise RX
1800 * ONLY. Hence, we must now check to see if we need to
1801 * turn OFF the TRANSMISSION of PAUSE frames.
1802 */
1803 if (hw->fc.requested_mode == ixgbe_fc_full) {
1804 hw->fc.current_mode = ixgbe_fc_full;
1805 hw_dbg(hw, "Flow Control = FULL.\n");
1806 } else {
1807 hw->fc.current_mode = ixgbe_fc_rx_pause;
1808 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1809 }
1810 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1811 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1812 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1813 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1814 hw->fc.current_mode = ixgbe_fc_tx_pause;
1815 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1816 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1817 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1818 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1819 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1820 hw->fc.current_mode = ixgbe_fc_rx_pause;
1821 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1822 } else {
1823 hw->fc.current_mode = ixgbe_fc_none;
1824 hw_dbg(hw, "Flow Control = NONE.\n");
1825 }
1826 }
1827
1828 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1775 /* 1829 /*
1776 * Now we need to check if the user selected Rx ONLY 1830 * Read the 10g AN autoc and LP ability registers and resolve
1777 * of pause frames. In this case, we had to advertise 1831 * local flow control settings accordingly
1778 * FULL flow control because we could not advertise RX
1779 * ONLY. Hence, we must now check to see if we need to
1780 * turn OFF the TRANSMISSION of PAUSE frames.
1781 */ 1832 */
1782 if (hw->fc.requested_mode == ixgbe_fc_full) { 1833 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1783 hw->fc.current_mode = ixgbe_fc_full; 1834 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
1784 hw_dbg(hw, "Flow Control = FULL.\n"); 1835
1785 } else { 1836 if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1837 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
1838 /*
1839 * Now we need to check if the user selected Rx ONLY
1840 * of pause frames. In this case, we had to advertise
1841 * FULL flow control because we could not advertise RX
1842 * ONLY. Hence, we must now check to see if we need to
1843 * turn OFF the TRANSMISSION of PAUSE frames.
1844 */
1845 if (hw->fc.requested_mode == ixgbe_fc_full) {
1846 hw->fc.current_mode = ixgbe_fc_full;
1847 hw_dbg(hw, "Flow Control = FULL.\n");
1848 } else {
1849 hw->fc.current_mode = ixgbe_fc_rx_pause;
1850 hw_dbg(hw, "Flow Control=RX PAUSE only\n");
1851 }
1852 } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1853 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1854 (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1855 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1856 hw->fc.current_mode = ixgbe_fc_tx_pause;
1857 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1858 } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
1859 (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
1860 !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
1861 (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
1786 hw->fc.current_mode = ixgbe_fc_rx_pause; 1862 hw->fc.current_mode = ixgbe_fc_rx_pause;
1787 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); 1863 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1864 } else {
1865 hw->fc.current_mode = ixgbe_fc_none;
1866 hw_dbg(hw, "Flow Control = NONE.\n");
1788 } 1867 }
1789 } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1790 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1791 (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1792 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1793 hw->fc.current_mode = ixgbe_fc_tx_pause;
1794 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
1795 } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1796 (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
1797 !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
1798 (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
1799 hw->fc.current_mode = ixgbe_fc_rx_pause;
1800 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
1801 } else {
1802 hw->fc.current_mode = ixgbe_fc_none;
1803 hw_dbg(hw, "Flow Control = NONE.\n");
1804 } 1868 }
1805
1806 /* Record that current_mode is the result of a successful autoneg */ 1869 /* Record that current_mode is the result of a successful autoneg */
1807 hw->fc.fc_was_autonegged = true; 1870 hw->fc.fc_was_autonegged = true;
1808 1871
@@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1919#endif /* CONFIG_DCB */ 1982#endif /* CONFIG_DCB */
1920 default: 1983 default:
1921 hw_dbg(hw, "Flow control param set incorrectly\n"); 1984 hw_dbg(hw, "Flow control param set incorrectly\n");
1922 ret_val = -IXGBE_ERR_CONFIG; 1985 ret_val = IXGBE_ERR_CONFIG;
1923 goto out; 1986 goto out;
1924 break; 1987 break;
1925 } 1988 }
@@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1927 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); 1990 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
1928 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); 1991 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
1929 1992
1930 /* Enable and restart autoneg to inform the link partner */
1931 reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
1932
1933 /* Disable AN timeout */ 1993 /* Disable AN timeout */
1934 if (hw->fc.strict_ieee) 1994 if (hw->fc.strict_ieee)
1935 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; 1995 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1937 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); 1997 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
1938 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); 1998 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
1939 1999
2000 /*
2001 * Set up the 10G flow control advertisement registers so the HW
2002 * can do fc autoneg once the cable is plugged in. If we end up
2003 * using 1g instead, this is harmless.
2004 */
2005 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2006
2007 /*
2008 * The possible values of fc.requested_mode are:
2009 * 0: Flow control is completely disabled
2010 * 1: Rx flow control is enabled (we can receive pause frames,
2011 * but not send pause frames).
2012 * 2: Tx flow control is enabled (we can send pause frames but
2013 * we do not support receiving pause frames).
2014 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2015 * other: Invalid.
2016 */
2017 switch (hw->fc.requested_mode) {
2018 case ixgbe_fc_none:
2019 /* Flow control completely disabled by software override. */
2020 reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2021 break;
2022 case ixgbe_fc_rx_pause:
2023 /*
2024 * Rx Flow control is enabled and Tx Flow control is
2025 * disabled by software override. Since there really
2026 * isn't a way to advertise that we are capable of RX
2027 * Pause ONLY, we will advertise that we support both
2028 * symmetric and asymmetric Rx PAUSE. Later, we will
2029 * disable the adapter's ability to send PAUSE frames.
2030 */
2031 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2032 break;
2033 case ixgbe_fc_tx_pause:
2034 /*
2035 * Tx Flow control is enabled, and Rx Flow control is
2036 * disabled by software override.
2037 */
2038 reg |= (IXGBE_AUTOC_ASM_PAUSE);
2039 reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
2040 break;
2041 case ixgbe_fc_full:
2042 /* Flow control (both Rx and Tx) is enabled by SW override. */
2043 reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
2044 break;
2045#ifdef CONFIG_DCB
2046 case ixgbe_fc_pfc:
2047 goto out;
2048 break;
2049#endif /* CONFIG_DCB */
2050 default:
2051 hw_dbg(hw, "Flow control param set incorrectly\n");
2052 ret_val = IXGBE_ERR_CONFIG;
2053 goto out;
2054 break;
2055 }
2056 /*
2057 * AUTOC restart handles negotiation of 1G and 10G. There is
2058 * no need to set the PCS1GCTL register.
2059 */
2060 reg |= IXGBE_AUTOC_AN_RESTART;
2061 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2062 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2063
1940out: 2064out:
1941 return ret_val; 2065 return ret_val;
1942} 2066}
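
The requested_mode switch added above maps each flow-control mode onto the two AUTOC pause-advertisement bits defined later in this patch. A minimal sketch of that mapping as a hypothetical standalone helper (not part of the driver; the DCB/PFC and error paths are folded into the default case):

static u32 fc_autoc_pause_bits(enum ixgbe_fc_mode mode, u32 autoc)
{
        /* Hypothetical helper mirroring the advertisement rule above. */
        autoc &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);

        switch (mode) {
        case ixgbe_fc_rx_pause:         /* no rx-only bit: advertise both */
        case ixgbe_fc_full:
                autoc |= IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE;
                break;
        case ixgbe_fc_tx_pause:         /* asymmetric pause only */
                autoc |= IXGBE_AUTOC_ASM_PAUSE;
                break;
        default:                        /* ixgbe_fc_none and everything else */
                break;
        }
        return autoc;
}
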
@@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2000 2124
2001 while (timeout) { 2125 while (timeout) {
2002 if (ixgbe_get_eeprom_semaphore(hw)) 2126 if (ixgbe_get_eeprom_semaphore(hw))
2003 return -IXGBE_ERR_SWFW_SYNC; 2127 return IXGBE_ERR_SWFW_SYNC;
2004 2128
2005 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); 2129 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2006 if (!(gssr & (fwmask | swmask))) 2130 if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2017 2141
2018 if (!timeout) { 2142 if (!timeout) {
2019 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); 2143 hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
2020 return -IXGBE_ERR_SWFW_SYNC; 2144 return IXGBE_ERR_SWFW_SYNC;
2021 } 2145 }
2022 2146
2023 gssr |= swmask; 2147 gssr |= swmask;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 53b0a6680254..fa314cb005a4 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, 54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, 55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
56 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
57 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
58 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
59 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
56 {"lsc_int", IXGBE_STAT(lsc_int)}, 60 {"lsc_int", IXGBE_STAT(lsc_int)},
57 {"tx_busy", IXGBE_STAT(tx_busy)}, 61 {"tx_busy", IXGBE_STAT(tx_busy)},
58 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 62 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c407bd9de0dd..cbb143ca1eb8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
49static const char ixgbe_driver_string[] = 49static const char ixgbe_driver_string[] =
50 "Intel(R) 10 Gigabit PCI Express Network Driver"; 50 "Intel(R) 10 Gigabit PCI Express Network Driver";
51 51
52#define DRV_VERSION "2.0.37-k2" 52#define DRV_VERSION "2.0.44-k2"
53const char ixgbe_driver_version[] = DRV_VERSION; 53const char ixgbe_driver_version[] = DRV_VERSION;
54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation."; 54static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
55 55
@@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
97 board_82599 }, 97 board_82599 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
99 board_82599 }, 99 board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
101 board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
101 board_82599 }, 103 board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
105 board_82599 },
102 106
103 /* required last entry */ 107 /* required last entry */
104 {0, } 108 {0, }
@@ -1885,12 +1889,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1885 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1889 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1886 adapter->tx_ring[i].head = IXGBE_TDH(j); 1890 adapter->tx_ring[i].head = IXGBE_TDH(j);
1887 adapter->tx_ring[i].tail = IXGBE_TDT(j); 1891 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1888 /* Disable Tx Head Writeback RO bit, since this hoses 1892 /*
1893 * Disable Tx Head Writeback RO bit, since this hoses
1889 * bookkeeping if things aren't delivered in order. 1894 * bookkeeping if things aren't delivered in order.
1890 */ 1895 */
1891 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 1896 switch (hw->mac.type) {
1897 case ixgbe_mac_82598EB:
1898 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1899 break;
1900 case ixgbe_mac_82599EB:
1901 default:
1902 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
1903 break;
1904 }
1892 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1905 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1893 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); 1906 switch (hw->mac.type) {
1907 case ixgbe_mac_82598EB:
1908 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1909 break;
1910 case ixgbe_mac_82599EB:
1911 default:
1912 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
1913 break;
1914 }
1894 } 1915 }
1895 if (hw->mac.type == ixgbe_mac_82599EB) { 1916 if (hw->mac.type == ixgbe_mac_82599EB) {
1896 /* We enable 8 traffic classes, DCB only */ 1917 /* We enable 8 traffic classes, DCB only */
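
The duplicated MAC-type switches above pick the per-queue DCA_TXCTRL register because 82599 moved it to a different offset macro. If one wanted to factor that choice out, a hypothetical helper (not in the patch) could resolve the register once:

static u32 ixgbe_dca_txctrl_reg(struct ixgbe_hw *hw, int queue)
{
        /* Hypothetical: centralize the 82598 vs 82599 register selection. */
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                return IXGBE_DCA_TXCTRL(queue);
        case ixgbe_mac_82599EB:
        default:
                return IXGBE_DCA_TXCTRL_82599(queue);
        }
}
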
@@ -4432,10 +4453,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4432 4453
4433 /* 82598 hardware only has a 32 bit counter in the high register */ 4454 /* 82598 hardware only has a 32 bit counter in the high register */
4434 if (hw->mac.type == ixgbe_mac_82599EB) { 4455 if (hw->mac.type == ixgbe_mac_82599EB) {
4456 u64 tmp;
4435 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 4457 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4436 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 4458 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
4459 adapter->stats.gorc += (tmp << 32);
4437 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 4460 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4438 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ 4461 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
4462 adapter->stats.gotc += (tmp << 32);
4439 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); 4463 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4440 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 4464 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4441 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 4465 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
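
The GORC/GOTC change above stops discarding the high register and instead folds its low 4 bits into bits 35:32 of the running total: these counters are 36 bits wide, split across a 32-bit low register and a high register whose read also clears the count. A self-contained sketch of the combining step (illustrative, not driver code):

static u64 combine_split_counter(u32 low, u32 high)
{
        /* 36-bit counter: 32 bits in 'low', 4 valid bits in 'high'. */
        return (u64)low | ((u64)(high & 0xF) << 32);
}
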
@@ -5071,7 +5095,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5071 /* Right now, we support IPv4 only */ 5095 /* Right now, we support IPv4 only */
5072 struct ixgbe_atr_input atr_input; 5096 struct ixgbe_atr_input atr_input;
5073 struct tcphdr *th; 5097 struct tcphdr *th;
5074 struct udphdr *uh;
5075 struct iphdr *iph = ip_hdr(skb); 5098 struct iphdr *iph = ip_hdr(skb);
5076 struct ethhdr *eth = (struct ethhdr *)skb->data; 5099 struct ethhdr *eth = (struct ethhdr *)skb->data;
5077 u16 vlan_id, src_port, dst_port, flex_bytes; 5100 u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5085,12 +5108,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5085 dst_port = th->dest; 5108 dst_port = th->dest;
5086 l4type |= IXGBE_ATR_L4TYPE_TCP; 5109 l4type |= IXGBE_ATR_L4TYPE_TCP;
5087 /* l4type IPv4 type is 0, no need to assign */ 5110 /* l4type IPv4 type is 0, no need to assign */
5088 } else if(iph->protocol == IPPROTO_UDP) {
5089 uh = udp_hdr(skb);
5090 src_port = uh->source;
5091 dst_port = uh->dest;
5092 l4type |= IXGBE_ATR_L4TYPE_UDP;
5093 /* l4type IPv4 type is 0, no need to assign */
5094 } else { 5111 } else {
5095 /* Unsupported L4 header, just bail here */ 5112 /* Unsupported L4 header, just bail here */
5096 return; 5113 return;
@@ -5494,12 +5511,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5494 goto err_pci_reg; 5511 goto err_pci_reg;
5495 } 5512 }
5496 5513
5497 err = pci_enable_pcie_error_reporting(pdev); 5514 pci_enable_pcie_error_reporting(pdev);
5498 if (err) {
5499 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
5500 "0x%x\n", err);
5501 /* non-fatal, continue */
5502 }
5503 5515
5504 pci_set_master(pdev); 5516 pci_set_master(pdev);
5505 pci_save_state(pdev); 5517 pci_save_state(pdev);
@@ -5808,7 +5820,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5808{ 5820{
5809 struct net_device *netdev = pci_get_drvdata(pdev); 5821 struct net_device *netdev = pci_get_drvdata(pdev);
5810 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5822 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5811 int err;
5812 5823
5813 set_bit(__IXGBE_DOWN, &adapter->state); 5824 set_bit(__IXGBE_DOWN, &adapter->state);
5814 /* clear the module not found bit to make sure the worker won't 5825 /* clear the module not found bit to make sure the worker won't
@@ -5859,10 +5870,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
5859 5870
5860 free_netdev(netdev); 5871 free_netdev(netdev);
5861 5872
5862 err = pci_disable_pcie_error_reporting(pdev); 5873 pci_disable_pcie_error_reporting(pdev);
5863 if (err)
5864 dev_err(&pdev->dev,
5865 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5866 5874
5867 pci_disable_device(pdev); 5875 pci_disable_device(pdev);
5868} 5876}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8761d7899f7d..ef4bdd58e016 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -49,9 +49,11 @@
49#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 49#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
50#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 50#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
51#define IXGBE_DEV_ID_82599_KX4 0x10F7 51#define IXGBE_DEV_ID_82599_KX4 0x10F7
52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
52#define IXGBE_DEV_ID_82599_CX4 0x10F9 53#define IXGBE_DEV_ID_82599_CX4 0x10F9
53#define IXGBE_DEV_ID_82599_SFP 0x10FB 54#define IXGBE_DEV_ID_82599_SFP 0x10FB
54#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 55#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
56#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
55 57
56/* General Registers */ 58/* General Registers */
57#define IXGBE_CTRL 0x00000 59#define IXGBE_CTRL 0x00000
@@ -1336,6 +1338,8 @@
1336#define IXGBE_AUTOC_KX4_SUPP 0x80000000 1338#define IXGBE_AUTOC_KX4_SUPP 0x80000000
1337#define IXGBE_AUTOC_KX_SUPP 0x40000000 1339#define IXGBE_AUTOC_KX_SUPP 0x40000000
1338#define IXGBE_AUTOC_PAUSE 0x30000000 1340#define IXGBE_AUTOC_PAUSE 0x30000000
1341#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
1342#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
1339#define IXGBE_AUTOC_RF 0x08000000 1343#define IXGBE_AUTOC_RF 0x08000000
1340#define IXGBE_AUTOC_PD_TMR 0x06000000 1344#define IXGBE_AUTOC_PD_TMR 0x06000000
1341#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 1345#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
@@ -1404,6 +1408,8 @@
1404#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ 1408#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
1405#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 1409#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
1406 1410
1411#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
1412
1407/* PCS1GLSTA Bit Masks */ 1413/* PCS1GLSTA Bit Masks */
1408#define IXGBE_PCS1GLSTA_LINK_OK 1 1414#define IXGBE_PCS1GLSTA_LINK_OK 1
1409#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 1415#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1424,6 +1430,11 @@
1424#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 1430#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
1425#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 1431#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
1426 1432
1433/* ANLP1 Bit Masks */
1434#define IXGBE_ANLP1_PAUSE 0x0C00
1435#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1436#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1437
1427/* SW Semaphore Register bitmasks */ 1438/* SW Semaphore Register bitmasks */
1428#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1439#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1429#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1440#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
new file mode 100644
index 000000000000..0be14d702beb
--- /dev/null
+++ b/drivers/net/ks8851_mll.c
@@ -0,0 +1,1697 @@
1/**
2 * drivers/net/ks8851_mll.c
3 * Copyright (c) 2009 Micrel Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/**
20 * Supports:
21 * KS8851 16bit MLL chip from Micrel Inc.
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/cache.h>
30#include <linux/crc32.h>
31#include <linux/mii.h>
32#include <linux/platform_device.h>
33#include <linux/delay.h>
34
35#define DRV_NAME "ks8851_mll"
36
37static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
38#define MAX_RECV_FRAMES 32
39#define MAX_BUF_SIZE 2048
40#define TX_BUF_SIZE 2000
41#define RX_BUF_SIZE 2000
42
43#define KS_CCR 0x08
44#define CCR_EEPROM (1 << 9)
45#define CCR_SPI (1 << 8)
46#define CCR_8BIT (1 << 7)
47#define CCR_16BIT (1 << 6)
48#define CCR_32BIT (1 << 5)
49#define CCR_SHARED (1 << 4)
50#define CCR_32PIN (1 << 0)
51
52/* MAC address registers */
53#define KS_MARL 0x10
54#define KS_MARM 0x12
55#define KS_MARH 0x14
56
57#define KS_OBCR 0x20
58#define OBCR_ODS_16MA (1 << 6)
59
60#define KS_EEPCR 0x22
61#define EEPCR_EESA (1 << 4)
62#define EEPCR_EESB (1 << 3)
63#define EEPCR_EEDO (1 << 2)
64#define EEPCR_EESCK (1 << 1)
65#define EEPCR_EECS (1 << 0)
66
67#define KS_MBIR 0x24
68#define MBIR_TXMBF (1 << 12)
69#define MBIR_TXMBFA (1 << 11)
70#define MBIR_RXMBF (1 << 4)
71#define MBIR_RXMBFA (1 << 3)
72
73#define KS_GRR 0x26
74#define GRR_QMU (1 << 1)
75#define GRR_GSR (1 << 0)
76
77#define KS_WFCR 0x2A
78#define WFCR_MPRXE (1 << 7)
79#define WFCR_WF3E (1 << 3)
80#define WFCR_WF2E (1 << 2)
81#define WFCR_WF1E (1 << 1)
82#define WFCR_WF0E (1 << 0)
83
84#define KS_WF0CRC0 0x30
85#define KS_WF0CRC1 0x32
86#define KS_WF0BM0 0x34
87#define KS_WF0BM1 0x36
88#define KS_WF0BM2 0x38
89#define KS_WF0BM3 0x3A
90
91#define KS_WF1CRC0 0x40
92#define KS_WF1CRC1 0x42
93#define KS_WF1BM0 0x44
94#define KS_WF1BM1 0x46
95#define KS_WF1BM2 0x48
96#define KS_WF1BM3 0x4A
97
98#define KS_WF2CRC0 0x50
99#define KS_WF2CRC1 0x52
100#define KS_WF2BM0 0x54
101#define KS_WF2BM1 0x56
102#define KS_WF2BM2 0x58
103#define KS_WF2BM3 0x5A
104
105#define KS_WF3CRC0 0x60
106#define KS_WF3CRC1 0x62
107#define KS_WF3BM0 0x64
108#define KS_WF3BM1 0x66
109#define KS_WF3BM2 0x68
110#define KS_WF3BM3 0x6A
111
112#define KS_TXCR 0x70
113#define TXCR_TCGICMP (1 << 8)
114#define TXCR_TCGUDP (1 << 7)
115#define TXCR_TCGTCP (1 << 6)
116#define TXCR_TCGIP (1 << 5)
117#define TXCR_FTXQ (1 << 4)
118#define TXCR_TXFCE (1 << 3)
119#define TXCR_TXPE (1 << 2)
120#define TXCR_TXCRC (1 << 1)
121#define TXCR_TXE (1 << 0)
122
123#define KS_TXSR 0x72
124#define TXSR_TXLC (1 << 13)
125#define TXSR_TXMC (1 << 12)
126#define TXSR_TXFID_MASK (0x3f << 0)
127#define TXSR_TXFID_SHIFT (0)
128#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
129
130
131#define KS_RXCR1 0x74
132#define RXCR1_FRXQ (1 << 15)
133#define RXCR1_RXUDPFCC (1 << 14)
134#define RXCR1_RXTCPFCC (1 << 13)
135#define RXCR1_RXIPFCC (1 << 12)
136#define RXCR1_RXPAFMA (1 << 11)
137#define RXCR1_RXFCE (1 << 10)
138#define RXCR1_RXEFE (1 << 9)
139#define RXCR1_RXMAFMA (1 << 8)
140#define RXCR1_RXBE (1 << 7)
141#define RXCR1_RXME (1 << 6)
142#define RXCR1_RXUE (1 << 5)
143#define RXCR1_RXAE (1 << 4)
144#define RXCR1_RXINVF (1 << 1)
145#define RXCR1_RXE (1 << 0)
146#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
147 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
148
149#define KS_RXCR2 0x76
150#define RXCR2_SRDBL_MASK (0x7 << 5)
151#define RXCR2_SRDBL_SHIFT (5)
152#define RXCR2_SRDBL_4B (0x0 << 5)
153#define RXCR2_SRDBL_8B (0x1 << 5)
154#define RXCR2_SRDBL_16B (0x2 << 5)
155#define RXCR2_SRDBL_32B (0x3 << 5)
156/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
157#define RXCR2_IUFFP (1 << 4)
158#define RXCR2_RXIUFCEZ (1 << 3)
159#define RXCR2_UDPLFE (1 << 2)
160#define RXCR2_RXICMPFCC (1 << 1)
161#define RXCR2_RXSAF (1 << 0)
162
163#define KS_TXMIR 0x78
164
165#define KS_RXFHSR 0x7C
166#define RXFSHR_RXFV (1 << 15)
167#define RXFSHR_RXICMPFCS (1 << 13)
168#define RXFSHR_RXIPFCS (1 << 12)
169#define RXFSHR_RXTCPFCS (1 << 11)
170#define RXFSHR_RXUDPFCS (1 << 10)
171#define RXFSHR_RXBF (1 << 7)
172#define RXFSHR_RXMF (1 << 6)
173#define RXFSHR_RXUF (1 << 5)
174#define RXFSHR_RXMR (1 << 4)
175#define RXFSHR_RXFT (1 << 3)
176#define RXFSHR_RXFTL (1 << 2)
177#define RXFSHR_RXRF (1 << 1)
178#define RXFSHR_RXCE (1 << 0)
179#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
180 RXFSHR_RXFTL | RXFSHR_RXMR |\
181 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
182 RXFSHR_RXTCPFCS)
183#define KS_RXFHBCR 0x7E
184#define RXFHBCR_CNT_MASK 0x0FFF
185
186#define KS_TXQCR 0x80
187#define TXQCR_AETFE (1 << 2)
188#define TXQCR_TXQMAM (1 << 1)
189#define TXQCR_METFE (1 << 0)
190
191#define KS_RXQCR 0x82
192#define RXQCR_RXDTTS (1 << 12)
193#define RXQCR_RXDBCTS (1 << 11)
194#define RXQCR_RXFCTS (1 << 10)
195#define RXQCR_RXIPHTOE (1 << 9)
196#define RXQCR_RXDTTE (1 << 7)
197#define RXQCR_RXDBCTE (1 << 6)
198#define RXQCR_RXFCTE (1 << 5)
199#define RXQCR_ADRFE (1 << 4)
200#define RXQCR_SDA (1 << 3)
201#define RXQCR_RRXEF (1 << 0)
202#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
203
204#define KS_TXFDPR 0x84
205#define TXFDPR_TXFPAI (1 << 14)
206#define TXFDPR_TXFP_MASK (0x7ff << 0)
207#define TXFDPR_TXFP_SHIFT (0)
208
209#define KS_RXFDPR 0x86
210#define RXFDPR_RXFPAI (1 << 14)
211
212#define KS_RXDTTR 0x8C
213#define KS_RXDBCTR 0x8E
214
215#define KS_IER 0x90
216#define KS_ISR 0x92
217#define IRQ_LCI (1 << 15)
218#define IRQ_TXI (1 << 14)
219#define IRQ_RXI (1 << 13)
220#define IRQ_RXOI (1 << 11)
221#define IRQ_TXPSI (1 << 9)
222#define IRQ_RXPSI (1 << 8)
223#define IRQ_TXSAI (1 << 6)
224#define IRQ_RXWFDI (1 << 5)
225#define IRQ_RXMPDI (1 << 4)
226#define IRQ_LDI (1 << 3)
227#define IRQ_EDI (1 << 2)
228#define IRQ_SPIBEI (1 << 1)
229#define IRQ_DEDI (1 << 0)
230
231#define KS_RXFCTR 0x9C
232#define RXFCTR_THRESHOLD_MASK 0x00FF
233
234#define KS_RXFC 0x9D
235#define RXFCTR_RXFC_MASK (0xff << 8)
236#define RXFCTR_RXFC_SHIFT (8)
237#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
238#define RXFCTR_RXFCT_MASK (0xff << 0)
239#define RXFCTR_RXFCT_SHIFT (0)
240
241#define KS_TXNTFSR 0x9E
242
243#define KS_MAHTR0 0xA0
244#define KS_MAHTR1 0xA2
245#define KS_MAHTR2 0xA4
246#define KS_MAHTR3 0xA6
247
248#define KS_FCLWR 0xB0
249#define KS_FCHWR 0xB2
250#define KS_FCOWR 0xB4
251
252#define KS_CIDER 0xC0
253#define CIDER_ID 0x8870
254#define CIDER_REV_MASK (0x7 << 1)
255#define CIDER_REV_SHIFT (1)
256#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
257
258#define KS_CGCR 0xC6
259#define KS_IACR 0xC8
260#define IACR_RDEN (1 << 12)
261#define IACR_TSEL_MASK (0x3 << 10)
262#define IACR_TSEL_SHIFT (10)
263#define IACR_TSEL_MIB (0x3 << 10)
264#define IACR_ADDR_MASK (0x1f << 0)
265#define IACR_ADDR_SHIFT (0)
266
267#define KS_IADLR 0xD0
268#define KS_IAHDR 0xD2
269
270#define KS_PMECR 0xD4
271#define PMECR_PME_DELAY (1 << 14)
272#define PMECR_PME_POL (1 << 12)
273#define PMECR_WOL_WAKEUP (1 << 11)
274#define PMECR_WOL_MAGICPKT (1 << 10)
275#define PMECR_WOL_LINKUP (1 << 9)
276#define PMECR_WOL_ENERGY (1 << 8)
277#define PMECR_AUTO_WAKE_EN (1 << 7)
278#define PMECR_WAKEUP_NORMAL (1 << 6)
279#define PMECR_WKEVT_MASK (0xf << 2)
280#define PMECR_WKEVT_SHIFT (2)
281#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
282#define PMECR_WKEVT_ENERGY (0x1 << 2)
283#define PMECR_WKEVT_LINK (0x2 << 2)
284#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
285#define PMECR_WKEVT_FRAME (0x8 << 2)
286#define PMECR_PM_MASK (0x3 << 0)
287#define PMECR_PM_SHIFT (0)
288#define PMECR_PM_NORMAL (0x0 << 0)
289#define PMECR_PM_ENERGY (0x1 << 0)
290#define PMECR_PM_SOFTDOWN (0x2 << 0)
291#define PMECR_PM_POWERSAVE (0x3 << 0)
292
293/* Standard MII PHY data */
294#define KS_P1MBCR 0xE4
295#define P1MBCR_FORCE_FDX (1 << 8)
296
297#define KS_P1MBSR 0xE6
298#define P1MBSR_AN_COMPLETE (1 << 5)
299#define P1MBSR_AN_CAPABLE (1 << 3)
300#define P1MBSR_LINK_UP (1 << 2)
301
302#define KS_PHY1ILR 0xE8
303#define KS_PHY1IHR 0xEA
304#define KS_P1ANAR 0xEC
305#define KS_P1ANLPR 0xEE
306
307#define KS_P1SCLMD 0xF4
308#define P1SCLMD_LEDOFF (1 << 15)
309#define P1SCLMD_TXIDS (1 << 14)
310#define P1SCLMD_RESTARTAN (1 << 13)
311#define P1SCLMD_DISAUTOMDIX (1 << 10)
312#define P1SCLMD_FORCEMDIX (1 << 9)
313#define P1SCLMD_AUTONEGEN (1 << 7)
314#define P1SCLMD_FORCE100 (1 << 6)
315#define P1SCLMD_FORCEFDX (1 << 5)
316#define P1SCLMD_ADV_FLOW (1 << 4)
317#define P1SCLMD_ADV_100BT_FDX (1 << 3)
318#define P1SCLMD_ADV_100BT_HDX (1 << 2)
319#define P1SCLMD_ADV_10BT_FDX (1 << 1)
320#define P1SCLMD_ADV_10BT_HDX (1 << 0)
321
322#define KS_P1CR 0xF6
323#define P1CR_HP_MDIX (1 << 15)
324#define P1CR_REV_POL (1 << 13)
325#define P1CR_OP_100M (1 << 10)
326#define P1CR_OP_FDX (1 << 9)
327#define P1CR_OP_MDI (1 << 7)
328#define P1CR_AN_DONE (1 << 6)
329#define P1CR_LINK_GOOD (1 << 5)
330#define P1CR_PNTR_FLOW (1 << 4)
331#define P1CR_PNTR_100BT_FDX (1 << 3)
332#define P1CR_PNTR_100BT_HDX (1 << 2)
333#define P1CR_PNTR_10BT_FDX (1 << 1)
334#define P1CR_PNTR_10BT_HDX (1 << 0)
335
336/* TX Frame control */
337
338#define TXFR_TXIC (1 << 15)
339#define TXFR_TXFID_MASK (0x3f << 0)
340#define TXFR_TXFID_SHIFT (0)
341
342#define KS_P1SR 0xF8
343#define P1SR_HP_MDIX (1 << 15)
344#define P1SR_REV_POL (1 << 13)
345#define P1SR_OP_100M (1 << 10)
346#define P1SR_OP_FDX (1 << 9)
347#define P1SR_OP_MDI (1 << 7)
348#define P1SR_AN_DONE (1 << 6)
349#define P1SR_LINK_GOOD (1 << 5)
350#define P1SR_PNTR_FLOW (1 << 4)
351#define P1SR_PNTR_100BT_FDX (1 << 3)
352#define P1SR_PNTR_100BT_HDX (1 << 2)
353#define P1SR_PNTR_10BT_FDX (1 << 1)
354#define P1SR_PNTR_10BT_HDX (1 << 0)
355
356#define ENUM_BUS_NONE 0
357#define ENUM_BUS_8BIT 1
358#define ENUM_BUS_16BIT 2
359#define ENUM_BUS_32BIT 3
360
361#define MAX_MCAST_LST 32
362#define HW_MCAST_SIZE 8
363#define MAC_ADDR_LEN 6
364
365/**
366 * union ks_tx_hdr - tx header data
367 * @txb: The header as bytes
368 * @txw: The header as 16bit, little-endian words
369 *
370 * A dual representation of the tx header data to allow
371 * access to individual bytes, and to allow 16bit accesses
372 * with 16bit alignment.
373 */
374union ks_tx_hdr {
375 u8 txb[4];
376 __le16 txw[2];
377};
378
379/**
380 * struct ks_net - KS8851 driver private data
381 * @net_device : The network device we're bound to
382 * @hw_addr : start address of data register.
383 * @hw_addr_cmd : start address of command register.
384 * @txh : temporary buffer to save status/length.
385 * @lock : Lock to ensure that the device is not accessed when busy.
386 * @pdev : Pointer to platform device.
387 * @mii : The MII state information for the mii calls.
388 * @frame_head_info : frame header information for multi-pkt rx.
389 * @statelock : Lock on this structure for tx list.
390 * @msg_enable : The message flags controlling driver output (see ethtool).
391 * @frame_cnt : number of frames received.
392 * @bus_width : i/o bus width.
393 * @irq : irq number assigned to this device.
394 * @rc_rxqcr : Cached copy of KS_RXQCR.
395 * @rc_txcr : Cached copy of KS_TXCR.
396 * @rc_ier : Cached copy of KS_IER.
397 * @sharedbus : Multiplexed (addr and data bus) mode indicator.
398 * @cmd_reg_cache : command register cached.
399 * @cmd_reg_cache_int : command register cached. Used in the irq handler.
400 * @promiscuous : promiscuous mode indicator.
401 * @all_mcast : all-multicast mode indicator.
402 * @mcast_lst_size : size of multicast list.
403 * @mcast_lst : multicast list.
404 * @mcast_bits : multicast hash table bits.
405 * @mac_addr : MAC address assigned to this device.
406 * @fid : frame id.
407 * @extra_byte : number of extra bytes prepended to an rx pkt.
408 * @enabled : indicator this device works.
409 *
410 * The @lock ensures that the chip is protected when certain operations are
411 * in progress. When the read or write packet transfer is in progress, most
412 * of the chip registers are not accessible until the transfer is finished and
413 * the DMA has been de-asserted.
414 *
415 * The @statelock is used to protect information in the structure which may
416 * need to be accessed via several sources, such as the network driver layer
417 * or one of the work queues.
418 *
419 */
420
421/* Receive multiplex framer header info */
422struct type_frame_head {
423 u16 sts; /* Frame status */
424 u16 len; /* Byte count */
425};
426
427struct ks_net {
428 struct net_device *netdev;
429 void __iomem *hw_addr;
430 void __iomem *hw_addr_cmd;
431 union ks_tx_hdr txh ____cacheline_aligned;
432 struct mutex lock; /* spinlock to be interrupt safe */
433 struct platform_device *pdev;
434 struct mii_if_info mii;
435 struct type_frame_head *frame_head_info;
436 spinlock_t statelock;
437 u32 msg_enable;
438 u32 frame_cnt;
439 int bus_width;
440 int irq;
441
442 u16 rc_rxqcr;
443 u16 rc_txcr;
444 u16 rc_ier;
445 u16 sharedbus;
446 u16 cmd_reg_cache;
447 u16 cmd_reg_cache_int;
448 u16 promiscuous;
449 u16 all_mcast;
450 u16 mcast_lst_size;
451 u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
452 u8 mcast_bits[HW_MCAST_SIZE];
453 u8 mac_addr[6];
454 u8 fid;
455 u8 extra_byte;
456 u8 enabled;
457};
458
459static int msg_enable;
460
461#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
462#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
463#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
464#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
465
466#define BE3 0x8000 /* Byte Enable 3 */
467#define BE2 0x4000 /* Byte Enable 2 */
468#define BE1 0x2000 /* Byte Enable 1 */
469#define BE0 0x1000 /* Byte Enable 0 */
470
471/**
472 * register read/write calls.
473 *
474 * All these calls issue transactions to access the chip's registers. They
475 * all require that the necessary lock is held to prevent accesses when the
476 * chip is busy transferring packet data (RX/TX FIFO accesses).
477 */
478
479/**
480 * ks_rdreg8 - read 8 bit register from device
481 * @ks : The chip information
482 * @offset: The register address
483 *
484 * Read an 8bit register from the chip, returning the result
485 */
486static u8 ks_rdreg8(struct ks_net *ks, int offset)
487{
488 u16 data;
489 u8 shift_bit = offset & 0x03;
490 u8 shift_data = (offset & 1) << 3;
491 ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
492 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
493 data = ioread16(ks->hw_addr);
494 return (u8)(data >> shift_data);
495}
496
497/**
498 * ks_rdreg16 - read 16 bit register from device
499 * @ks : The chip information
500 * @offset: The register address
501 *
502 * Read a 16bit register from the chip, returning the result
503 */
504
505static u16 ks_rdreg16(struct ks_net *ks, int offset)
506{
507 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
508 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
509 return ioread16(ks->hw_addr);
510}
511
512/**
513 * ks_wrreg8 - write 8bit register value to chip
514 * @ks: The chip information
515 * @offset: The register address
516 * @value: The value to write
517 *
518 */
519static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
520{
521 u8 shift_bit = (offset & 0x03);
522 u16 value_write = (u16)(value << ((offset & 1) << 3));
523 ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
524 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
525 iowrite16(value_write, ks->hw_addr);
526}
527
528/**
529 * ks_wrreg16 - write 16bit register value to chip
530 * @ks: The chip information
531 * @offset: The register address
532 * @value: The value to write
533 *
534 */
535
536static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
537{
538 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
539 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
540 iowrite16(value, ks->hw_addr);
541}
542
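
The accessors above steer 8-bit and 16-bit data onto the right lanes of the 16-bit bus by shifting the BE0/BE1 byte-enable bits according to the low address bits before issuing the command word. Two command words worked out by hand from the defines earlier in this file (illustrative only, not driver code):

u16 cmd_rxcr1 = (u16)KS_RXCR1 | ((BE1 | BE0) << (KS_RXCR1 & 0x02)); /* 0x74 -> 0x3074 */
u16 cmd_rxcr2 = (u16)KS_RXCR2 | ((BE1 | BE0) << (KS_RXCR2 & 0x02)); /* 0x76 -> 0xC076 */
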
543/**
544 * ks_inblk - read a block of data from the QMU. This is called after pseudo-DMA mode is enabled.
545 * @ks: The chip state
546 * @wptr: buffer address to save data
547 * @len: length in bytes to read
548 *
549 */
550static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
551{
552 len >>= 1;
553 while (len--)
554 *wptr++ = (u16)ioread16(ks->hw_addr);
555}
556
557/**
558 * ks_outblk - write data to the QMU. This is called after pseudo-DMA mode is enabled.
559 * @ks: The chip information
560 * @wptr: buffer address
561 * @len: length in bytes to write
562 *
563 */
564static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
565{
566 len >>= 1;
567 while (len--)
568 iowrite16(*wptr++, ks->hw_addr);
569}
570
571/**
572 * ks_tx_fifo_space - return the available hardware buffer size.
573 * @ks: The chip information
574 *
575 */
576static inline u16 ks_tx_fifo_space(struct ks_net *ks)
577{
578 return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
579}
580
581/**
582 * ks_save_cmd_reg - save the command register from the cache.
583 * @ks: The chip information
584 *
585 */
586static inline void ks_save_cmd_reg(struct ks_net *ks)
587{
588 /* The KS8851 MLL has a bug reading back the command register,
589 * so rely on software to save the content of the command register.
590 */
591 ks->cmd_reg_cache_int = ks->cmd_reg_cache;
592}
593
594/**
595 * ks_restore_cmd_reg - restore the command register from the cache and
596 * write to hardware register.
597 * @ks: The chip information
598 *
599 */
600static inline void ks_restore_cmd_reg(struct ks_net *ks)
601{
602 ks->cmd_reg_cache = ks->cmd_reg_cache_int;
603 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
604}
605
606/**
607 * ks_set_powermode - set power mode of the device
608 * @ks: The chip information
609 * @pwrmode: The power mode value to write to KS_PMECR.
610 *
611 * Change the power mode of the chip.
612 */
613static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
614{
615 unsigned pmecr;
616
617 if (netif_msg_hw(ks))
618 ks_dbg(ks, "setting power mode %d\n", pwrmode);
619
620 ks_rdreg16(ks, KS_GRR);
621 pmecr = ks_rdreg16(ks, KS_PMECR);
622 pmecr &= ~PMECR_PM_MASK;
623 pmecr |= pwrmode;
624
625 ks_wrreg16(ks, KS_PMECR, pmecr);
626}
627
628/**
629 * ks_read_config - read chip configuration of bus width.
630 * @ks: The chip information
631 *
632 */
633static void ks_read_config(struct ks_net *ks)
634{
635 u16 reg_data = 0;
636
637 /* Regardless of bus width, 8 bit read should always work.*/
638 reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
639 reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
640
641 /* addr/data bus are multiplexed */
642 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
643
644 /* There is garbage data when reading from the QMU,
645 * depending on the bus width.
646 */
647
648 if (reg_data & CCR_8BIT) {
649 ks->bus_width = ENUM_BUS_8BIT;
650 ks->extra_byte = 1;
651 } else if (reg_data & CCR_16BIT) {
652 ks->bus_width = ENUM_BUS_16BIT;
653 ks->extra_byte = 2;
654 } else {
655 ks->bus_width = ENUM_BUS_32BIT;
656 ks->extra_byte = 4;
657 }
658}
659
660/**
661 * ks_soft_reset - issue a soft reset to the device
662 * @ks: The device state.
663 * @op: The bit(s) to set in the GRR
664 *
665 * Issue the relevant soft-reset command to the device's GRR register
666 * specified by @op.
667 *
668 * Note, the delays are in there as a caution to ensure that the reset
669 * has time to take effect and then complete. Since the datasheet does
670 * not currently specify the exact sequence, we have chosen something
671 * that seems to work with our device.
672 */
673static void ks_soft_reset(struct ks_net *ks, unsigned op)
674{
675 /* Disable interrupt first */
676 ks_wrreg16(ks, KS_IER, 0x0000);
677 ks_wrreg16(ks, KS_GRR, op);
678 mdelay(10); /* wait a short time to effect reset */
679 ks_wrreg16(ks, KS_GRR, 0);
680 mdelay(1); /* wait for condition to clear */
681}
682
683
684/**
685 * ks_read_qmu - read 1 pkt data from the QMU.
686 * @ks: The chip information
687 * @buf: buffer address to save 1 pkt
688 * @len: Pkt length
689 * Here is the sequence to read 1 pkt:
690 * 1. set pseudo-DMA mode
691 * 2. read prepend data
692 * 3. read pkt data
693 * 4. reset pseudo-DMA mode
694 */
695static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
696{
697 u32 r = ks->extra_byte & 0x1 ;
698 u32 w = ks->extra_byte - r;
699
700 /* 1. set pseudo-DMA mode */
701 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
702 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
703
704 /* 2. read prepend data */
705 /**
706 * read 4 + extra bytes and discard them:
707 * the extra bytes are dummy padding, 2 are status and 2 are length
708 */
709
710 /* r is non-zero only for 8-bit bus access, so mark it unlikely */
711 if (unlikely(r))
712 ioread8(ks->hw_addr);
713 ks_inblk(ks, buf, w + 2 + 2);
714
715 /* 3. read pkt data */
716 ks_inblk(ks, buf, ALIGN(len, 4));
717
718 /* 4. reset pseudo-DMA mode */
719 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
720}
721
722/**
723 * ks_rcv - read multiple pkts data from the QMU.
724 * @ks: The chip information
725 * @netdev: The network device receiving the pkts.
726 *
727 * Read all of the header information before reading the pkt content.
728 * It is not allowed to leave only part of the pkts in the QMU after
729 * issuing the interrupt ack.
730 */
731static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
732{
733 u32 i;
734 struct type_frame_head *frame_hdr = ks->frame_head_info;
735 struct sk_buff *skb;
736
737 ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
738
739 /* read all header information */
740 for (i = 0; i < ks->frame_cnt; i++) {
741 /* Checking Received packet status */
742 frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
743 /* Get packet len from hardware */
744 frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
745 frame_hdr++;
746 }
747
748 frame_hdr = ks->frame_head_info;
749 while (ks->frame_cnt--) {
750 skb = dev_alloc_skb(frame_hdr->len + 16);
751 if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
752 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
753 skb_reserve(skb, 2);
754 /* read data block including CRC 4 bytes */
755 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
756 skb_put(skb, frame_hdr->len);
757 skb->dev = netdev;
758 skb->protocol = eth_type_trans(skb, netdev);
759 netif_rx(skb);
760 } else {
761 printk(KERN_ERR "%s: err:skb alloc\n", __func__);
762 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
763 if (skb)
764 dev_kfree_skb_irq(skb);
765 }
766 frame_hdr++;
767 }
768}
769
770/**
771 * ks_update_link_status - link status update.
772 * @netdev: The network device whose link state is being updated.
773 * @ks: The chip information
774 *
775 */
776
777static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
778{
779 /* check the status of the link */
780 u32 link_up_status;
781 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
782 netif_carrier_on(netdev);
783 link_up_status = true;
784 } else {
785 netif_carrier_off(netdev);
786 link_up_status = false;
787 }
788 if (netif_msg_link(ks))
789 ks_dbg(ks, "%s: %s\n",
790 __func__, link_up_status ? "UP" : "DOWN");
791}
792
793/**
794 * ks_irq - device interrupt handler
795 * @irq: Interrupt number passed to the IRQ handler.
796 * @pw: The private word passed to request_irq(), our struct ks_net.
797 *
798 * This is the handler invoked to find out what happened
799 *
800 * Read the interrupt status, work out what needs to be done and then clear
801 * any of the interrupts that are not needed.
802 */
803
804static irqreturn_t ks_irq(int irq, void *pw)
805{
806 struct ks_net *ks = pw;
807 struct net_device *netdev = ks->netdev;
808 u16 status;
809
810 /* this should be done first in the IRQ handler */
811 ks_save_cmd_reg(ks);
812
813 status = ks_rdreg16(ks, KS_ISR);
814 if (unlikely(!status)) {
815 ks_restore_cmd_reg(ks);
816 return IRQ_NONE;
817 }
818
819 ks_wrreg16(ks, KS_ISR, status);
820
821 if (likely(status & IRQ_RXI))
822 ks_rcv(ks, netdev);
823
824 if (unlikely(status & IRQ_LCI))
825 ks_update_link_status(netdev, ks);
826
827 if (unlikely(status & IRQ_TXI))
828 netif_wake_queue(netdev);
829
830 if (unlikely(status & IRQ_LDI)) {
831
832 u16 pmecr = ks_rdreg16(ks, KS_PMECR);
833 pmecr &= ~PMECR_WKEVT_MASK;
834 ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
835 }
836
837 /* this should be done last in the IRQ handler */
838 ks_restore_cmd_reg(ks);
839 return IRQ_HANDLED;
840}
841
842
843/**
844 * ks_net_open - open network device
845 * @netdev: The network device being opened.
846 *
847 * Called when the network device is marked active, such as a user executing
848 * 'ifconfig up' on the device.
849 */
850static int ks_net_open(struct net_device *netdev)
851{
852 struct ks_net *ks = netdev_priv(netdev);
853 int err;
854
855#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
856 /* lock the card, even if we may not actually do anything
857 * else at the moment.
858 */
859
860 if (netif_msg_ifup(ks))
861 ks_dbg(ks, "%s - entry\n", __func__);
862
863 /* register the interrupt handler for the device */
864 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
865
866 if (err) {
867 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
868 ks->irq, err);
869 return err;
870 }
871
872 if (netif_msg_ifup(ks))
873 ks_dbg(ks, "network device %s up\n", netdev->name);
874
875 return 0;
876}
877
878/**
879 * ks_net_stop - close network device
880 * @netdev: The device being closed.
881 *
882 * Called to close down a network device which has been active. Cancel any
883 * work, shutdown the RX and TX process and then place the chip into a low
884 * power state whilst it is not being used.
885 */
886static int ks_net_stop(struct net_device *netdev)
887{
888 struct ks_net *ks = netdev_priv(netdev);
889
890 if (netif_msg_ifdown(ks))
891 ks_info(ks, "%s: shutting down\n", netdev->name);
892
893 netif_stop_queue(netdev);
894
895 kfree(ks->frame_head_info);
896
897 mutex_lock(&ks->lock);
898
899 /* turn off the IRQs and ack any outstanding */
900 ks_wrreg16(ks, KS_IER, 0x0000);
901 ks_wrreg16(ks, KS_ISR, 0xffff);
902
903 /* shutdown RX process */
904 ks_wrreg16(ks, KS_RXCR1, 0x0000);
905
906 /* shutdown TX process */
907 ks_wrreg16(ks, KS_TXCR, 0x0000);
908
909 /* set powermode to soft power down to save power */
910 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
911 free_irq(ks->irq, netdev);
912 mutex_unlock(&ks->lock);
913 return 0;
914}
915
916
917/**
918 * ks_write_qmu - write 1 pkt data to the QMU.
919 * @ks: The chip information
920 * @pdata: buffer address to save 1 pkt
921 * @len: Pkt length in byte
922 * Here is the sequence to write 1 pkt:
923 * 1. set pseudo-DMA mode
924 * 2. write status/length
925 * 3. write pkt data
926 * 4. reset pseudo-DMA mode
927 * 5. enqueue the pkt into the TXQ
928 * 6. Wait until pkt is out
929 */
930static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
931{
932 unsigned fid = ks->fid;
933
934 fid = ks->fid;
935 ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
936
937 /* reduce the tx interrupt occurrences. */
938 if (!fid)
939 fid |= TXFR_TXIC; /* irq on completion */
940
941 /* start header at txb[0] to align txw entries */
942 ks->txh.txw[0] = cpu_to_le16(fid);
943 ks->txh.txw[1] = cpu_to_le16(len);
944
945 /* 1. set pseudo-DMA mode */
946 ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
947 /* 2. write status/length info */
948 ks_outblk(ks, ks->txh.txw, 4);
949 /* 3. write pkt data */
950 ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
951 /* 4. reset pseudo-DMA mode */
952 ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
953 /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
954 ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
955 /* 6. wait until TXQCR_METFE is auto-cleared */
956 while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
957 ;
958}
959
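
Both QMU transfer paths above share the same bracket: raise RXQCR_SDA to open the pseudo-DMA data window, stream 16-bit words through it with ks_inblk()/ks_outblk(), then restore the cached RXQCR value. A condensed sketch of that bracket (do_transfer() is a placeholder for the FIFO copy; not part of the driver):

static void ks_qmu_bracket(struct ks_net *ks, void (*do_transfer)(struct ks_net *))
{
        /* Sketch only: the bracket shared by ks_read_qmu()/ks_write_qmu(). */
        ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff); /* enter pseudo-DMA */
        do_transfer(ks);                                            /* FIFO copy        */
        ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);                      /* restore RXQCR    */
}
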
960static void ks_disable_int(struct ks_net *ks)
961{
962 ks_wrreg16(ks, KS_IER, 0x0000);
963} /* ks_disable_int */
964
965static void ks_enable_int(struct ks_net *ks)
966{
967 ks_wrreg16(ks, KS_IER, ks->rc_ier);
968} /* ks_enable_int */
969
970/**
971 * ks_start_xmit - transmit packet
972 * @skb : The buffer to transmit
973 * @netdev : The device used to transmit the packet.
974 *
975 * Called by the network layer to transmit the @skb.
976 * spin_lock_irqsave is required because tx and rx should be mutually
977 * exclusive, so while tx is in progress, prevent the IRQ from happening.
978 */
979static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
980{
981 int retv = NETDEV_TX_OK;
982 struct ks_net *ks = netdev_priv(netdev);
983
984 disable_irq(netdev->irq);
985 ks_disable_int(ks);
986 spin_lock(&ks->statelock);
987
988 /* Extra space is required:
989 * 4 bytes for alignment, 4 for status/length, 4 for CRC
990 */
991
992 if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
993 ks_write_qmu(ks, skb->data, skb->len);
994 dev_kfree_skb(skb);
995 } else
996 retv = NETDEV_TX_BUSY;
997 spin_unlock(&ks->statelock);
998 ks_enable_int(ks);
999 enable_irq(netdev->irq);
1000 return retv;
1001}
1002
1003/**
1004 * ks_start_rx - ready to serve pkts
1005 * @ks : The chip information
1006 *
1007 */
1008static void ks_start_rx(struct ks_net *ks)
1009{
1010 u16 cntl;
1011
1012 /* Enables QMU Receive (RXCR1). */
1013 cntl = ks_rdreg16(ks, KS_RXCR1);
1014 cntl |= RXCR1_RXE ;
1015 ks_wrreg16(ks, KS_RXCR1, cntl);
1016} /* ks_start_rx */
1017
1018/**
1019 * ks_stop_rx - stop to serve pkts
1020 * @ks : The chip information
1021 *
1022 */
1023static void ks_stop_rx(struct ks_net *ks)
1024{
1025 u16 cntl;
1026
1027 /* Disables QMU Receive (RXCR1). */
1028 cntl = ks_rdreg16(ks, KS_RXCR1);
1029 cntl &= ~RXCR1_RXE ;
1030 ks_wrreg16(ks, KS_RXCR1, cntl);
1031
1032} /* ks_stop_rx */
1033
1034static unsigned long const ethernet_polynomial = 0x04c11db7U;
1035
1036static unsigned long ether_gen_crc(int length, u8 *data)
1037{
1038 long crc = -1;
1039 while (--length >= 0) {
1040 u8 current_octet = *data++;
1041 int bit;
1042
1043 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
1044 crc = (crc << 1) ^
1045 ((crc < 0) ^ (current_octet & 1) ?
1046 ethernet_polynomial : 0);
1047 }
1048 }
1049 return (unsigned long)crc;
1050} /* ether_gen_crc */
1051
1052/**
1053* ks_set_grpaddr - set multicast information
1054* @ks : The chip information
1055*/
1056
1057static void ks_set_grpaddr(struct ks_net *ks)
1058{
1059 u8 i;
1060 u32 index, position, value;
1061
1062 memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
1063
1064 for (i = 0; i < ks->mcast_lst_size; i++) {
1065 position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
1066 index = position >> 3;
1067 value = 1 << (position & 7);
1068 ks->mcast_bits[index] |= (u8)value;
1069 }
1070
1071 for (i = 0; i < HW_MCAST_SIZE; i++) {
1072 if (i & 1) {
1073 ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
1074 (ks->mcast_bits[i] << 8) |
1075 ks->mcast_bits[i - 1]);
1076 }
1077 }
1078} /* ks_set_grpaddr */
1079
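
The hash placement in ks_set_grpaddr() takes the top 6 bits of the Ethernet CRC: the upper 3 select the byte in mcast_bits[] and the lower 3 select the bit within that byte. A worked example with an illustrative position value of 0x2B (assumed, not taken from a real address):

u32 position = 0x2B;                /* 0b101011, assumed (crc >> 26) & 0x3f result */
u32 index    = position >> 3;       /* 5    -> ks->mcast_bits[5]                   */
u32 value    = 1 << (position & 7); /* 0x08 -> bit 3 of that byte                  */
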
1080/*
1081* ks_clear_mcast - clear multicast information
1082*
1083* @ks : The chip information
1084* This routine removes all mcast addresses set in the hardware.
1085*/
1086
1087static void ks_clear_mcast(struct ks_net *ks)
1088{
1089 u16 i, mcast_size;
1090 for (i = 0; i < HW_MCAST_SIZE; i++)
1091 ks->mcast_bits[i] = 0;
1092
1093 mcast_size = HW_MCAST_SIZE >> 2;
1094 for (i = 0; i < mcast_size; i++)
1095 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
1096}
1097
1098static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
1099{
1100 u16 cntl;
1101 ks->promiscuous = promiscuous_mode;
1102 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1103 cntl = ks_rdreg16(ks, KS_RXCR1);
1104
1105 cntl &= ~RXCR1_FILTER_MASK;
1106 if (promiscuous_mode)
1107 /* Enable Promiscuous mode */
1108 cntl |= RXCR1_RXAE | RXCR1_RXINVF;
1109 else
1110 /* Disable Promiscuous mode (default normal mode) */
1111 cntl |= RXCR1_RXPAFMA;
1112
1113 ks_wrreg16(ks, KS_RXCR1, cntl);
1114
1115 if (ks->enabled)
1116 ks_start_rx(ks);
1117
1118} /* ks_set_promis */
1119
1120static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1121{
1122 u16 cntl;
1123
1124 ks->all_mcast = mcast;
1125 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1126 cntl = ks_rdreg16(ks, KS_RXCR1);
1127 cntl &= ~RXCR1_FILTER_MASK;
1128 if (mcast)
1129 /* Enable "Perfect with Multicast address passed mode" */
1130 cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1131 else
1132 /**
1133 * Disable "Perfect with Multicast address passed
1134 * mode" (normal mode).
1135 */
1136 cntl |= RXCR1_RXPAFMA;
1137
1138 ks_wrreg16(ks, KS_RXCR1, cntl);
1139
1140 if (ks->enabled)
1141 ks_start_rx(ks);
1142} /* ks_set_mcast */
1143
1144static void ks_set_rx_mode(struct net_device *netdev)
1145{
1146 struct ks_net *ks = netdev_priv(netdev);
1147 struct dev_mc_list *ptr;
1148
1149 /* Turn on/off promiscuous mode. */
1150 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
1151 ks_set_promis(ks,
1152 (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
1153 /* Turn on/off all mcast mode. */
1154 else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
1155 ks_set_mcast(ks,
1156 (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
1157 else
1158 ks_set_promis(ks, false);
1159
1160 if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
1161 if (netdev->mc_count <= MAX_MCAST_LST) {
1162 int i = 0;
1163 for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
1164 if (!(*ptr->dmi_addr & 1))
1165 continue;
1166 if (i >= MAX_MCAST_LST)
1167 break;
1168 memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
1169 MAC_ADDR_LEN);
1170 }
1171 ks->mcast_lst_size = (u8)i;
1172 ks_set_grpaddr(ks);
1173 } else {
1174 /**
1175 * List too big to support so
1176 * turn on all mcast mode.
1177 */
1178 ks->mcast_lst_size = MAX_MCAST_LST;
1179 ks_set_mcast(ks, true);
1180 }
1181 } else {
1182 ks->mcast_lst_size = 0;
1183 ks_clear_mcast(ks);
1184 }
1185} /* ks_set_rx_mode */
1186
1187static void ks_set_mac(struct ks_net *ks, u8 *data)
1188{
1189 u16 *pw = (u16 *)data;
1190 u16 w, u;
1191
1192 ks_stop_rx(ks); /* Stop receiving for reconfiguration */
1193
1194 u = *pw++;
1195 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1196 ks_wrreg16(ks, KS_MARH, w);
1197
1198 u = *pw++;
1199 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1200 ks_wrreg16(ks, KS_MARM, w);
1201
1202 u = *pw;
1203 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
1204 ks_wrreg16(ks, KS_MARL, w);
1205
1206 memcpy(ks->mac_addr, data, 6);
1207
1208 if (ks->enabled)
1209 ks_start_rx(ks);
1210}
1211
1212static int ks_set_mac_address(struct net_device *netdev, void *paddr)
1213{
1214 struct ks_net *ks = netdev_priv(netdev);
1215 struct sockaddr *addr = paddr;
1216 u8 *da;
1217
1218 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1219
1220 da = (u8 *)netdev->dev_addr;
1221
1222 ks_set_mac(ks, da);
1223 return 0;
1224}
1225
1226static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1227{
1228 struct ks_net *ks = netdev_priv(netdev);
1229
1230 if (!netif_running(netdev))
1231 return -EINVAL;
1232
1233 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
1234}
1235
1236static const struct net_device_ops ks_netdev_ops = {
1237 .ndo_open = ks_net_open,
1238 .ndo_stop = ks_net_stop,
1239 .ndo_do_ioctl = ks_net_ioctl,
1240 .ndo_start_xmit = ks_start_xmit,
1241 .ndo_set_mac_address = ks_set_mac_address,
1242 .ndo_set_rx_mode = ks_set_rx_mode,
1243 .ndo_change_mtu = eth_change_mtu,
1244 .ndo_validate_addr = eth_validate_addr,
1245};
1246
1247/* ethtool support */
1248
1249static void ks_get_drvinfo(struct net_device *netdev,
1250 struct ethtool_drvinfo *di)
1251{
1252 strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
1253 strlcpy(di->version, "1.00", sizeof(di->version));
1254 strlcpy(di->bus_info, dev_name(netdev->dev.parent),
1255 sizeof(di->bus_info));
1256}
1257
1258static u32 ks_get_msglevel(struct net_device *netdev)
1259{
1260 struct ks_net *ks = netdev_priv(netdev);
1261 return ks->msg_enable;
1262}
1263
1264static void ks_set_msglevel(struct net_device *netdev, u32 to)
1265{
1266 struct ks_net *ks = netdev_priv(netdev);
1267 ks->msg_enable = to;
1268}
1269
1270static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1271{
1272 struct ks_net *ks = netdev_priv(netdev);
1273 return mii_ethtool_gset(&ks->mii, cmd);
1274}
1275
1276static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1277{
1278 struct ks_net *ks = netdev_priv(netdev);
1279 return mii_ethtool_sset(&ks->mii, cmd);
1280}
1281
1282static u32 ks_get_link(struct net_device *netdev)
1283{
1284 struct ks_net *ks = netdev_priv(netdev);
1285 return mii_link_ok(&ks->mii);
1286}
1287
1288static int ks_nway_reset(struct net_device *netdev)
1289{
1290 struct ks_net *ks = netdev_priv(netdev);
1291 return mii_nway_restart(&ks->mii);
1292}
1293
1294static const struct ethtool_ops ks_ethtool_ops = {
1295 .get_drvinfo = ks_get_drvinfo,
1296 .get_msglevel = ks_get_msglevel,
1297 .set_msglevel = ks_set_msglevel,
1298 .get_settings = ks_get_settings,
1299 .set_settings = ks_set_settings,
1300 .get_link = ks_get_link,
1301 .nway_reset = ks_nway_reset,
1302};
1303
1304/* MII interface controls */
1305
1306/**
1307 * ks_phy_reg - convert MII register into a KS8851 register
1308 * @reg: MII register number.
1309 *
1310 * Return the KS8851 register number for the corresponding MII PHY register
1311 * if possible. Return zero if the MII register has no direct mapping to the
1312 * KS8851 register set.
1313 */
1314static int ks_phy_reg(int reg)
1315{
1316 switch (reg) {
1317 case MII_BMCR:
1318 return KS_P1MBCR;
1319 case MII_BMSR:
1320 return KS_P1MBSR;
1321 case MII_PHYSID1:
1322 return KS_PHY1ILR;
1323 case MII_PHYSID2:
1324 return KS_PHY1IHR;
1325 case MII_ADVERTISE:
1326 return KS_P1ANAR;
1327 case MII_LPA:
1328 return KS_P1ANLPR;
1329 }
1330
1331 return 0x0;
1332}
1333
1334/**
1335 * ks_phy_read - MII interface PHY register read.
1336 * @netdev: The network device the PHY is on.
1337 * @phy_addr: Address of PHY (ignored as we only have one)
1338 * @reg: The register to read.
1339 *
1340 * This call reads data from the PHY register specified in @reg. Since the
1341 * device does not support all the MII registers, the non-existent values
1342 * are always returned as zero.
1343 *
1344 * We return zero for unsupported registers as the MII code does not check
1345 * the value returned for any error status, and simply returns it to the
1346 * caller. The mii-tool that the driver was tested with takes any -ve error
1347 * as real PHY capabilities, thus displaying incorrect data to the user.
1348 */
1349static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1350{
1351 struct ks_net *ks = netdev_priv(netdev);
1352 int ksreg;
1353 int result;
1354
1355 ksreg = ks_phy_reg(reg);
1356 if (!ksreg)
1357 return 0x0; /* no error return allowed, so use zero */
1358
1359 mutex_lock(&ks->lock);
1360 result = ks_rdreg16(ks, ksreg);
1361 mutex_unlock(&ks->lock);
1362
1363 return result;
1364}
1365
1366static void ks_phy_write(struct net_device *netdev,
1367 int phy, int reg, int value)
1368{
1369 struct ks_net *ks = netdev_priv(netdev);
1370 int ksreg;
1371
1372 ksreg = ks_phy_reg(reg);
1373 if (ksreg) {
1374 mutex_lock(&ks->lock);
1375 ks_wrreg16(ks, ksreg, value);
1376 mutex_unlock(&ks->lock);
1377 }
1378}
1379
1380/**
1381 * ks_read_selftest - read the selftest memory info.
1382 * @ks: The device state
1383 *
1384 * Read and check the TX/RX memory selftest information.
1385 */
1386static int ks_read_selftest(struct ks_net *ks)
1387{
1388 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1389 int ret = 0;
1390 unsigned rd;
1391
1392 rd = ks_rdreg16(ks, KS_MBIR);
1393
1394 if ((rd & both_done) != both_done) {
1395 ks_warn(ks, "Memory selftest not finished\n");
1396 return 0;
1397 }
1398
1399 if (rd & MBIR_TXMBFA) {
1400 ks_err(ks, "TX memory selftest fails\n");
1401 ret |= 1;
1402 }
1403
1404 if (rd & MBIR_RXMBFA) {
1405 ks_err(ks, "RX memory selftest fails\n");
1406 ret |= 2;
1407 }
1408
1409 ks_info(ks, "the selftest passes\n");
1410 return ret;
1411}
1412
1413static void ks_disable(struct ks_net *ks)
1414{
1415 u16 w;
1416
1417 w = ks_rdreg16(ks, KS_TXCR);
1418
1419 /* Disables QMU Transmit (TXCR). */
1420 w &= ~TXCR_TXE;
1421 ks_wrreg16(ks, KS_TXCR, w);
1422
1423 /* Disables QMU Receive (RXCR1). */
1424 w = ks_rdreg16(ks, KS_RXCR1);
1425 w &= ~RXCR1_RXE ;
1426 ks_wrreg16(ks, KS_RXCR1, w);
1427
1428 ks->enabled = false;
1429
1430} /* ks_disable */
1431
1432static void ks_setup(struct ks_net *ks)
1433{
1434 u16 w;
1435
1436 /**
1437 * Configure QMU Transmit
1438 */
1439
1440 /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
1441 ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
1442
1443 /* Setup Receive Frame Data Pointer Auto-Increment */
1444 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1445
1446 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1447 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
1448
1449 /* Setup RxQ Command Control (RXQCR) */
1450 ks->rc_rxqcr = RXQCR_CMD_CNTL;
1451 ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
1452
1453 /**
1454 * Set the forced mode to half duplex; the default is full duplex,
1455 * because if auto-negotiation fails, most switches fall back to
1456 * half-duplex.
1457 */
1458
1459 w = ks_rdreg16(ks, KS_P1MBCR);
1460 w &= ~P1MBCR_FORCE_FDX;
1461 ks_wrreg16(ks, KS_P1MBCR, w);
1462
1463 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1464 ks_wrreg16(ks, KS_TXCR, w);
1465
1466 w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;
1467
1468 if (ks->promiscuous) /* bPromiscuous */
1469 w |= (RXCR1_RXAE | RXCR1_RXINVF);
1470 else if (ks->all_mcast) /* Multicast address passed mode */
1471 w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
1472 else /* Normal mode */
1473 w |= RXCR1_RXPAFMA;
1474
1475 ks_wrreg16(ks, KS_RXCR1, w);
1476} /*ks_setup */
1477
1478
1479static void ks_setup_int(struct ks_net *ks)
1480{
1481 ks->rc_ier = 0x00;
1482 /* Clear the interrupts status of the hardware. */
1483 ks_wrreg16(ks, KS_ISR, 0xffff);
1484
1485 /* Enables the interrupts of the hardware. */
1486 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1487} /* ks_setup_int */
1488
1489void ks_enable(struct ks_net *ks)
1490{
1491 u16 w;
1492
1493 w = ks_rdreg16(ks, KS_TXCR);
1494 /* Enables QMU Transmit (TXCR). */
1495 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
1496
1497 /*
1498 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
1499 * Enable
1500 */
1501
1502 w = ks_rdreg16(ks, KS_RXQCR);
1503 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
1504
1505 /* Enables QMU Receive (RXCR1). */
1506 w = ks_rdreg16(ks, KS_RXCR1);
1507 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
1508 ks->enabled = true;
1509} /* ks_enable */
1510
1511static int ks_hw_init(struct ks_net *ks)
1512{
1513#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
1514 ks->promiscuous = 0;
1515 ks->all_mcast = 0;
1516 ks->mcast_lst_size = 0;
1517
1518	ks->frame_head_info =
1519		kmalloc(MHEADER_SIZE, GFP_KERNEL);
1520 if (!ks->frame_head_info) {
1521		printk(KERN_ERR "Error: Failed to allocate frame memory\n");
1522 return false;
1523 }
1524
1525 ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
1526 return true;
1527}
1528
1529
1530static int __devinit ks8851_probe(struct platform_device *pdev)
1531{
1532 int err = -ENOMEM;
1533 struct resource *io_d, *io_c;
1534 struct net_device *netdev;
1535 struct ks_net *ks;
1536 u16 id, data;
1537
1538 io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1539 io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1540
1541	if (!io_d || !io_c ||
1542	    !request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
1543		goto err_mem_region;
1544	if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
1545		goto err_mem_region1;
1546
1547 netdev = alloc_etherdev(sizeof(struct ks_net));
1548 if (!netdev)
1549 goto err_alloc_etherdev;
1550
1551 SET_NETDEV_DEV(netdev, &pdev->dev);
1552
1553 ks = netdev_priv(netdev);
1554 ks->netdev = netdev;
1555 ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
1556
1557 if (!ks->hw_addr)
1558 goto err_ioremap;
1559
1560 ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
1561 if (!ks->hw_addr_cmd)
1562 goto err_ioremap1;
1563
1564 ks->irq = platform_get_irq(pdev, 0);
1565
1566 if (ks->irq < 0) {
1567 err = ks->irq;
1568 goto err_get_irq;
1569 }
1570
1571 ks->pdev = pdev;
1572
1573 mutex_init(&ks->lock);
1574 spin_lock_init(&ks->statelock);
1575
1576 netdev->netdev_ops = &ks_netdev_ops;
1577 netdev->ethtool_ops = &ks_ethtool_ops;
1578
1579 /* setup mii state */
1580 ks->mii.dev = netdev;
1581	ks->mii.phy_id = 1;
1582 ks->mii.phy_id_mask = 1;
1583 ks->mii.reg_num_mask = 0xf;
1584 ks->mii.mdio_read = ks_phy_read;
1585 ks->mii.mdio_write = ks_phy_write;
1586
1587 ks_info(ks, "message enable is %d\n", msg_enable);
1588 /* set the default message enable */
1589 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1590 NETIF_MSG_PROBE |
1591 NETIF_MSG_LINK));
1592 ks_read_config(ks);
1593
1594 /* simple check for a valid chip being connected to the bus */
1595 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1596 ks_err(ks, "failed to read device ID\n");
1597 err = -ENODEV;
1598 goto err_register;
1599 }
1600
1601 if (ks_read_selftest(ks)) {
1602		ks_err(ks, "memory selftest failed\n");
1603 err = -ENODEV;
1604 goto err_register;
1605 }
1606
1607 err = register_netdev(netdev);
1608 if (err)
1609 goto err_register;
1610
1611 platform_set_drvdata(pdev, netdev);
1612
1613 ks_soft_reset(ks, GRR_GSR);
1614 ks_hw_init(ks);
1615 ks_disable(ks);
1616 ks_setup(ks);
1617 ks_setup_int(ks);
1618 ks_enable_int(ks);
1619 ks_enable(ks);
1620	memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
1621
1622 data = ks_rdreg16(ks, KS_OBCR);
1623 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
1624
1625	/*
1626	 * If you want to use the default MAC address, comment out
1627	 * the two calls below.
1628	 */
1629
1630 random_ether_addr(netdev->dev_addr);
1631 ks_set_mac(ks, netdev->dev_addr);
1632
1633 id = ks_rdreg16(ks, KS_CIDER);
1634
1635 printk(KERN_INFO DRV_NAME
1636 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1637 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1638 return 0;
1639
1640err_register:
1641err_get_irq:
1642 iounmap(ks->hw_addr_cmd);
1643err_ioremap1:
1644 iounmap(ks->hw_addr);
1645err_ioremap:
1646 free_netdev(netdev);
1647err_alloc_etherdev:
1648 release_mem_region(io_c->start, resource_size(io_c));
1649err_mem_region1:
1650 release_mem_region(io_d->start, resource_size(io_d));
1651err_mem_region:
1652 return err;
1653}
1654
1655static int __devexit ks8851_remove(struct platform_device *pdev)
1656{
1657 struct net_device *netdev = platform_get_drvdata(pdev);
1658 struct ks_net *ks = netdev_priv(netdev);
1659 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1660
1661	unregister_netdev(netdev);
1662	kfree(ks->frame_head_info);
1663	iounmap(ks->hw_addr_cmd);
1664	iounmap(ks->hw_addr);
1665	free_netdev(netdev);
1666	release_mem_region(iomem->start, resource_size(iomem));
1667	return 0;
1668}
1669
1670static struct platform_driver ks8851_platform_driver = {
1671 .driver = {
1672 .name = DRV_NAME,
1673 .owner = THIS_MODULE,
1674 },
1675 .probe = ks8851_probe,
1676 .remove = __devexit_p(ks8851_remove),
1677};
1678
1679static int __init ks8851_init(void)
1680{
1681 return platform_driver_register(&ks8851_platform_driver);
1682}
1683
1684static void __exit ks8851_exit(void)
1685{
1686 platform_driver_unregister(&ks8851_platform_driver);
1687}
1688
1689module_init(ks8851_init);
1690module_exit(ks8851_exit);
1691
1692MODULE_DESCRIPTION("KS8851 MLL Network driver");
1693MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
1694MODULE_LICENSE("GPL");
1695module_param_named(message, msg_enable, int, 0);
1696MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1697
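For reference, ks8851_probe() above pulls two IORESOURCE_MEM entries (the 16-bit data window and the command/address window) plus one IRQ from its platform device. The board-code sketch below only illustrates that contract; the base addresses, the IRQ number and the "ks8851_mll" device name are assumptions for the example, not values taken from this patch.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource ks8851_resources[] = {
	{	/* data register window, ioremap'd to ks->hw_addr */
		.start	= 0x30000000,
		.end	= 0x30000003,
		.flags	= IORESOURCE_MEM,
	},
	{	/* command/address window, ioremap'd to ks->hw_addr_cmd */
		.start	= 0x30000004,
		.end	= 0x30000007,
		.flags	= IORESOURCE_MEM,
	},
	{	/* line returned by platform_get_irq(pdev, 0) */
		.start	= 16,
		.end	= 16,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device ks8851_device = {
	.name		= "ks8851_mll",	/* assumed to match the driver's DRV_NAME */
	.id		= -1,
	.resource	= ks8851_resources,
	.num_resources	= ARRAY_SIZE(ks8851_resources),
};

Board init code would then call platform_device_register(&ks8851_device) so that ks8851_probe() is matched against the device.
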
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 92ceb689b4d4..2af81735386b 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
828 828
829static struct platform_driver meth_driver = { 829static struct platform_driver meth_driver = {
830 .probe = meth_probe, 830 .probe = meth_probe,
831 .remove = __devexit_p(meth_remove), 831 .remove = __exit_p(meth_remove),
832 .driver = { 832 .driver = {
833 .name = "meth", 833 .name = "meth",
834 .owner = THIS_MODULE, 834 .owner = THIS_MODULE,
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index b5aa974827e5..9b9eab107704 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1714 /* 4 fragments per cmd des */ 1714 /* 4 fragments per cmd des */
1715 no_of_desc = (frag_count + 3) >> 2; 1715 no_of_desc = (frag_count + 3) >> 2;
1716 1716
1717 if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { 1717 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
1718 netif_stop_queue(netdev); 1718 netif_stop_queue(netdev);
1719 return NETDEV_TX_BUSY; 1719 return NETDEV_TX_BUSY;
1720 } 1720 }
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 064a4fe1dd90..28a86224879d 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
71 struct pasemi_mac *mac = netdev_priv(netdev); 71 struct pasemi_mac *mac = netdev_priv(netdev);
72 struct phy_device *phydev = mac->phydev; 72 struct phy_device *phydev = mac->phydev;
73 73
74 if (!phydev)
75 return -EOPNOTSUPP;
76
74 return phy_ethtool_gset(phydev, cmd); 77 return phy_ethtool_gset(phydev, cmd);
75} 78}
76 79
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 474876c879cb..bd3447f04902 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = {
1754 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), 1754 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), 1755 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), 1756 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
1757 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), 1757 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), 1758 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
1759 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), 1759 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1760 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1761 PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), 1762 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), 1763 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), 1764 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), 1765 PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 1766 PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
1767 0xb4be14e3, 0x43ac239b, 0x0877b627), 1767 0xb4be14e3, 0x43ac239b, 0x0877b627),
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index cc394d073755..5910df60c93e 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2179,7 +2179,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
2179 * session or the special tunnel type. 2179 * session or the special tunnel type.
2180 */ 2180 */
2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, 2181static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
2182 char __user *optval, int optlen) 2182 char __user *optval, unsigned int optlen)
2183{ 2183{
2184 struct sock *sk = sock->sk; 2184 struct sock *sk = sock->sk;
2185 struct pppol2tp_session *session = sk->sk_user_data; 2185 struct pppol2tp_session *session = sk->sk_user_data;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a9845a2f243f..3ec6e85587a2 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -9,6 +9,7 @@
9 9
10#include <linux/pci.h> 10#include <linux/pci.h>
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/rtnetlink.h>
12 13
13/* 14/*
14 * General definitions... 15 * General definitions...
@@ -135,9 +136,9 @@ enum {
135 RST_FO_TFO = (1 << 0), 136 RST_FO_TFO = (1 << 0),
136 RST_FO_RR_MASK = 0x00060000, 137 RST_FO_RR_MASK = 0x00060000,
137 RST_FO_RR_CQ_CAM = 0x00000000, 138 RST_FO_RR_CQ_CAM = 0x00000000,
138 RST_FO_RR_DROP = 0x00000001, 139 RST_FO_RR_DROP = 0x00000002,
139 RST_FO_RR_DQ = 0x00000002, 140 RST_FO_RR_DQ = 0x00000004,
140 RST_FO_RR_RCV_FUNC_CQ = 0x00000003, 141 RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
141 RST_FO_FRB = (1 << 12), 142 RST_FO_FRB = (1 << 12),
142 RST_FO_MOP = (1 << 13), 143 RST_FO_MOP = (1 << 13),
143 RST_FO_REG = (1 << 14), 144 RST_FO_REG = (1 << 14),
@@ -1381,15 +1382,15 @@ struct intr_context {
1381 1382
1382/* adapter flags definitions. */ 1383/* adapter flags definitions. */
1383enum { 1384enum {
1384 QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */ 1385 QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
1385 QL_LEGACY_ENABLED = (1 << 3), 1386 QL_LEGACY_ENABLED = 1,
1386 QL_MSI_ENABLED = (1 << 3), 1387 QL_MSI_ENABLED = 2,
1387 QL_MSIX_ENABLED = (1 << 4), 1388 QL_MSIX_ENABLED = 3,
1388 QL_DMA64 = (1 << 5), 1389 QL_DMA64 = 4,
1389 QL_PROMISCUOUS = (1 << 6), 1390 QL_PROMISCUOUS = 5,
1390 QL_ALLMULTI = (1 << 7), 1391 QL_ALLMULTI = 6,
1391 QL_PORT_CFG = (1 << 8), 1392 QL_PORT_CFG = 7,
1392 QL_CAM_RT_SET = (1 << 9), 1393 QL_CAM_RT_SET = 8,
1393}; 1394};
1394 1395
1395/* link_status bit definitions */ 1396/* link_status bit definitions */
@@ -1477,7 +1478,6 @@ struct ql_adapter {
1477 u32 mailbox_in; 1478 u32 mailbox_in;
1478 u32 mailbox_out; 1479 u32 mailbox_out;
1479 struct mbox_params idc_mbc; 1480 struct mbox_params idc_mbc;
1480 struct mutex mpi_mutex;
1481 1481
1482 int tx_ring_size; 1482 int tx_ring_size;
1483 int rx_ring_size; 1483 int rx_ring_size;
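
A note on the qlge.h flags hunk above: qdev->flags is manipulated with set_bit()/test_bit()/clear_bit() (see set_bit(QL_ADAPTER_UP, &qdev->flags) in the qlge_main.c hunk further down), and those helpers take a bit index, not a mask. With the old mask-style values QL_ADAPTER_UP was (1 << 0) == 1, so it actually addressed bit 1, and QL_LEGACY_ENABLED and QL_MSI_ENABLED even shared the value (1 << 3). The stand-alone sketch below only illustrates that index-versus-mask difference; it is not part of the patch.

#include <stdio.h>

enum { UP_AS_MASK = (1 << 0) };	/* old style: a mask value */
enum { UP_AS_INDEX = 0 };	/* new style: a plain bit number */

/* what the kernel's set_bit(nr, addr) does with its first argument */
static void set_bit_like(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

int main(void)
{
	unsigned long flags = 0;

	set_bit_like(UP_AS_MASK, &flags);
	printf("mask-style argument sets 0x%lx (bit 1, not bit 0)\n", flags);

	flags = 0;
	set_bit_like(UP_AS_INDEX, &flags);
	printf("index-style argument sets 0x%lx (bit 0, as intended)\n", flags);
	return 0;
}
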
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 68f9bd280f86..52073946bce3 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
45 if (!netif_running(qdev->ndev)) 45 if (!netif_running(qdev->ndev))
46 return status; 46 return status;
47 47
48 spin_lock(&qdev->hw_lock);
49 /* Skip the default queue, and update the outbound handler 48 /* Skip the default queue, and update the outbound handler
50 * queues if they changed. 49 * queues if they changed.
51 */ 50 */
@@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
92 } 91 }
93 } 92 }
94exit: 93exit:
95 spin_unlock(&qdev->hw_lock);
96 return status; 94 return status;
97} 95}
98 96
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 7783c5db81dc..61680715cde0 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -34,7 +34,6 @@
34#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
35#include <linux/ethtool.h> 35#include <linux/ethtool.h>
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
39#include <linux/delay.h> 38#include <linux/delay.h>
40#include <linux/mm.h> 39#include <linux/mm.h>
@@ -1926,12 +1925,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1926 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 1925 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1927 if (status) 1926 if (status)
1928 return; 1927 return;
1929 spin_lock(&qdev->hw_lock);
1930 if (ql_set_mac_addr_reg 1928 if (ql_set_mac_addr_reg
1931 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1929 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1932 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); 1930 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1933 } 1931 }
1934 spin_unlock(&qdev->hw_lock);
1935 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1932 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1936} 1933}
1937 1934
@@ -1945,12 +1942,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1945 if (status) 1942 if (status)
1946 return; 1943 return;
1947 1944
1948 spin_lock(&qdev->hw_lock);
1949 if (ql_set_mac_addr_reg 1945 if (ql_set_mac_addr_reg
1950 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { 1946 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1951 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); 1947 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1952 } 1948 }
1953 spin_unlock(&qdev->hw_lock);
1954 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1949 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1955 1950
1956} 1951}
@@ -2001,15 +1996,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2001 /* 1996 /*
2002 * Check MPI processor activity. 1997 * Check MPI processor activity.
2003 */ 1998 */
2004 if (var & STS_PI) { 1999 if ((var & STS_PI) &&
2000 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2005 /* 2001 /*
2006 * We've got an async event or mailbox completion. 2002 * We've got an async event or mailbox completion.
2007 * Handle it and clear the source of the interrupt. 2003 * Handle it and clear the source of the interrupt.
2008 */ 2004 */
2009 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); 2005 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2010 ql_disable_completion_interrupt(qdev, intr_context->intr); 2006 ql_disable_completion_interrupt(qdev, intr_context->intr);
2011 queue_delayed_work_on(smp_processor_id(), qdev->workqueue, 2007 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2012 &qdev->mpi_work, 0); 2008 queue_delayed_work_on(smp_processor_id(),
2009 qdev->workqueue, &qdev->mpi_work, 0);
2013 work_done++; 2010 work_done++;
2014 } 2011 }
2015 2012
@@ -3142,14 +3139,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
3142{ 3139{
3143 int status = 0; 3140 int status = 0;
3144 3141
3145 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3142 /* Clear all the entries in the routing table. */
3143 status = ql_clear_routing_entries(qdev);
3146 if (status) 3144 if (status)
3147 return status; 3145 return status;
3148 3146
3149 /* Clear all the entries in the routing table. */ 3147 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3150 status = ql_clear_routing_entries(qdev);
3151 if (status) 3148 if (status)
3152 goto exit; 3149 return status;
3153 3150
3154 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1); 3151 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3155 if (status) { 3152 if (status) {
@@ -3380,12 +3377,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3380 3377
3381 ql_free_rx_buffers(qdev); 3378 ql_free_rx_buffers(qdev);
3382 3379
3383 spin_lock(&qdev->hw_lock);
3384 status = ql_adapter_reset(qdev); 3380 status = ql_adapter_reset(qdev);
3385 if (status) 3381 if (status)
3386 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n", 3382 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3387 qdev->func); 3383 qdev->func);
3388 spin_unlock(&qdev->hw_lock);
3389 return status; 3384 return status;
3390} 3385}
3391 3386
@@ -3587,7 +3582,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3587 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3582 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3588 if (status) 3583 if (status)
3589 return; 3584 return;
3590 spin_lock(&qdev->hw_lock);
3591 /* 3585 /*
3592 * Set or clear promiscuous mode if a 3586 * Set or clear promiscuous mode if a
3593 * transition is taking place. 3587 * transition is taking place.
@@ -3664,7 +3658,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
3664 } 3658 }
3665 } 3659 }
3666exit: 3660exit:
3667 spin_unlock(&qdev->hw_lock);
3668 ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 3661 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3669} 3662}
3670 3663
@@ -3684,10 +3677,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3684 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 3677 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3685 if (status) 3678 if (status)
3686 return status; 3679 return status;
3687 spin_lock(&qdev->hw_lock);
3688 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, 3680 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3689 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); 3681 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3690 spin_unlock(&qdev->hw_lock);
3691 if (status) 3682 if (status)
3692 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); 3683 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3693 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 3684 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -3705,7 +3696,7 @@ static void ql_asic_reset_work(struct work_struct *work)
3705 struct ql_adapter *qdev = 3696 struct ql_adapter *qdev =
3706 container_of(work, struct ql_adapter, asic_reset_work.work); 3697 container_of(work, struct ql_adapter, asic_reset_work.work);
3707 int status; 3698 int status;
3708 3699 rtnl_lock();
3709 status = ql_adapter_down(qdev); 3700 status = ql_adapter_down(qdev);
3710 if (status) 3701 if (status)
3711 goto error; 3702 goto error;
@@ -3713,12 +3704,12 @@ static void ql_asic_reset_work(struct work_struct *work)
3713 status = ql_adapter_up(qdev); 3704 status = ql_adapter_up(qdev);
3714 if (status) 3705 if (status)
3715 goto error; 3706 goto error;
3716 3707 rtnl_unlock();
3717 return; 3708 return;
3718error: 3709error:
3719 QPRINTK(qdev, IFUP, ALERT, 3710 QPRINTK(qdev, IFUP, ALERT,
3720 "Driver up/down cycle failed, closing device\n"); 3711 "Driver up/down cycle failed, closing device\n");
3721 rtnl_lock(); 3712
3722 set_bit(QL_ADAPTER_UP, &qdev->flags); 3713 set_bit(QL_ADAPTER_UP, &qdev->flags);
3723 dev_close(qdev->ndev); 3714 dev_close(qdev->ndev);
3724 rtnl_unlock(); 3715 rtnl_unlock();
@@ -3834,11 +3825,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3834 return err; 3825 return err;
3835 } 3826 }
3836 3827
3828 qdev->ndev = ndev;
3829 qdev->pdev = pdev;
3830 pci_set_drvdata(pdev, ndev);
3837 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3831 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3838 if (pos <= 0) { 3832 if (pos <= 0) {
3839 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 3833 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3840 "aborting.\n"); 3834 "aborting.\n");
3841 goto err_out; 3835 return pos;
3842 } else { 3836 } else {
3843 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); 3837 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3844 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 3838 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3845,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3851 err = pci_request_regions(pdev, DRV_NAME); 3845 err = pci_request_regions(pdev, DRV_NAME);
3852 if (err) { 3846 if (err) {
3853 dev_err(&pdev->dev, "PCI region request failed.\n"); 3847 dev_err(&pdev->dev, "PCI region request failed.\n");
3854 goto err_out; 3848 return err;
3855 } 3849 }
3856 3850
3857 pci_set_master(pdev); 3851 pci_set_master(pdev);
@@ -3869,7 +3863,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3869 goto err_out; 3863 goto err_out;
3870 } 3864 }
3871 3865
3872 pci_set_drvdata(pdev, ndev);
3873 qdev->reg_base = 3866 qdev->reg_base =
3874 ioremap_nocache(pci_resource_start(pdev, 1), 3867 ioremap_nocache(pci_resource_start(pdev, 1),
3875 pci_resource_len(pdev, 1)); 3868 pci_resource_len(pdev, 1));
@@ -3889,8 +3882,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3889 goto err_out; 3882 goto err_out;
3890 } 3883 }
3891 3884
3892 qdev->ndev = ndev;
3893 qdev->pdev = pdev;
3894 err = ql_get_board_info(qdev); 3885 err = ql_get_board_info(qdev);
3895 if (err) { 3886 if (err) {
3896 dev_err(&pdev->dev, "Register access failed.\n"); 3887 dev_err(&pdev->dev, "Register access failed.\n");
@@ -3930,7 +3921,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3930 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 3921 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3931 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 3922 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
3932 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 3923 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
3933 mutex_init(&qdev->mpi_mutex);
3934 init_completion(&qdev->ide_completion); 3924 init_completion(&qdev->ide_completion);
3935 3925
3936 if (!cards_found) { 3926 if (!cards_found) {
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 6685bd97da91..c2e43073047e 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
472{ 472{
473 int status, count; 473 int status, count;
474 474
475 mutex_lock(&qdev->mpi_mutex);
476 475
477 /* Begin polled mode for MPI */ 476 /* Begin polled mode for MPI */
478 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 477 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
541 status = -EIO; 540 status = -EIO;
542 } 541 }
543end: 542end:
544 mutex_unlock(&qdev->mpi_mutex);
545 /* End polled mode for MPI */ 543 /* End polled mode for MPI */
546 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 544 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
547 return status; 545 return status;
@@ -776,7 +774,9 @@ static int ql_idc_wait(struct ql_adapter *qdev)
776static int ql_set_port_cfg(struct ql_adapter *qdev) 774static int ql_set_port_cfg(struct ql_adapter *qdev)
777{ 775{
778 int status; 776 int status;
777 rtnl_lock();
779 status = ql_mb_set_port_cfg(qdev); 778 status = ql_mb_set_port_cfg(qdev);
779 rtnl_unlock();
780 if (status) 780 if (status)
781 return status; 781 return status;
782 status = ql_idc_wait(qdev); 782 status = ql_idc_wait(qdev);
@@ -797,7 +797,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 797 container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
798 int status; 798 int status;
799 799
800 rtnl_lock();
800 status = ql_mb_get_port_cfg(qdev); 801 status = ql_mb_get_port_cfg(qdev);
802 rtnl_unlock();
801 if (status) { 803 if (status) {
802 QPRINTK(qdev, DRV, ERR, 804 QPRINTK(qdev, DRV, ERR,
803 "Bug: Failed to get port config data.\n"); 805 "Bug: Failed to get port config data.\n");
@@ -855,7 +857,9 @@ void ql_mpi_idc_work(struct work_struct *work)
855 * needs to be set. 857 * needs to be set.
856 * */ 858 * */
857 set_bit(QL_CAM_RT_SET, &qdev->flags); 859 set_bit(QL_CAM_RT_SET, &qdev->flags);
860 rtnl_lock();
858 status = ql_mb_idc_ack(qdev); 861 status = ql_mb_idc_ack(qdev);
862 rtnl_unlock();
859 if (status) { 863 if (status) {
860 QPRINTK(qdev, DRV, ERR, 864 QPRINTK(qdev, DRV, ERR,
861 "Bug: No pending IDC!\n"); 865 "Bug: No pending IDC!\n");
@@ -871,7 +875,7 @@ void ql_mpi_work(struct work_struct *work)
871 struct mbox_params *mbcp = &mbc; 875 struct mbox_params *mbcp = &mbc;
872 int err = 0; 876 int err = 0;
873 877
874 mutex_lock(&qdev->mpi_mutex); 878 rtnl_lock();
875 879
876 while (ql_read32(qdev, STS) & STS_PI) { 880 while (ql_read32(qdev, STS) & STS_PI) {
877 memset(mbcp, 0, sizeof(struct mbox_params)); 881 memset(mbcp, 0, sizeof(struct mbox_params));
@@ -884,7 +888,7 @@ void ql_mpi_work(struct work_struct *work)
884 break; 888 break;
885 } 889 }
886 890
887 mutex_unlock(&qdev->mpi_mutex); 891 rtnl_unlock();
888 ql_enable_completion_interrupt(qdev, 0); 892 ql_enable_completion_interrupt(qdev, 0);
889} 893}
890 894
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ecf3279fbef5..f4dfd1f679a9 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
826 826
827static struct platform_driver sgiseeq_driver = { 827static struct platform_driver sgiseeq_driver = {
828 .probe = sgiseeq_probe, 828 .probe = sgiseeq_probe,
829 .remove = __devexit_p(sgiseeq_remove), 829 .remove = __exit_p(sgiseeq_remove),
830 .driver = { 830 .driver = {
831 .name = "sgiseeq", 831 .name = "sgiseeq",
832 .owner = THIS_MODULE, 832 .owner = THIS_MODULE,
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 55bad4081966..01f6811f1324 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3935,11 +3935,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3935#endif 3935#endif
3936 3936
3937 err = -ENOMEM; 3937 err = -ENOMEM;
3938 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3938 /* space for skge@pci:0000:04:00.0 */
3939 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
3940 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
3939 if (!hw) { 3941 if (!hw) {
3940 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 3942 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3941 goto err_out_free_regions; 3943 goto err_out_free_regions;
3942 } 3944 }
3945 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
3943 3946
3944 hw->pdev = pdev; 3947 hw->pdev = pdev;
3945 spin_lock_init(&hw->hw_lock); 3948 spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3977,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3974 goto err_out_free_netdev; 3977 goto err_out_free_netdev;
3975 } 3978 }
3976 3979
3977 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3980 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
3978 if (err) { 3981 if (err) {
3979 dev_err(&pdev->dev, "%s: cannot assign irq %d\n", 3982 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3980 dev->name, pdev->irq); 3983 dev->name, pdev->irq);
@@ -3982,14 +3985,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3982 } 3985 }
3983 skge_show_addr(dev); 3986 skge_show_addr(dev);
3984 3987
3985 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { 3988 if (hw->ports > 1) {
3986 if (register_netdev(dev1) == 0) 3989 dev1 = skge_devinit(hw, 1, using_dac);
3990 if (dev1 && register_netdev(dev1) == 0)
3987 skge_show_addr(dev1); 3991 skge_show_addr(dev1);
3988 else { 3992 else {
3989 /* Failure to register second port need not be fatal */ 3993 /* Failure to register second port need not be fatal */
3990 dev_warn(&pdev->dev, "register of second port failed\n"); 3994 dev_warn(&pdev->dev, "register of second port failed\n");
3991 hw->dev[1] = NULL; 3995 hw->dev[1] = NULL;
3992 free_netdev(dev1); 3996 hw->ports = 1;
3997 if (dev1)
3998 free_netdev(dev1);
3993 } 3999 }
3994 } 4000 }
3995 pci_set_drvdata(pdev, hw); 4001 pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17caccbb7685..831de1b6e96e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2423,6 +2423,8 @@ struct skge_hw {
2423 u16 phy_addr; 2423 u16 phy_addr;
2424 spinlock_t phy_lock; 2424 spinlock_t phy_lock;
2425 struct tasklet_struct phy_task; 2425 struct tasklet_struct phy_task;
2426
2427 char irq_name[0]; /* skge@pci:000:04:00.0 */
2426}; 2428};
2427 2429
2428enum pause_control { 2430enum pause_control {
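
The skge hunks above and the sky2 hunks below share one pattern: the IRQ is now requested with a per-adapter name kept in a zero-length irq_name[] member at the end of the hardware struct, and the single kzalloc() is grown by strlen(DRV_NAME "@pci:") + strlen(pci_name(pdev)) + 1 to hold it. A stand-alone sketch of that trailing-buffer idiom follows; the struct and function names are illustrative, not taken from the drivers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hw_state {
	int irq;
	char irq_name[];	/* flexible array member, like irq_name[0] */
};

static struct hw_state *hw_alloc(const char *drv, const char *pci_slot)
{
	/* one allocation covers the struct plus the formatted name */
	size_t extra = strlen(drv) + strlen("@pci:") + strlen(pci_slot) + 1;
	struct hw_state *hw = calloc(1, sizeof(*hw) + extra);

	if (hw)
		sprintf(hw->irq_name, "%s@pci:%s", drv, pci_slot);
	return hw;
}

int main(void)
{
	struct hw_state *hw = hw_alloc("skge", "0000:04:00.0");

	if (!hw)
		return 1;
	puts(hw->irq_name);	/* prints "skge@pci:0000:04:00.0" */
	free(hw);
	return 0;
}
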
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ef1165718dd7..2ab5c39f33ca 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4487,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0; 4487 wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4488 4488
4489 err = -ENOMEM; 4489 err = -ENOMEM;
4490 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 4490
4491 hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
4492 + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
4491 if (!hw) { 4493 if (!hw) {
4492 dev_err(&pdev->dev, "cannot allocate hardware struct\n"); 4494 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4493 goto err_out_free_regions; 4495 goto err_out_free_regions;
4494 } 4496 }
4495 4497
4496 hw->pdev = pdev; 4498 hw->pdev = pdev;
4499 sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
4497 4500
4498 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 4501 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4499 if (!hw->regs) { 4502 if (!hw->regs) {
@@ -4539,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4539 4542
4540 err = request_irq(pdev->irq, sky2_intr, 4543 err = request_irq(pdev->irq, sky2_intr,
4541 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED, 4544 (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
4542 dev->name, hw); 4545 hw->irq_name, hw);
4543 if (err) { 4546 if (err) {
4544 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4547 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4545 goto err_out_unregister; 4548 goto err_out_unregister;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index e0f23a101043..ed54129698b4 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2085,6 +2085,8 @@ struct sky2_hw {
2085 struct timer_list watchdog_timer; 2085 struct timer_list watchdog_timer;
2086 struct work_struct restart_work; 2086 struct work_struct restart_work;
2087 wait_queue_head_t msi_wait; 2087 wait_queue_head_t msi_wait;
2088
2089 char irq_name[0];
2088}; 2090};
2089 2091
2090static inline int sky2_is_copper(const struct sky2_hw *hw) 2092static inline int sky2_is_copper(const struct sky2_hw *hw)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f09bc5dfe8b2..ba5d3fe753b6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
902 struct tg3 *tp = bp->priv; 902 struct tg3 *tp = bp->priv;
903 u32 val; 903 u32 val;
904 904
905 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 905 spin_lock_bh(&tp->lock);
906 return -EAGAIN;
907 906
908 if (tg3_readphy(tp, reg, &val)) 907 if (tg3_readphy(tp, reg, &val))
909 return -EIO; 908 val = -EIO;
909
910 spin_unlock_bh(&tp->lock);
910 911
911 return val; 912 return val;
912} 913}
@@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
914static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) 915static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
915{ 916{
916 struct tg3 *tp = bp->priv; 917 struct tg3 *tp = bp->priv;
918 u32 ret = 0;
917 919
918 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) 920 spin_lock_bh(&tp->lock);
919 return -EAGAIN;
920 921
921 if (tg3_writephy(tp, reg, val)) 922 if (tg3_writephy(tp, reg, val))
922 return -EIO; 923 ret = -EIO;
923 924
924 return 0; 925 spin_unlock_bh(&tp->lock);
926
927 return ret;
925} 928}
926 929
927static int tg3_mdio_reset(struct mii_bus *bp) 930static int tg3_mdio_reset(struct mii_bus *bp)
@@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
1011 1014
1012static void tg3_mdio_start(struct tg3 *tp) 1015static void tg3_mdio_start(struct tg3 *tp)
1013{ 1016{
1014 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1015 mutex_lock(&tp->mdio_bus->mdio_lock);
1016 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1017 mutex_unlock(&tp->mdio_bus->mdio_lock);
1018 }
1019
1020 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; 1017 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1021 tw32_f(MAC_MI_MODE, tp->mi_mode); 1018 tw32_f(MAC_MI_MODE, tp->mi_mode);
1022 udelay(80); 1019 udelay(80);
@@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp)
1041 tg3_mdio_config_5785(tp); 1038 tg3_mdio_config_5785(tp);
1042} 1039}
1043 1040
1044static void tg3_mdio_stop(struct tg3 *tp)
1045{
1046 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1047 mutex_lock(&tp->mdio_bus->mdio_lock);
1048 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
1049 mutex_unlock(&tp->mdio_bus->mdio_lock);
1050 }
1051}
1052
1053static int tg3_mdio_init(struct tg3 *tp) 1041static int tg3_mdio_init(struct tg3 *tp)
1054{ 1042{
1055 int i; 1043 int i;
@@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp)
1141 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; 1129 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1142 mdiobus_unregister(tp->mdio_bus); 1130 mdiobus_unregister(tp->mdio_bus);
1143 mdiobus_free(tp->mdio_bus); 1131 mdiobus_free(tp->mdio_bus);
1144 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1145 } 1132 }
1146} 1133}
1147 1134
@@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev)
1363 struct tg3 *tp = netdev_priv(dev); 1350 struct tg3 *tp = netdev_priv(dev);
1364 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1351 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1365 1352
1366 spin_lock(&tp->lock); 1353 spin_lock_bh(&tp->lock);
1367 1354
1368 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | 1355 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1369 MAC_MODE_HALF_DUPLEX); 1356 MAC_MODE_HALF_DUPLEX);
@@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev)
1431 tp->link_config.active_speed = phydev->speed; 1418 tp->link_config.active_speed = phydev->speed;
1432 tp->link_config.active_duplex = phydev->duplex; 1419 tp->link_config.active_duplex = phydev->duplex;
1433 1420
1434 spin_unlock(&tp->lock); 1421 spin_unlock_bh(&tp->lock);
1435 1422
1436 if (linkmesg) 1423 if (linkmesg)
1437 tg3_link_report(tp); 1424 tg3_link_report(tp);
@@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp)
6392 6379
6393 tg3_nvram_lock(tp); 6380 tg3_nvram_lock(tp);
6394 6381
6395 tg3_mdio_stop(tp);
6396
6397 tg3_ape_lock(tp, TG3_APE_LOCK_GRC); 6382 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6398 6383
6399 /* No matching tg3_nvram_unlock() after this because 6384 /* No matching tg3_nvram_unlock() after this because
@@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev)
8698 8683
8699 del_timer_sync(&tp->timer); 8684 del_timer_sync(&tp->timer);
8700 8685
8686 tg3_phy_stop(tp);
8687
8701 tg3_full_lock(tp, 1); 8688 tg3_full_lock(tp, 1);
8702#if 0 8689#if 0
8703 tg3_dump_state(tp); 8690 tg3_dump_state(tp);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 82b45d8797b4..bab7940158e6 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2412,7 +2412,6 @@ struct ring_info {
2412 2412
2413struct tx_ring_info { 2413struct tx_ring_info {
2414 struct sk_buff *skb; 2414 struct sk_buff *skb;
2415 u32 prev_vlan_tag;
2416}; 2415};
2417 2416
2418struct tg3_config_info { 2417struct tg3_config_info {
@@ -2749,7 +2748,6 @@ struct tg3 {
2749#define TG3_FLG3_5701_DMA_BUG 0x00000008 2748#define TG3_FLG3_5701_DMA_BUG 0x00000008
2750#define TG3_FLG3_USE_PHYLIB 0x00000010 2749#define TG3_FLG3_USE_PHYLIB 0x00000010
2751#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2750#define TG3_FLG3_MDIOBUS_INITED 0x00000020
2752#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
2753#define TG3_FLG3_PHY_CONNECTED 0x00000080 2751#define TG3_FLG3_PHY_CONNECTED 0x00000080
2754#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 2752#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2755#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2753#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index d032bba9bc4c..0caa8008c51c 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
418 goto halt_fail_and_release; 418 goto halt_fail_and_release;
419 } 419 }
420 memcpy(net->dev_addr, bp, ETH_ALEN); 420 memcpy(net->dev_addr, bp, ETH_ALEN);
421 memcpy(net->perm_addr, bp, ETH_ALEN);
421 422
422 /* set a nonzero filter to enable data transfers */ 423 /* set a nonzero filter to enable data transfers */
423 memset(u.set, 0, sizeof *u.set); 424 memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d445845f2779..8d009760277c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -948,7 +948,7 @@ free:
948 return err; 948 return err;
949} 949}
950 950
951static void virtnet_remove(struct virtio_device *vdev) 951static void __devexit virtnet_remove(struct virtio_device *vdev)
952{ 952{
953 struct virtnet_info *vi = vdev->priv; 953 struct virtnet_info *vi = vdev->priv;
954 struct sk_buff *skb; 954 struct sk_buff *skb;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 49ea9c92b7e6..d7a764a2fc1a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -31,13 +31,12 @@ config STRIP
31 ---help--- 31 ---help---
32 Say Y if you have a Metricom radio and intend to use Starmode Radio 32 Say Y if you have a Metricom radio and intend to use Starmode Radio
33 IP. STRIP is a radio protocol developed for the MosquitoNet project 33 IP. STRIP is a radio protocol developed for the MosquitoNet project
34 (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet 34 to send Internet traffic using Metricom radios. Metricom radios are
35 traffic using Metricom radios. Metricom radios are small, battery 35 small, battery powered, 100kbit/sec packet radio transceivers, about
36 powered, 100kbit/sec packet radio transceivers, about the size and 36 the size and weight of a cellular telephone. (You may also have heard
37 weight of a cellular telephone. (You may also have heard them called 37 them called "Metricom modems" but we avoid the term "modem" because
38 "Metricom modems" but we avoid the term "modem" because it misleads 38 it misleads many people into thinking that you can plug a Metricom
39 many people into thinking that you can plug a Metricom modem into a 39 modem into a phone line and use it as a modem.)
40 phone line and use it as a modem.)
41 40
42 You can use STRIP on any Linux machine with a serial port, although 41 You can use STRIP on any Linux machine with a serial port, although
43 it is obviously most useful for people with laptop computers. If you 42 it is obviously most useful for people with laptop computers. If you
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index b3e5cf3735b0..dbd488da18b1 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS]; 1141 u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS]; 1142 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
1143 int chain, idx, i; 1143 int chain, idx, i;
1144 u8 f; 1144 u32 phy_data = 0;
1145 u8 f, tmp;
1145 1146
1146 switch (channel->band) { 1147 switch (channel->band) {
1147 case IEEE80211_BAND_2GHZ: 1148 case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
1208 } 1209 }
1209 1210
1210 for (i = 0; i < 76; i++) { 1211 for (i = 0; i < 76; i++) {
1211 u32 phy_data;
1212 u8 tmp;
1213
1214 if (i < 25) { 1212 if (i < 25) {
1215 tmp = ar9170_interpolate_val(i, &pwrs[0][0], 1213 tmp = ar9170_interpolate_val(i, &pwrs[0][0],
1216 &vpds[0][0]); 1214 &vpds[0][0]);
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index e96091b31499..9c1397996e0a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -340,10 +340,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
340 q->mmio_base + B43_PIO_TXDATA, 340 q->mmio_base + B43_PIO_TXDATA,
341 sizeof(u16)); 341 sizeof(u16));
342 if (data_len & 1) { 342 if (data_len & 1) {
343 u8 tail[2] = { 0, };
344
343 /* Write the last byte. */ 345 /* Write the last byte. */
344 ctl &= ~B43_PIO_TXCTL_WRITEHI; 346 ctl &= ~B43_PIO_TXCTL_WRITEHI;
345 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 347 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
346 b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]); 348 tail[0] = data[data_len - 1];
349 ssb_block_write(dev->dev, tail, 2,
350 q->mmio_base + B43_PIO_TXDATA,
351 sizeof(u16));
347 } 352 }
348 353
349 return ctl; 354 return ctl;
@@ -386,26 +391,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
386 q->mmio_base + B43_PIO8_TXDATA, 391 q->mmio_base + B43_PIO8_TXDATA,
387 sizeof(u32)); 392 sizeof(u32));
388 if (data_len & 3) { 393 if (data_len & 3) {
389 u32 value = 0; 394 u8 tail[4] = { 0, };
390 395
391 /* Write the last few bytes. */ 396 /* Write the last few bytes. */
392 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | 397 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
393 B43_PIO8_TXCTL_24_31); 398 B43_PIO8_TXCTL_24_31);
394 data = &(data[data_len - 1]);
395 switch (data_len & 3) { 399 switch (data_len & 3) {
396 case 3: 400 case 3:
397 ctl |= B43_PIO8_TXCTL_16_23; 401 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
398 value |= (u32)(*data) << 16; 402 tail[0] = data[data_len - 3];
399 data--; 403 tail[1] = data[data_len - 2];
404 tail[2] = data[data_len - 1];
405 break;
400 case 2: 406 case 2:
401 ctl |= B43_PIO8_TXCTL_8_15; 407 ctl |= B43_PIO8_TXCTL_8_15;
402 value |= (u32)(*data) << 8; 408 tail[0] = data[data_len - 2];
403 data--; 409 tail[1] = data[data_len - 1];
410 break;
404 case 1: 411 case 1:
405 value |= (u32)(*data); 412 tail[0] = data[data_len - 1];
413 break;
406 } 414 }
407 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 415 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
408 b43_piotx_write32(q, B43_PIO8_TXDATA, value); 416 ssb_block_write(dev->dev, tail, 4,
417 q->mmio_base + B43_PIO8_TXDATA,
418 sizeof(u32));
409 } 419 }
410 420
411 return ctl; 421 return ctl;
@@ -693,21 +703,25 @@ data_ready:
693 q->mmio_base + B43_PIO8_RXDATA, 703 q->mmio_base + B43_PIO8_RXDATA,
694 sizeof(u32)); 704 sizeof(u32));
695 if (len & 3) { 705 if (len & 3) {
696 u32 value; 706 u8 tail[4] = { 0, };
697 char *data;
698 707
699 /* Read the last few bytes. */ 708 /* Read the last few bytes. */
700 value = b43_piorx_read32(q, B43_PIO8_RXDATA); 709 ssb_block_read(dev->dev, tail, 4,
701 data = &(skb->data[len + padding - 1]); 710 q->mmio_base + B43_PIO8_RXDATA,
711 sizeof(u32));
702 switch (len & 3) { 712 switch (len & 3) {
703 case 3: 713 case 3:
704 *data = (value >> 16); 714 skb->data[len + padding - 3] = tail[0];
705 data--; 715 skb->data[len + padding - 2] = tail[1];
716 skb->data[len + padding - 1] = tail[2];
717 break;
706 case 2: 718 case 2:
707 *data = (value >> 8); 719 skb->data[len + padding - 2] = tail[0];
708 data--; 720 skb->data[len + padding - 1] = tail[1];
721 break;
709 case 1: 722 case 1:
710 *data = value; 723 skb->data[len + padding - 1] = tail[0];
724 break;
711 } 725 }
712 } 726 }
713 } else { 727 } else {
@@ -715,11 +729,13 @@ data_ready:
715 q->mmio_base + B43_PIO_RXDATA, 729 q->mmio_base + B43_PIO_RXDATA,
716 sizeof(u16)); 730 sizeof(u16));
717 if (len & 1) { 731 if (len & 1) {
718 u16 value; 732 u8 tail[2] = { 0, };
719 733
720 /* Read the last byte. */ 734 /* Read the last byte. */
721 value = b43_piorx_read16(q, B43_PIO_RXDATA); 735 ssb_block_read(dev->dev, tail, 2,
722 skb->data[len + padding - 1] = value; 736 q->mmio_base + B43_PIO_RXDATA,
737 sizeof(u16));
738 skb->data[len + padding - 1] = tail[0];
723 } 739 }
724 } 740 }
725 741
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 896f532182f0..38cfd79e0590 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000; 631 data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
632 if (WARN_ON(!data->beacon_int)) 632 if (WARN_ON(!data->beacon_int))
633 data->beacon_int = 1; 633 data->beacon_int = 1;
634 if (data->started)
635 mod_timer(&data->beacon_timer,
636 jiffies + data->beacon_int);
634 } 637 }
635 638
636 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 639 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1cbd9b4a3efc..b8f5ee33445e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2381 /* Huawei-3Com */ 2381 /* Huawei-3Com */
2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, 2382 { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
2383 /* Hercules */ 2383 /* Hercules */
2384 { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
2384 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, 2385 { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
2385 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, 2386 { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
2386 /* Linksys */ 2387 /* Linksys */
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index f424146a2bc9..ac8aa09ba0da 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = {
130 .socket_suspend = assabet_pcmcia_socket_suspend, 130 .socket_suspend = assabet_pcmcia_socket_suspend,
131}; 131};
132 132
133int __init pcmcia_assabet_init(struct device *dev) 133int pcmcia_assabet_init(struct device *dev)
134{ 134{
135 int ret = -ENODEV; 135 int ret = -ENODEV;
136 136
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c
index 4c41e86ccff9..0c76d337815b 100644
--- a/drivers/pcmcia/sa1100_neponset.c
+++ b/drivers/pcmcia/sa1100_neponset.c
@@ -123,7 +123,7 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
123 .socket_suspend = sa1111_pcmcia_socket_suspend, 123 .socket_suspend = sa1111_pcmcia_socket_suspend,
124}; 124};
125 125
126int __init pcmcia_neponset_init(struct sa1111_dev *sadev) 126int pcmcia_neponset_init(struct sa1111_dev *sadev)
127{ 127{
128 int ret = -ENODEV; 128 int ret = -ENODEV;
129 129
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index afdbdaaf80cb..a2a742c8ff7e 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1211,15 +1211,6 @@ static int sony_nc_add(struct acpi_device *device)
1211 } 1211 }
1212 } 1212 }
1213 1213
1214 /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1
1215 * should be respected as we already checked for the device presence above */
1216 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) {
1217 dprintk("Invoking _INI\n");
1218 if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI,
1219 NULL, NULL)))
1220 dprintk("_INI Method failed\n");
1221 }
1222
1223 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", 1214 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
1224 &handle))) { 1215 &handle))) {
1225 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) 1216 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
@@ -1399,27 +1390,20 @@ struct sonypi_eventtypes {
1399 struct sonypi_event *events; 1390 struct sonypi_event *events;
1400}; 1391};
1401 1392
1402struct device_ctrl { 1393struct sony_pic_dev {
1394 struct acpi_device *acpi_dev;
1395 struct sony_pic_irq *cur_irq;
1396 struct sony_pic_ioport *cur_ioport;
1397 struct list_head interrupts;
1398 struct list_head ioports;
1399 struct mutex lock;
1400 struct sonypi_eventtypes *event_types;
1401 int (*handle_irq)(const u8, const u8);
1403 int model; 1402 int model;
1404 int (*handle_irq)(const u8, const u8);
1405 u16 evport_offset; 1403 u16 evport_offset;
1406 u8 has_camera; 1404 u8 camera_power;
1407 u8 has_bluetooth; 1405 u8 bluetooth_power;
1408 u8 has_wwan; 1406 u8 wwan_power;
1409 struct sonypi_eventtypes *event_types;
1410};
1411
1412struct sony_pic_dev {
1413 struct device_ctrl *control;
1414 struct acpi_device *acpi_dev;
1415 struct sony_pic_irq *cur_irq;
1416 struct sony_pic_ioport *cur_ioport;
1417 struct list_head interrupts;
1418 struct list_head ioports;
1419 struct mutex lock;
1420 u8 camera_power;
1421 u8 bluetooth_power;
1422 u8 wwan_power;
1423}; 1407};
1424 1408
1425static struct sony_pic_dev spic_dev = { 1409static struct sony_pic_dev spic_dev = {
@@ -1427,6 +1411,8 @@ static struct sony_pic_dev spic_dev = {
1427 .ioports = LIST_HEAD_INIT(spic_dev.ioports), 1411 .ioports = LIST_HEAD_INIT(spic_dev.ioports),
1428}; 1412};
1429 1413
1414static int spic_drv_registered;
1415
1430/* Event masks */ 1416/* Event masks */
1431#define SONYPI_JOGGER_MASK 0x00000001 1417#define SONYPI_JOGGER_MASK 0x00000001
1432#define SONYPI_CAPTURE_MASK 0x00000002 1418#define SONYPI_CAPTURE_MASK 0x00000002
@@ -1724,27 +1710,6 @@ static int type3_handle_irq(const u8 data_mask, const u8 ev)
1724 return 1; 1710 return 1;
1725} 1711}
1726 1712
1727static struct device_ctrl spic_types[] = {
1728 {
1729 .model = SONYPI_DEVICE_TYPE1,
1730 .handle_irq = NULL,
1731 .evport_offset = SONYPI_TYPE1_OFFSET,
1732 .event_types = type1_events,
1733 },
1734 {
1735 .model = SONYPI_DEVICE_TYPE2,
1736 .handle_irq = NULL,
1737 .evport_offset = SONYPI_TYPE2_OFFSET,
1738 .event_types = type2_events,
1739 },
1740 {
1741 .model = SONYPI_DEVICE_TYPE3,
1742 .handle_irq = type3_handle_irq,
1743 .evport_offset = SONYPI_TYPE3_OFFSET,
1744 .event_types = type3_events,
1745 },
1746};
1747
1748static void sony_pic_detect_device_type(struct sony_pic_dev *dev) 1713static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
1749{ 1714{
1750 struct pci_dev *pcidev; 1715 struct pci_dev *pcidev;
@@ -1752,48 +1717,63 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
1752 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1717 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1753 PCI_DEVICE_ID_INTEL_82371AB_3, NULL); 1718 PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
1754 if (pcidev) { 1719 if (pcidev) {
1755 dev->control = &spic_types[0]; 1720 dev->model = SONYPI_DEVICE_TYPE1;
1721 dev->evport_offset = SONYPI_TYPE1_OFFSET;
1722 dev->event_types = type1_events;
1756 goto out; 1723 goto out;
1757 } 1724 }
1758 1725
1759 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1726 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1760 PCI_DEVICE_ID_INTEL_ICH6_1, NULL); 1727 PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
1761 if (pcidev) { 1728 if (pcidev) {
1762 dev->control = &spic_types[2]; 1729 dev->model = SONYPI_DEVICE_TYPE2;
1730 dev->evport_offset = SONYPI_TYPE2_OFFSET;
1731 dev->event_types = type2_events;
1763 goto out; 1732 goto out;
1764 } 1733 }
1765 1734
1766 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1735 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1767 PCI_DEVICE_ID_INTEL_ICH7_1, NULL); 1736 PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
1768 if (pcidev) { 1737 if (pcidev) {
1769 dev->control = &spic_types[2]; 1738 dev->model = SONYPI_DEVICE_TYPE3;
1739 dev->handle_irq = type3_handle_irq;
1740 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1741 dev->event_types = type3_events;
1770 goto out; 1742 goto out;
1771 } 1743 }
1772 1744
1773 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1745 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1774 PCI_DEVICE_ID_INTEL_ICH8_4, NULL); 1746 PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
1775 if (pcidev) { 1747 if (pcidev) {
1776 dev->control = &spic_types[2]; 1748 dev->model = SONYPI_DEVICE_TYPE3;
1749 dev->handle_irq = type3_handle_irq;
1750 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1751 dev->event_types = type3_events;
1777 goto out; 1752 goto out;
1778 } 1753 }
1779 1754
1780 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1755 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1781 PCI_DEVICE_ID_INTEL_ICH9_1, NULL); 1756 PCI_DEVICE_ID_INTEL_ICH9_1, NULL);
1782 if (pcidev) { 1757 if (pcidev) {
1783 dev->control = &spic_types[2]; 1758 dev->model = SONYPI_DEVICE_TYPE3;
1759 dev->handle_irq = type3_handle_irq;
1760 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1761 dev->event_types = type3_events;
1784 goto out; 1762 goto out;
1785 } 1763 }
1786 1764
1787 /* default */ 1765 /* default */
1788 dev->control = &spic_types[1]; 1766 dev->model = SONYPI_DEVICE_TYPE2;
1767 dev->evport_offset = SONYPI_TYPE2_OFFSET;
1768 dev->event_types = type2_events;
1789 1769
1790out: 1770out:
1791 if (pcidev) 1771 if (pcidev)
1792 pci_dev_put(pcidev); 1772 pci_dev_put(pcidev);
1793 1773
1794 printk(KERN_INFO DRV_PFX "detected Type%d model\n", 1774 printk(KERN_INFO DRV_PFX "detected Type%d model\n",
1795 dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 : 1775 dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
1796 dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); 1776 dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
1797} 1777}
1798 1778
1799/* camera tests and poweron/poweroff */ 1779/* camera tests and poweron/poweroff */
@@ -2566,7 +2546,7 @@ static int sony_pic_enable(struct acpi_device *device,
2566 buffer.pointer = resource; 2546 buffer.pointer = resource;
2567 2547
2568 /* setup Type 1 resources */ 2548 /* setup Type 1 resources */
2569 if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) { 2549 if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
2570 2550
2571 /* setup io resources */ 2551 /* setup io resources */
2572 resource->res1.type = ACPI_RESOURCE_TYPE_IO; 2552 resource->res1.type = ACPI_RESOURCE_TYPE_IO;
@@ -2649,29 +2629,28 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2649 data_mask = inb_p(dev->cur_ioport->io2.minimum); 2629 data_mask = inb_p(dev->cur_ioport->io2.minimum);
2650 else 2630 else
2651 data_mask = inb_p(dev->cur_ioport->io1.minimum + 2631 data_mask = inb_p(dev->cur_ioport->io1.minimum +
2652 dev->control->evport_offset); 2632 dev->evport_offset);
2653 2633
2654 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", 2634 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
2655 ev, data_mask, dev->cur_ioport->io1.minimum, 2635 ev, data_mask, dev->cur_ioport->io1.minimum,
2656 dev->control->evport_offset); 2636 dev->evport_offset);
2657 2637
2658 if (ev == 0x00 || ev == 0xff) 2638 if (ev == 0x00 || ev == 0xff)
2659 return IRQ_HANDLED; 2639 return IRQ_HANDLED;
2660 2640
2661 for (i = 0; dev->control->event_types[i].mask; i++) { 2641 for (i = 0; dev->event_types[i].mask; i++) {
2662 2642
2663 if ((data_mask & dev->control->event_types[i].data) != 2643 if ((data_mask & dev->event_types[i].data) !=
2664 dev->control->event_types[i].data) 2644 dev->event_types[i].data)
2665 continue; 2645 continue;
2666 2646
2667 if (!(mask & dev->control->event_types[i].mask)) 2647 if (!(mask & dev->event_types[i].mask))
2668 continue; 2648 continue;
2669 2649
2670 for (j = 0; dev->control->event_types[i].events[j].event; j++) { 2650 for (j = 0; dev->event_types[i].events[j].event; j++) {
2671 if (ev == dev->control->event_types[i].events[j].data) { 2651 if (ev == dev->event_types[i].events[j].data) {
2672 device_event = 2652 device_event =
2673 dev->control-> 2653 dev->event_types[i].events[j].event;
2674 event_types[i].events[j].event;
2675 goto found; 2654 goto found;
2676 } 2655 }
2677 } 2656 }
@@ -2679,13 +2658,12 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2679 /* Still not able to decode the event try to pass 2658 /* Still not able to decode the event try to pass
2680 * it over to the minidriver 2659 * it over to the minidriver
2681 */ 2660 */
2682 if (dev->control->handle_irq && 2661 if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0)
2683 dev->control->handle_irq(data_mask, ev) == 0)
2684 return IRQ_HANDLED; 2662 return IRQ_HANDLED;
2685 2663
2686 dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", 2664 dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
2687 ev, data_mask, dev->cur_ioport->io1.minimum, 2665 ev, data_mask, dev->cur_ioport->io1.minimum,
2688 dev->control->evport_offset); 2666 dev->evport_offset);
2689 return IRQ_HANDLED; 2667 return IRQ_HANDLED;
2690 2668
2691found: 2669found:
@@ -2816,7 +2794,7 @@ static int sony_pic_add(struct acpi_device *device)
2816 /* request IRQ */ 2794 /* request IRQ */
2817 list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { 2795 list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
2818 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, 2796 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
2819 IRQF_SHARED, "sony-laptop", &spic_dev)) { 2797 IRQF_DISABLED, "sony-laptop", &spic_dev)) {
2820 dprintk("IRQ: %d - triggering: %d - " 2798 dprintk("IRQ: %d - triggering: %d - "
2821 "polarity: %d - shr: %d\n", 2799 "polarity: %d - shr: %d\n",
2822 irq->irq.interrupts[0], 2800 irq->irq.interrupts[0],
@@ -2949,6 +2927,7 @@ static int __init sony_laptop_init(void)
2949 "Unable to register SPIC driver."); 2927 "Unable to register SPIC driver.");
2950 goto out; 2928 goto out;
2951 } 2929 }
2930 spic_drv_registered = 1;
2952 } 2931 }
2953 2932
2954 result = acpi_bus_register_driver(&sony_nc_driver); 2933 result = acpi_bus_register_driver(&sony_nc_driver);
@@ -2960,7 +2939,7 @@ static int __init sony_laptop_init(void)
2960 return 0; 2939 return 0;
2961 2940
2962out_unregister_pic: 2941out_unregister_pic:
2963 if (!no_spic) 2942 if (spic_drv_registered)
2964 acpi_bus_unregister_driver(&sony_pic_driver); 2943 acpi_bus_unregister_driver(&sony_pic_driver);
2965out: 2944out:
2966 return result; 2945 return result;
@@ -2969,7 +2948,7 @@ out:
2969static void __exit sony_laptop_exit(void) 2948static void __exit sony_laptop_exit(void)
2970{ 2949{
2971 acpi_bus_unregister_driver(&sony_nc_driver); 2950 acpi_bus_unregister_driver(&sony_nc_driver);
2972 if (!no_spic) 2951 if (spic_drv_registered)
2973 acpi_bus_unregister_driver(&sony_pic_driver); 2952 acpi_bus_unregister_driver(&sony_pic_driver);
2974} 2953}
2975 2954
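The sony-laptop hunks above drop one level of indirection: instead of pointing dev->control at a per-model descriptor, the probed values are copied straight into the device structure, so hot paths read dev->evport_offset rather than dev->control->evport_offset. A minimal sketch of that pattern follows; the struct, field and constant names are simplified stand-ins, not the driver's real definitions.

struct event_entry { unsigned int mask, data; };

/* hypothetical, simplified device state: the model-specific values
 * live directly in the device struct, no dev->control pointer */
struct spic_device {
	int model;
	unsigned int evport_offset;
	const struct event_entry *event_types;
	int (*handle_irq)(unsigned char data_mask, unsigned char ev);
};

/* detection then becomes a handful of direct assignments, as in the
 * hunks above */
static void setup_type3(struct spic_device *dev,
			const struct event_entry *type3_events,
			int (*type3_irq)(unsigned char, unsigned char))
{
	dev->model = 3;			/* stands in for SONYPI_DEVICE_TYPE3 */
	dev->evport_offset = 0x12;	/* placeholder for SONYPI_TYPE3_OFFSET */
	dev->event_types = type3_events;
	dev->handle_irq = type3_irq;
}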
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 1b78f639ead3..76769978285f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -125,7 +125,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
125 filp->f_path.dentry->d_inode->i_private); 125 filp->f_path.dentry->d_inode->i_private);
126} 126}
127 127
128static struct file_operations debugfs_fops = { 128static const struct file_operations debugfs_fops = {
129 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
130 .open = qstat_seq_open, 130 .open = qstat_seq_open,
131 .read = seq_read, 131 .read = seq_read,
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index eff943923c6f..968e3c7c2632 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
84 return single_open(filp, qdio_perf_proc_show, NULL); 84 return single_open(filp, qdio_perf_proc_show, NULL);
85} 85}
86 86
87static struct file_operations qdio_perf_proc_fops = { 87static const struct file_operations qdio_perf_proc_fops = {
88 .owner = THIS_MODULE, 88 .owner = THIS_MODULE,
89 .open = qdio_perf_seq_open, 89 .open = qdio_perf_seq_open,
90 .read = seq_read, 90 .read = seq_read,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0cb049f5cc56..747a5e5c1276 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1317,7 +1317,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1317 } 1317 }
1318} 1318}
1319 1319
1320static struct file_operations sg_fops = { 1320static const struct file_operations sg_fops = {
1321 .owner = THIS_MODULE, 1321 .owner = THIS_MODULE,
1322 .read = sg_read, 1322 .read = sg_read,
1323 .write = sg_write, 1323 .write = sg_write,
@@ -2194,9 +2194,11 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file); 2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, 2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2196 size_t count, loff_t *off); 2196 size_t count, loff_t *off);
2197static struct file_operations adio_fops = { 2197static const struct file_operations adio_fops = {
2198 /* .owner, .read and .llseek added in sg_proc_init() */ 2198 .owner = THIS_MODULE,
2199 .open = sg_proc_single_open_adio, 2199 .open = sg_proc_single_open_adio,
2200 .read = seq_read,
2201 .llseek = seq_lseek,
2200 .write = sg_proc_write_adio, 2202 .write = sg_proc_write_adio,
2201 .release = single_release, 2203 .release = single_release,
2202}; 2204};
@@ -2204,23 +2206,32 @@ static struct file_operations adio_fops = {
2204static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); 2206static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2205static ssize_t sg_proc_write_dressz(struct file *filp, 2207static ssize_t sg_proc_write_dressz(struct file *filp,
2206 const char __user *buffer, size_t count, loff_t *off); 2208 const char __user *buffer, size_t count, loff_t *off);
2207static struct file_operations dressz_fops = { 2209static const struct file_operations dressz_fops = {
2210 .owner = THIS_MODULE,
2208 .open = sg_proc_single_open_dressz, 2211 .open = sg_proc_single_open_dressz,
2212 .read = seq_read,
2213 .llseek = seq_lseek,
2209 .write = sg_proc_write_dressz, 2214 .write = sg_proc_write_dressz,
2210 .release = single_release, 2215 .release = single_release,
2211}; 2216};
2212 2217
2213static int sg_proc_seq_show_version(struct seq_file *s, void *v); 2218static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2214static int sg_proc_single_open_version(struct inode *inode, struct file *file); 2219static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2215static struct file_operations version_fops = { 2220static const struct file_operations version_fops = {
2221 .owner = THIS_MODULE,
2216 .open = sg_proc_single_open_version, 2222 .open = sg_proc_single_open_version,
2223 .read = seq_read,
2224 .llseek = seq_lseek,
2217 .release = single_release, 2225 .release = single_release,
2218}; 2226};
2219 2227
2220static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); 2228static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2221static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); 2229static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2222static struct file_operations devhdr_fops = { 2230static const struct file_operations devhdr_fops = {
2231 .owner = THIS_MODULE,
2223 .open = sg_proc_single_open_devhdr, 2232 .open = sg_proc_single_open_devhdr,
2233 .read = seq_read,
2234 .llseek = seq_lseek,
2224 .release = single_release, 2235 .release = single_release,
2225}; 2236};
2226 2237
@@ -2229,8 +2240,11 @@ static int sg_proc_open_dev(struct inode *inode, struct file *file);
2229static void * dev_seq_start(struct seq_file *s, loff_t *pos); 2240static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2230static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); 2241static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2231static void dev_seq_stop(struct seq_file *s, void *v); 2242static void dev_seq_stop(struct seq_file *s, void *v);
2232static struct file_operations dev_fops = { 2243static const struct file_operations dev_fops = {
2244 .owner = THIS_MODULE,
2233 .open = sg_proc_open_dev, 2245 .open = sg_proc_open_dev,
2246 .read = seq_read,
2247 .llseek = seq_lseek,
2234 .release = seq_release, 2248 .release = seq_release,
2235}; 2249};
2236static const struct seq_operations dev_seq_ops = { 2250static const struct seq_operations dev_seq_ops = {
@@ -2242,8 +2256,11 @@ static const struct seq_operations dev_seq_ops = {
2242 2256
2243static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); 2257static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2244static int sg_proc_open_devstrs(struct inode *inode, struct file *file); 2258static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2245static struct file_operations devstrs_fops = { 2259static const struct file_operations devstrs_fops = {
2260 .owner = THIS_MODULE,
2246 .open = sg_proc_open_devstrs, 2261 .open = sg_proc_open_devstrs,
2262 .read = seq_read,
2263 .llseek = seq_lseek,
2247 .release = seq_release, 2264 .release = seq_release,
2248}; 2265};
2249static const struct seq_operations devstrs_seq_ops = { 2266static const struct seq_operations devstrs_seq_ops = {
@@ -2255,8 +2272,11 @@ static const struct seq_operations devstrs_seq_ops = {
2255 2272
2256static int sg_proc_seq_show_debug(struct seq_file *s, void *v); 2273static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2257static int sg_proc_open_debug(struct inode *inode, struct file *file); 2274static int sg_proc_open_debug(struct inode *inode, struct file *file);
2258static struct file_operations debug_fops = { 2275static const struct file_operations debug_fops = {
2276 .owner = THIS_MODULE,
2259 .open = sg_proc_open_debug, 2277 .open = sg_proc_open_debug,
2278 .read = seq_read,
2279 .llseek = seq_lseek,
2260 .release = seq_release, 2280 .release = seq_release,
2261}; 2281};
2262static const struct seq_operations debug_seq_ops = { 2282static const struct seq_operations debug_seq_ops = {
@@ -2269,7 +2289,7 @@ static const struct seq_operations debug_seq_ops = {
2269 2289
2270struct sg_proc_leaf { 2290struct sg_proc_leaf {
2271 const char * name; 2291 const char * name;
2272 struct file_operations * fops; 2292 const struct file_operations * fops;
2273}; 2293};
2274 2294
2275static struct sg_proc_leaf sg_proc_leaf_arr[] = { 2295static struct sg_proc_leaf sg_proc_leaf_arr[] = {
@@ -2295,9 +2315,6 @@ sg_proc_init(void)
2295 for (k = 0; k < num_leaves; ++k) { 2315 for (k = 0; k < num_leaves; ++k) {
2296 leaf = &sg_proc_leaf_arr[k]; 2316 leaf = &sg_proc_leaf_arr[k];
2297 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2317 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2298 leaf->fops->owner = THIS_MODULE;
2299 leaf->fops->read = seq_read;
2300 leaf->fops->llseek = seq_lseek;
2301 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); 2318 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2302 } 2319 }
2303 return 0; 2320 return 0;
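The sg.c hunks above make each proc file_operations table const by filling in .owner, .read and .llseek at compile time instead of patching them into the tables inside sg_proc_init(). A minimal sketch of that pattern for a single seq_file-backed proc entry; the "example" names are placeholders, only the shape of the initializer matters.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *v)
{
	seq_puts(s, "example\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

/* every field is known up front, so the table can be const */
static const struct file_operations example_fops = {
	.owner	 = THIS_MODULE,		/* previously assigned at runtime */
	.open	 = example_open,
	.read	 = seq_read,		/* previously assigned at runtime */
	.llseek	 = seq_lseek,		/* previously assigned at runtime */
	.release = single_release,
};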
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 2209620d2349..b1ae774016f1 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -64,6 +64,8 @@ static int serial_index(struct uart_port *port)
64 return (serial8250_reg.minor - 64) + port->line; 64 return (serial8250_reg.minor - 64) + port->line;
65} 65}
66 66
67static unsigned int skip_txen_test; /* force skip of txen test at init time */
68
67/* 69/*
68 * Debugging. 70 * Debugging.
69 */ 71 */
@@ -2108,7 +2110,7 @@ static int serial8250_startup(struct uart_port *port)
2108 is variable. So, let's just don't test if we receive 2110 is variable. So, let's just don't test if we receive
2109 TX irq. This way, we'll never enable UART_BUG_TXEN. 2111 TX irq. This way, we'll never enable UART_BUG_TXEN.
2110 */ 2112 */
2111 if (up->port.flags & UPF_NO_TXEN_TEST) 2113 if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
2112 goto dont_test_tx_en; 2114 goto dont_test_tx_en;
2113 2115
2114 /* 2116 /*
@@ -3248,6 +3250,9 @@ MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
3248module_param(nr_uarts, uint, 0644); 3250module_param(nr_uarts, uint, 0644);
3249MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")"); 3251MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
3250 3252
3253module_param(skip_txen_test, uint, 0644);
3254MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
3255
3251#ifdef CONFIG_SERIAL_8250_RSA 3256#ifdef CONFIG_SERIAL_8250_RSA
3252module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); 3257module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
3253MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); 3258MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
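The 8250 hunks above add a skip_txen_test module parameter that bypasses the TXEN-bug probe at startup. A rough sketch of the same module-parameter pattern, with hypothetical names; it shows a writable uint parameter guarding a probe-time quirk test alongside the existing per-port flag.

#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int skip_quirk_test;	/* 0 = run the probe (default) */
module_param(skip_quirk_test, uint, 0644);
MODULE_PARM_DESC(skip_quirk_test, "Skip the quirk probe at init time");

static void example_startup(unsigned int port_flags, unsigned int no_test_flag)
{
	/* either the user asked to skip it or the platform flagged it */
	if (skip_quirk_test || (port_flags & no_test_flag))
		return;
	/* ...run the quirk probe here... */
}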
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index e70712044a7e..e52257257279 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -862,7 +862,7 @@ config SERIAL_IMX_CONSOLE
862 862
863config SERIAL_UARTLITE 863config SERIAL_UARTLITE
864 tristate "Xilinx uartlite serial port support" 864 tristate "Xilinx uartlite serial port support"
865 depends on PPC32 || MICROBLAZE 865 depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
866 select SERIAL_CORE 866 select SERIAL_CORE
867 help 867 help
868 Say Y here if you want to use the Xilinx uartlite serial controller. 868 Say Y here if you want to use the Xilinx uartlite serial controller.
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 2d7feecaf492..0028b6f89ce6 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -307,7 +307,7 @@ static void stop_processor(struct icom_port *icom_port)
307 if (port < 4) { 307 if (port < 4) {
308 temp = readl(stop_proc[port].global_control_reg); 308 temp = readl(stop_proc[port].global_control_reg);
309 temp = 309 temp =
310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id; 310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
311 writel(temp, stop_proc[port].global_control_reg); 311 writel(temp, stop_proc[port].global_control_reg);
312 312
313 /* write flush */ 313 /* write flush */
@@ -336,7 +336,7 @@ static void start_processor(struct icom_port *icom_port)
336 if (port < 4) { 336 if (port < 4) {
337 temp = readl(start_proc[port].global_control_reg); 337 temp = readl(start_proc[port].global_control_reg);
338 temp = 338 temp =
339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id; 339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
340 writel(temp, start_proc[port].global_control_reg); 340 writel(temp, start_proc[port].global_control_reg);
341 341
342 /* write flush */ 342 /* write flush */
@@ -509,8 +509,8 @@ static void load_code(struct icom_port *icom_port)
509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n"); 509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n");
510 } 510 }
511 511
512 if (new_page != NULL) 512 if (new_page != NULL)
513 pci_free_consistent(dev, 4096, new_page, temp_pci); 513 pci_free_consistent(dev, 4096, new_page, temp_pci);
514} 514}
515 515
516static int startup(struct icom_port *icom_port) 516static int startup(struct icom_port *icom_port)
@@ -1493,15 +1493,15 @@ static int __devinit icom_probe(struct pci_dev *dev,
1493 const struct pci_device_id *ent) 1493 const struct pci_device_id *ent)
1494{ 1494{
1495 int index; 1495 int index;
1496 unsigned int command_reg; 1496 unsigned int command_reg;
1497 int retval; 1497 int retval;
1498 struct icom_adapter *icom_adapter; 1498 struct icom_adapter *icom_adapter;
1499 struct icom_port *icom_port; 1499 struct icom_port *icom_port;
1500 1500
1501 retval = pci_enable_device(dev); 1501 retval = pci_enable_device(dev);
1502 if (retval) { 1502 if (retval) {
1503 dev_err(&dev->dev, "Device enable FAILED\n"); 1503 dev_err(&dev->dev, "Device enable FAILED\n");
1504 return retval; 1504 return retval;
1505 } 1505 }
1506 1506
1507 if ( (retval = pci_request_regions(dev, "icom"))) { 1507 if ( (retval = pci_request_regions(dev, "icom"))) {
@@ -1510,23 +1510,23 @@ static int __devinit icom_probe(struct pci_dev *dev,
1510 return retval; 1510 return retval;
1511 } 1511 }
1512 1512
1513 pci_set_master(dev); 1513 pci_set_master(dev);
1514 1514
1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) { 1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
1516 dev_err(&dev->dev, "PCI Config read FAILED\n"); 1516 dev_err(&dev->dev, "PCI Config read FAILED\n");
1517 return retval; 1517 return retval;
1518 } 1518 }
1519 1519
1520 pci_write_config_dword(dev, PCI_COMMAND, 1520 pci_write_config_dword(dev, PCI_COMMAND,
1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER 1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1523 1523
1524 if (ent->driver_data == ADAPTER_V1) { 1524 if (ent->driver_data == ADAPTER_V1) {
1525 pci_write_config_dword(dev, 0x44, 0x8300830A); 1525 pci_write_config_dword(dev, 0x44, 0x8300830A);
1526 } else { 1526 } else {
1527 pci_write_config_dword(dev, 0x44, 0x42004200); 1527 pci_write_config_dword(dev, 0x44, 0x42004200);
1528 pci_write_config_dword(dev, 0x48, 0x42004200); 1528 pci_write_config_dword(dev, 0x48, 0x42004200);
1529 } 1529 }
1530 1530
1531 1531
1532 retval = icom_alloc_adapter(&icom_adapter); 1532 retval = icom_alloc_adapter(&icom_adapter);
@@ -1536,10 +1536,10 @@ static int __devinit icom_probe(struct pci_dev *dev,
1536 goto probe_exit0; 1536 goto probe_exit0;
1537 } 1537 }
1538 1538
1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0); 1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
1540 icom_adapter->pci_dev = dev; 1540 icom_adapter->pci_dev = dev;
1541 icom_adapter->version = ent->driver_data; 1541 icom_adapter->version = ent->driver_data;
1542 icom_adapter->subsystem_id = ent->subdevice; 1542 icom_adapter->subsystem_id = ent->subdevice;
1543 1543
1544 1544
1545 retval = icom_init_ports(icom_adapter); 1545 retval = icom_init_ports(icom_adapter);
@@ -1548,7 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1548 goto probe_exit1; 1548 goto probe_exit1;
1549 } 1549 }
1550 1550
1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0); 1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
1552 1552
1553 if (!icom_adapter->base_addr) 1553 if (!icom_adapter->base_addr)
1554 goto probe_exit1; 1554 goto probe_exit1;
@@ -1562,7 +1562,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1562 1562
1563 retval = icom_load_ports(icom_adapter); 1563 retval = icom_load_ports(icom_adapter);
1564 1564
1565 for (index = 0; index < icom_adapter->numb_ports; index++) { 1565 for (index = 0; index < icom_adapter->numb_ports; index++) {
1566 icom_port = &icom_adapter->port_info[index]; 1566 icom_port = &icom_adapter->port_info[index];
1567 1567
1568 if (icom_port->status == ICOM_PORT_ACTIVE) { 1568 if (icom_port->status == ICOM_PORT_ACTIVE) {
@@ -1579,7 +1579,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1579 icom_port->status = ICOM_PORT_OFF; 1579 icom_port->status = ICOM_PORT_OFF;
1580 dev_err(&dev->dev, "Device add failed\n"); 1580 dev_err(&dev->dev, "Device add failed\n");
1581 } else 1581 } else
1582 dev_info(&dev->dev, "Device added\n"); 1582 dev_info(&dev->dev, "Device added\n");
1583 } 1583 }
1584 } 1584 }
1585 1585
@@ -1595,9 +1595,7 @@ probe_exit0:
1595 pci_release_regions(dev); 1595 pci_release_regions(dev);
1596 pci_disable_device(dev); 1596 pci_disable_device(dev);
1597 1597
1598 return retval; 1598 return retval;
1599
1600
1601} 1599}
1602 1600
1603static void __devexit icom_remove(struct pci_dev *dev) 1601static void __devexit icom_remove(struct pci_dev *dev)
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 7f5e26873220..2199d819a987 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -638,7 +638,7 @@ static void __init sa1100_init_ports(void)
638 PPSR |= PPC_TXD1 | PPC_TXD3; 638 PPSR |= PPC_TXD1 | PPC_TXD3;
639} 639}
640 640
641void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns) 641void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
642{ 642{
643 if (fns->get_mctrl) 643 if (fns->get_mctrl)
644 sa1100_pops.get_mctrl = fns->get_mctrl; 644 sa1100_pops.get_mctrl = fns->get_mctrl;
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index a3bb49031a7f..ff4617e21426 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -873,10 +873,10 @@ static struct pcmcia_device_id serial_ids[] = {
873 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), 873 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
874 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), 874 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
875 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), 875 PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
876 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), 876 PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
877 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), 877 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
878 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), 878 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
879 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), 879 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"),
880 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), 880 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
881 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), 881 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
882 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ 882 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
@@ -884,9 +884,9 @@ static struct pcmcia_device_id serial_ids[] = {
884 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ 884 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
885 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ 885 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
886 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), 886 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"),
887 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "COMpad2.cis"), 887 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"),
888 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), 888 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
889 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), 889 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
890 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), 890 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
891 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), 891 PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
892 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), 892 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 0f7cf4c453e6..c50e9fbbf743 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -221,21 +221,26 @@ sio_quot_set(struct uart_txx9_port *up, int quot)
221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); 221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
222} 222}
223 223
224static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port)
225{
226 return container_of(port, struct uart_txx9_port, port);
227}
228
224static void serial_txx9_stop_tx(struct uart_port *port) 229static void serial_txx9_stop_tx(struct uart_port *port)
225{ 230{
226 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 231 struct uart_txx9_port *up = to_uart_txx9_port(port);
227 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 232 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
228} 233}
229 234
230static void serial_txx9_start_tx(struct uart_port *port) 235static void serial_txx9_start_tx(struct uart_port *port)
231{ 236{
232 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 237 struct uart_txx9_port *up = to_uart_txx9_port(port);
233 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 238 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
234} 239}
235 240
236static void serial_txx9_stop_rx(struct uart_port *port) 241static void serial_txx9_stop_rx(struct uart_port *port)
237{ 242{
238 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 243 struct uart_txx9_port *up = to_uart_txx9_port(port);
239 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS; 244 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
240} 245}
241 246
@@ -246,7 +251,7 @@ static void serial_txx9_enable_ms(struct uart_port *port)
246 251
247static void serial_txx9_initialize(struct uart_port *port) 252static void serial_txx9_initialize(struct uart_port *port)
248{ 253{
249 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 254 struct uart_txx9_port *up = to_uart_txx9_port(port);
250 unsigned int tmout = 10000; 255 unsigned int tmout = 10000;
251 256
252 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); 257 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
@@ -414,7 +419,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
414 419
415static unsigned int serial_txx9_tx_empty(struct uart_port *port) 420static unsigned int serial_txx9_tx_empty(struct uart_port *port)
416{ 421{
417 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 422 struct uart_txx9_port *up = to_uart_txx9_port(port);
418 unsigned long flags; 423 unsigned long flags;
419 unsigned int ret; 424 unsigned int ret;
420 425
@@ -427,7 +432,7 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *port)
427 432
428static unsigned int serial_txx9_get_mctrl(struct uart_port *port) 433static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
429{ 434{
430 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 435 struct uart_txx9_port *up = to_uart_txx9_port(port);
431 unsigned int ret; 436 unsigned int ret;
432 437
433 /* no modem control lines */ 438 /* no modem control lines */
@@ -440,7 +445,7 @@ static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
440 445
441static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl) 446static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
442{ 447{
443 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 448 struct uart_txx9_port *up = to_uart_txx9_port(port);
444 449
445 if (mctrl & TIOCM_RTS) 450 if (mctrl & TIOCM_RTS)
446 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); 451 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
@@ -450,7 +455,7 @@ static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
450 455
451static void serial_txx9_break_ctl(struct uart_port *port, int break_state) 456static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
452{ 457{
453 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 458 struct uart_txx9_port *up = to_uart_txx9_port(port);
454 unsigned long flags; 459 unsigned long flags;
455 460
456 spin_lock_irqsave(&up->port.lock, flags); 461 spin_lock_irqsave(&up->port.lock, flags);
@@ -494,7 +499,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
494{ 499{
495 unsigned int ier; 500 unsigned int ier;
496 unsigned char c; 501 unsigned char c;
497 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 502 struct uart_txx9_port *up = to_uart_txx9_port(port);
498 503
499 /* 504 /*
500 * First save the IER then disable the interrupts 505 * First save the IER then disable the interrupts
@@ -520,7 +525,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
520static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) 525static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
521{ 526{
522 unsigned int ier; 527 unsigned int ier;
523 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 528 struct uart_txx9_port *up = to_uart_txx9_port(port);
524 529
525 /* 530 /*
526 * First save the IER then disable the interrupts 531 * First save the IER then disable the interrupts
@@ -551,7 +556,7 @@ static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
551 556
552static int serial_txx9_startup(struct uart_port *port) 557static int serial_txx9_startup(struct uart_port *port)
553{ 558{
554 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 559 struct uart_txx9_port *up = to_uart_txx9_port(port);
555 unsigned long flags; 560 unsigned long flags;
556 int retval; 561 int retval;
557 562
@@ -596,7 +601,7 @@ static int serial_txx9_startup(struct uart_port *port)
596 601
597static void serial_txx9_shutdown(struct uart_port *port) 602static void serial_txx9_shutdown(struct uart_port *port)
598{ 603{
599 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 604 struct uart_txx9_port *up = to_uart_txx9_port(port);
600 unsigned long flags; 605 unsigned long flags;
601 606
602 /* 607 /*
@@ -636,7 +641,7 @@ static void
636serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, 641serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
637 struct ktermios *old) 642 struct ktermios *old)
638{ 643{
639 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 644 struct uart_txx9_port *up = to_uart_txx9_port(port);
640 unsigned int cval, fcr = 0; 645 unsigned int cval, fcr = 0;
641 unsigned long flags; 646 unsigned long flags;
642 unsigned int baud, quot; 647 unsigned int baud, quot;
@@ -814,19 +819,19 @@ static void serial_txx9_release_resource(struct uart_txx9_port *up)
814 819
815static void serial_txx9_release_port(struct uart_port *port) 820static void serial_txx9_release_port(struct uart_port *port)
816{ 821{
817 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 822 struct uart_txx9_port *up = to_uart_txx9_port(port);
818 serial_txx9_release_resource(up); 823 serial_txx9_release_resource(up);
819} 824}
820 825
821static int serial_txx9_request_port(struct uart_port *port) 826static int serial_txx9_request_port(struct uart_port *port)
822{ 827{
823 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 828 struct uart_txx9_port *up = to_uart_txx9_port(port);
824 return serial_txx9_request_resource(up); 829 return serial_txx9_request_resource(up);
825} 830}
826 831
827static void serial_txx9_config_port(struct uart_port *port, int uflags) 832static void serial_txx9_config_port(struct uart_port *port, int uflags)
828{ 833{
829 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 834 struct uart_txx9_port *up = to_uart_txx9_port(port);
830 int ret; 835 int ret;
831 836
832 /* 837 /*
@@ -897,7 +902,7 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
897 902
898static void serial_txx9_console_putchar(struct uart_port *port, int ch) 903static void serial_txx9_console_putchar(struct uart_port *port, int ch)
899{ 904{
900 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 905 struct uart_txx9_port *up = to_uart_txx9_port(port);
901 906
902 wait_for_xmitr(up); 907 wait_for_xmitr(up);
903 sio_out(up, TXX9_SITFIFO, ch); 908 sio_out(up, TXX9_SITFIFO, ch);
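The serial_txx9 hunks above replace bare casts of struct uart_port * with a container_of()-based accessor. A minimal sketch of that idiom with generic names: container_of() recovers the enclosing structure from a pointer to an embedded member, so it keeps working even when the member is not the first field.

#include <linux/kernel.h>

struct inner {
	int irq;
};

struct outer {
	unsigned long private_state;
	struct inner member;		/* deliberately not the first field */
};

static inline struct outer *to_outer(struct inner *p)
{
	return container_of(p, struct outer, member);
}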
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index d3b496800477..b204a0929139 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -90,7 +90,11 @@ static struct sfi_table_simple *syst_va __read_mostly;
90 */ 90 */
91static u32 sfi_use_ioremap __read_mostly; 91static u32 sfi_use_ioremap __read_mostly;
92 92
93static void __iomem *sfi_map_memory(u64 phys, u32 size) 93/*
94 * sfi_un/map_memory calls early_ioremap/iounmap which is a __init function
95 * and introduces section mismatch. So use __ref to make it calm.
96 */
97static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
94{ 98{
95 if (!phys || !size) 99 if (!phys || !size)
96 return NULL; 100 return NULL;
@@ -101,7 +105,7 @@ static void __iomem *sfi_map_memory(u64 phys, u32 size)
101 return early_ioremap(phys, size); 105 return early_ioremap(phys, size);
102} 106}
103 107
104static void sfi_unmap_memory(void __iomem *virt, u32 size) 108static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
105{ 109{
106 if (!virt || !size) 110 if (!virt || !size)
107 return; 111 return;
@@ -125,7 +129,7 @@ static void sfi_print_table_header(unsigned long long pa,
125 * sfi_verify_table() 129 * sfi_verify_table()
126 * Sanity check table lengh, calculate checksum 130 * Sanity check table lengh, calculate checksum
127 */ 131 */
128static __init int sfi_verify_table(struct sfi_table_header *table) 132static int sfi_verify_table(struct sfi_table_header *table)
129{ 133{
130 134
131 u8 checksum = 0; 135 u8 checksum = 0;
@@ -213,12 +217,17 @@ static int sfi_table_check_key(struct sfi_table_header *th,
213 * the mapped virt address will be returned, and the virt space 217 * the mapped virt address will be returned, and the virt space
214 * will be released by call sfi_put_table() later 218 * will be released by call sfi_put_table() later
215 * 219 *
220 * This two cases are from two different functions with two different
221 * sections and causes section mismatch warning. So use __ref to tell
222 * modpost not to make any noise.
223 *
216 * Return value: 224 * Return value:
217 * NULL: when can't find a table matching the key 225 * NULL: when can't find a table matching the key
218 * ERR_PTR(error): error value 226 * ERR_PTR(error): error value
219 * virt table address: when a matched table is found 227 * virt table address: when a matched table is found
220 */ 228 */
221struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key) 229struct sfi_table_header *
230 __ref sfi_check_table(u64 pa, struct sfi_table_key *key)
222{ 231{
223 struct sfi_table_header *th; 232 struct sfi_table_header *th;
224 void *ret = NULL; 233 void *ret = NULL;
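The sfi hunks above annotate functions with __ref so modpost stops warning about their references into __init code. A simplified sketch of the idea follows; the helper and flag names are invented, only the annotation pattern is the point: the function legitimately calls an __init-section routine while init memory is still present, and __ref documents that the cross-section reference is intentional.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>

static bool use_ioremap;	/* set once the normal ioremap path works */

static void __iomem * __ref example_map(u64 phys, u32 size)
{
	if (use_ioremap)
		return ioremap(phys, size);
	/* early_ioremap() lives in an init section; __ref above keeps
	 * modpost from flagging this call as a section mismatch */
	return early_ioremap(phys, size);
}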
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6d7a3f82c54b..21a118269cac 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
20obj-$(CONFIG_SPI_IMX) += mxc_spi.o 20obj-$(CONFIG_SPI_IMX) += spi_imx.o
21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
diff --git a/drivers/spi/mxc_spi.c b/drivers/spi/spi_imx.c
index b1447236ae81..89c22efedfb0 100644
--- a/drivers/spi/mxc_spi.c
+++ b/drivers/spi/spi_imx.c
@@ -48,14 +48,14 @@
48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
50 50
51struct mxc_spi_config { 51struct spi_imx_config {
52 unsigned int speed_hz; 52 unsigned int speed_hz;
53 unsigned int bpw; 53 unsigned int bpw;
54 unsigned int mode; 54 unsigned int mode;
55 int cs; 55 int cs;
56}; 56};
57 57
58struct mxc_spi_data { 58struct spi_imx_data {
59 struct spi_bitbang bitbang; 59 struct spi_bitbang bitbang;
60 60
61 struct completion xfer_done; 61 struct completion xfer_done;
@@ -66,43 +66,43 @@ struct mxc_spi_data {
66 int *chipselect; 66 int *chipselect;
67 67
68 unsigned int count; 68 unsigned int count;
69 void (*tx)(struct mxc_spi_data *); 69 void (*tx)(struct spi_imx_data *);
70 void (*rx)(struct mxc_spi_data *); 70 void (*rx)(struct spi_imx_data *);
71 void *rx_buf; 71 void *rx_buf;
72 const void *tx_buf; 72 const void *tx_buf;
73 unsigned int txfifo; /* number of words pushed in tx FIFO */ 73 unsigned int txfifo; /* number of words pushed in tx FIFO */
74 74
75 /* SoC specific functions */ 75 /* SoC specific functions */
76 void (*intctrl)(struct mxc_spi_data *, int); 76 void (*intctrl)(struct spi_imx_data *, int);
77 int (*config)(struct mxc_spi_data *, struct mxc_spi_config *); 77 int (*config)(struct spi_imx_data *, struct spi_imx_config *);
78 void (*trigger)(struct mxc_spi_data *); 78 void (*trigger)(struct spi_imx_data *);
79 int (*rx_available)(struct mxc_spi_data *); 79 int (*rx_available)(struct spi_imx_data *);
80}; 80};
81 81
82#define MXC_SPI_BUF_RX(type) \ 82#define MXC_SPI_BUF_RX(type) \
83static void mxc_spi_buf_rx_##type(struct mxc_spi_data *mxc_spi) \ 83static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
84{ \ 84{ \
85 unsigned int val = readl(mxc_spi->base + MXC_CSPIRXDATA); \ 85 unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
86 \ 86 \
87 if (mxc_spi->rx_buf) { \ 87 if (spi_imx->rx_buf) { \
88 *(type *)mxc_spi->rx_buf = val; \ 88 *(type *)spi_imx->rx_buf = val; \
89 mxc_spi->rx_buf += sizeof(type); \ 89 spi_imx->rx_buf += sizeof(type); \
90 } \ 90 } \
91} 91}
92 92
93#define MXC_SPI_BUF_TX(type) \ 93#define MXC_SPI_BUF_TX(type) \
94static void mxc_spi_buf_tx_##type(struct mxc_spi_data *mxc_spi) \ 94static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
95{ \ 95{ \
96 type val = 0; \ 96 type val = 0; \
97 \ 97 \
98 if (mxc_spi->tx_buf) { \ 98 if (spi_imx->tx_buf) { \
99 val = *(type *)mxc_spi->tx_buf; \ 99 val = *(type *)spi_imx->tx_buf; \
100 mxc_spi->tx_buf += sizeof(type); \ 100 spi_imx->tx_buf += sizeof(type); \
101 } \ 101 } \
102 \ 102 \
103 mxc_spi->count -= sizeof(type); \ 103 spi_imx->count -= sizeof(type); \
104 \ 104 \
105 writel(val, mxc_spi->base + MXC_CSPITXDATA); \ 105 writel(val, spi_imx->base + MXC_CSPITXDATA); \
106} 106}
107 107
108MXC_SPI_BUF_RX(u8) 108MXC_SPI_BUF_RX(u8)
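For reference, this is the u16 instance that the MXC_SPI_BUF_RX() macro in the hunk above generates after the rename; it relies on the driver's own struct spi_imx_data and MXC_CSPIRXDATA definitions shown in this file.

static void spi_imx_buf_rx_u16(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->rx_buf) {
		*(u16 *)spi_imx->rx_buf = val;	/* store one 16-bit word */
		spi_imx->rx_buf += sizeof(u16);	/* void-pointer arithmetic,
						 * a GCC extension used
						 * throughout the kernel */
	}
}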
@@ -119,7 +119,7 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
119 256, 384, 512, 768, 1024}; 119 256, 384, 512, 768, 1024};
120 120
121/* MX21, MX27 */ 121/* MX21, MX27 */
122static unsigned int mxc_spi_clkdiv_1(unsigned int fin, 122static unsigned int spi_imx_clkdiv_1(unsigned int fin,
123 unsigned int fspi) 123 unsigned int fspi)
124{ 124{
125 int i, max; 125 int i, max;
@@ -137,7 +137,7 @@ static unsigned int mxc_spi_clkdiv_1(unsigned int fin,
137} 137}
138 138
139/* MX1, MX31, MX35 */ 139/* MX1, MX31, MX35 */
140static unsigned int mxc_spi_clkdiv_2(unsigned int fin, 140static unsigned int spi_imx_clkdiv_2(unsigned int fin,
141 unsigned int fspi) 141 unsigned int fspi)
142{ 142{
143 int i, div = 4; 143 int i, div = 4;
@@ -174,7 +174,7 @@ static unsigned int mxc_spi_clkdiv_2(unsigned int fin,
174 * the i.MX35 has a slightly different register layout for bits 174 * the i.MX35 has a slightly different register layout for bits
175 * we do not use here. 175 * we do not use here.
176 */ 176 */
177static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable) 177static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
178{ 178{
179 unsigned int val = 0; 179 unsigned int val = 0;
180 180
@@ -183,24 +183,24 @@ static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable)
183 if (enable & MXC_INT_RR) 183 if (enable & MXC_INT_RR)
184 val |= MX31_INTREG_RREN; 184 val |= MX31_INTREG_RREN;
185 185
186 writel(val, mxc_spi->base + MXC_CSPIINT); 186 writel(val, spi_imx->base + MXC_CSPIINT);
187} 187}
188 188
189static void mx31_trigger(struct mxc_spi_data *mxc_spi) 189static void mx31_trigger(struct spi_imx_data *spi_imx)
190{ 190{
191 unsigned int reg; 191 unsigned int reg;
192 192
193 reg = readl(mxc_spi->base + MXC_CSPICTRL); 193 reg = readl(spi_imx->base + MXC_CSPICTRL);
194 reg |= MX31_CSPICTRL_XCH; 194 reg |= MX31_CSPICTRL_XCH;
195 writel(reg, mxc_spi->base + MXC_CSPICTRL); 195 writel(reg, spi_imx->base + MXC_CSPICTRL);
196} 196}
197 197
198static int mx31_config(struct mxc_spi_data *mxc_spi, 198static int mx31_config(struct spi_imx_data *spi_imx,
199 struct mxc_spi_config *config) 199 struct spi_imx_config *config)
200{ 200{
201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; 201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
202 202
203 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 203 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
204 MX31_CSPICTRL_DR_SHIFT; 204 MX31_CSPICTRL_DR_SHIFT;
205 205
206 if (cpu_is_mx31()) 206 if (cpu_is_mx31())
@@ -223,14 +223,14 @@ static int mx31_config(struct mxc_spi_data *mxc_spi,
223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; 223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
224 } 224 }
225 225
226 writel(reg, mxc_spi->base + MXC_CSPICTRL); 226 writel(reg, spi_imx->base + MXC_CSPICTRL);
227 227
228 return 0; 228 return 0;
229} 229}
230 230
231static int mx31_rx_available(struct mxc_spi_data *mxc_spi) 231static int mx31_rx_available(struct spi_imx_data *spi_imx)
232{ 232{
233 return readl(mxc_spi->base + MX31_CSPISTATUS) & MX31_STATUS_RR; 233 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
234} 234}
235 235
236#define MX27_INTREG_RR (1 << 4) 236#define MX27_INTREG_RR (1 << 4)
@@ -246,7 +246,7 @@ static int mx31_rx_available(struct mxc_spi_data *mxc_spi)
246#define MX27_CSPICTRL_DR_SHIFT 14 246#define MX27_CSPICTRL_DR_SHIFT 14
247#define MX27_CSPICTRL_CS_SHIFT 19 247#define MX27_CSPICTRL_CS_SHIFT 19
248 248
249static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable) 249static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable)
250{ 250{
251 unsigned int val = 0; 251 unsigned int val = 0;
252 252
@@ -255,24 +255,24 @@ static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable)
255 if (enable & MXC_INT_RR) 255 if (enable & MXC_INT_RR)
256 val |= MX27_INTREG_RREN; 256 val |= MX27_INTREG_RREN;
257 257
258 writel(val, mxc_spi->base + MXC_CSPIINT); 258 writel(val, spi_imx->base + MXC_CSPIINT);
259} 259}
260 260
261static void mx27_trigger(struct mxc_spi_data *mxc_spi) 261static void mx27_trigger(struct spi_imx_data *spi_imx)
262{ 262{
263 unsigned int reg; 263 unsigned int reg;
264 264
265 reg = readl(mxc_spi->base + MXC_CSPICTRL); 265 reg = readl(spi_imx->base + MXC_CSPICTRL);
266 reg |= MX27_CSPICTRL_XCH; 266 reg |= MX27_CSPICTRL_XCH;
267 writel(reg, mxc_spi->base + MXC_CSPICTRL); 267 writel(reg, spi_imx->base + MXC_CSPICTRL);
268} 268}
269 269
270static int mx27_config(struct mxc_spi_data *mxc_spi, 270static int mx27_config(struct spi_imx_data *spi_imx,
271 struct mxc_spi_config *config) 271 struct spi_imx_config *config)
272{ 272{
273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; 273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
274 274
275 reg |= mxc_spi_clkdiv_1(mxc_spi->spi_clk, config->speed_hz) << 275 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) <<
276 MX27_CSPICTRL_DR_SHIFT; 276 MX27_CSPICTRL_DR_SHIFT;
277 reg |= config->bpw - 1; 277 reg |= config->bpw - 1;
278 278
@@ -285,14 +285,14 @@ static int mx27_config(struct mxc_spi_data *mxc_spi,
285 if (config->cs < 0) 285 if (config->cs < 0)
286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; 286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT;
287 287
288 writel(reg, mxc_spi->base + MXC_CSPICTRL); 288 writel(reg, spi_imx->base + MXC_CSPICTRL);
289 289
290 return 0; 290 return 0;
291} 291}
292 292
293static int mx27_rx_available(struct mxc_spi_data *mxc_spi) 293static int mx27_rx_available(struct spi_imx_data *spi_imx)
294{ 294{
295 return readl(mxc_spi->base + MXC_CSPIINT) & MX27_INTREG_RR; 295 return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR;
296} 296}
297 297
298#define MX1_INTREG_RR (1 << 3) 298#define MX1_INTREG_RR (1 << 3)
@@ -306,7 +306,7 @@ static int mx27_rx_available(struct mxc_spi_data *mxc_spi)
306#define MX1_CSPICTRL_MASTER (1 << 10) 306#define MX1_CSPICTRL_MASTER (1 << 10)
307#define MX1_CSPICTRL_DR_SHIFT 13 307#define MX1_CSPICTRL_DR_SHIFT 13
308 308
309static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable) 309static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
310{ 310{
311 unsigned int val = 0; 311 unsigned int val = 0;
312 312
@@ -315,24 +315,24 @@ static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable)
315 if (enable & MXC_INT_RR) 315 if (enable & MXC_INT_RR)
316 val |= MX1_INTREG_RREN; 316 val |= MX1_INTREG_RREN;
317 317
318 writel(val, mxc_spi->base + MXC_CSPIINT); 318 writel(val, spi_imx->base + MXC_CSPIINT);
319} 319}
320 320
321static void mx1_trigger(struct mxc_spi_data *mxc_spi) 321static void mx1_trigger(struct spi_imx_data *spi_imx)
322{ 322{
323 unsigned int reg; 323 unsigned int reg;
324 324
325 reg = readl(mxc_spi->base + MXC_CSPICTRL); 325 reg = readl(spi_imx->base + MXC_CSPICTRL);
326 reg |= MX1_CSPICTRL_XCH; 326 reg |= MX1_CSPICTRL_XCH;
327 writel(reg, mxc_spi->base + MXC_CSPICTRL); 327 writel(reg, spi_imx->base + MXC_CSPICTRL);
328} 328}
329 329
330static int mx1_config(struct mxc_spi_data *mxc_spi, 330static int mx1_config(struct spi_imx_data *spi_imx,
331 struct mxc_spi_config *config) 331 struct spi_imx_config *config)
332{ 332{
333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; 333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
334 334
335 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 335 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
336 MX1_CSPICTRL_DR_SHIFT; 336 MX1_CSPICTRL_DR_SHIFT;
337 reg |= config->bpw - 1; 337 reg |= config->bpw - 1;
338 338
@@ -341,156 +341,151 @@ static int mx1_config(struct mxc_spi_data *mxc_spi,
341 if (config->mode & SPI_CPOL) 341 if (config->mode & SPI_CPOL)
342 reg |= MX1_CSPICTRL_POL; 342 reg |= MX1_CSPICTRL_POL;
343 343
344 writel(reg, mxc_spi->base + MXC_CSPICTRL); 344 writel(reg, spi_imx->base + MXC_CSPICTRL);
345 345
346 return 0; 346 return 0;
347} 347}
348 348
349static int mx1_rx_available(struct mxc_spi_data *mxc_spi) 349static int mx1_rx_available(struct spi_imx_data *spi_imx)
350{ 350{
351 return readl(mxc_spi->base + MXC_CSPIINT) & MX1_INTREG_RR; 351 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
352} 352}
353 353
354static void mxc_spi_chipselect(struct spi_device *spi, int is_active) 354static void spi_imx_chipselect(struct spi_device *spi, int is_active)
355{ 355{
356 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 356 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
357 unsigned int cs = 0; 357 int gpio = spi_imx->chipselect[spi->chip_select];
358 int gpio = mxc_spi->chipselect[spi->chip_select]; 358 int active = is_active != BITBANG_CS_INACTIVE;
359 struct mxc_spi_config config; 359 int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
360 360
361 if (spi->mode & SPI_CS_HIGH) 361 if (gpio < 0)
362 cs = 1;
363
364 if (is_active == BITBANG_CS_INACTIVE) {
365 if (gpio >= 0)
366 gpio_set_value(gpio, !cs);
367 return; 362 return;
368 }
369
370 config.bpw = spi->bits_per_word;
371 config.speed_hz = spi->max_speed_hz;
372 config.mode = spi->mode;
373 config.cs = mxc_spi->chipselect[spi->chip_select];
374
375 mxc_spi->config(mxc_spi, &config);
376
377 /* Initialize the functions for transfer */
378 if (config.bpw <= 8) {
379 mxc_spi->rx = mxc_spi_buf_rx_u8;
380 mxc_spi->tx = mxc_spi_buf_tx_u8;
381 } else if (config.bpw <= 16) {
382 mxc_spi->rx = mxc_spi_buf_rx_u16;
383 mxc_spi->tx = mxc_spi_buf_tx_u16;
384 } else if (config.bpw <= 32) {
385 mxc_spi->rx = mxc_spi_buf_rx_u32;
386 mxc_spi->tx = mxc_spi_buf_tx_u32;
387 } else
388 BUG();
389 363
390 if (gpio >= 0) 364 gpio_set_value(gpio, dev_is_lowactive ^ active);
391 gpio_set_value(gpio, cs);
392
393 return;
394} 365}
395 366
396static void mxc_spi_push(struct mxc_spi_data *mxc_spi) 367static void spi_imx_push(struct spi_imx_data *spi_imx)
397{ 368{
398 while (mxc_spi->txfifo < 8) { 369 while (spi_imx->txfifo < 8) {
399 if (!mxc_spi->count) 370 if (!spi_imx->count)
400 break; 371 break;
401 mxc_spi->tx(mxc_spi); 372 spi_imx->tx(spi_imx);
402 mxc_spi->txfifo++; 373 spi_imx->txfifo++;
403 } 374 }
404 375
405 mxc_spi->trigger(mxc_spi); 376 spi_imx->trigger(spi_imx);
406} 377}
407 378
408static irqreturn_t mxc_spi_isr(int irq, void *dev_id) 379static irqreturn_t spi_imx_isr(int irq, void *dev_id)
409{ 380{
410 struct mxc_spi_data *mxc_spi = dev_id; 381 struct spi_imx_data *spi_imx = dev_id;
411 382
412 while (mxc_spi->rx_available(mxc_spi)) { 383 while (spi_imx->rx_available(spi_imx)) {
413 mxc_spi->rx(mxc_spi); 384 spi_imx->rx(spi_imx);
414 mxc_spi->txfifo--; 385 spi_imx->txfifo--;
415 } 386 }
416 387
417 if (mxc_spi->count) { 388 if (spi_imx->count) {
418 mxc_spi_push(mxc_spi); 389 spi_imx_push(spi_imx);
419 return IRQ_HANDLED; 390 return IRQ_HANDLED;
420 } 391 }
421 392
422 if (mxc_spi->txfifo) { 393 if (spi_imx->txfifo) {
423 /* No data left to push, but still waiting for rx data, 394 /* No data left to push, but still waiting for rx data,
424 * enable receive data available interrupt. 395 * enable receive data available interrupt.
425 */ 396 */
426 mxc_spi->intctrl(mxc_spi, MXC_INT_RR); 397 spi_imx->intctrl(spi_imx, MXC_INT_RR);
427 return IRQ_HANDLED; 398 return IRQ_HANDLED;
428 } 399 }
429 400
430 mxc_spi->intctrl(mxc_spi, 0); 401 spi_imx->intctrl(spi_imx, 0);
431 complete(&mxc_spi->xfer_done); 402 complete(&spi_imx->xfer_done);
432 403
433 return IRQ_HANDLED; 404 return IRQ_HANDLED;
434} 405}
435 406
436static int mxc_spi_setupxfer(struct spi_device *spi, 407static int spi_imx_setupxfer(struct spi_device *spi,
437 struct spi_transfer *t) 408 struct spi_transfer *t)
438{ 409{
439 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 410 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
440 struct mxc_spi_config config; 411 struct spi_imx_config config;
441 412
442 config.bpw = t ? t->bits_per_word : spi->bits_per_word; 413 config.bpw = t ? t->bits_per_word : spi->bits_per_word;
443 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; 414 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
444 config.mode = spi->mode; 415 config.mode = spi->mode;
416 config.cs = spi_imx->chipselect[spi->chip_select];
417
418 if (!config.speed_hz)
419 config.speed_hz = spi->max_speed_hz;
420 if (!config.bpw)
421 config.bpw = spi->bits_per_word;
422 if (!config.speed_hz)
423 config.speed_hz = spi->max_speed_hz;
424
425 /* Initialize the functions for transfer */
426 if (config.bpw <= 8) {
427 spi_imx->rx = spi_imx_buf_rx_u8;
428 spi_imx->tx = spi_imx_buf_tx_u8;
429 } else if (config.bpw <= 16) {
430 spi_imx->rx = spi_imx_buf_rx_u16;
431 spi_imx->tx = spi_imx_buf_tx_u16;
432 } else if (config.bpw <= 32) {
433 spi_imx->rx = spi_imx_buf_rx_u32;
434 spi_imx->tx = spi_imx_buf_tx_u32;
435 } else
436 BUG();
445 437
446 mxc_spi->config(mxc_spi, &config); 438 spi_imx->config(spi_imx, &config);
447 439
448 return 0; 440 return 0;
449} 441}
450 442
451static int mxc_spi_transfer(struct spi_device *spi, 443static int spi_imx_transfer(struct spi_device *spi,
452 struct spi_transfer *transfer) 444 struct spi_transfer *transfer)
453{ 445{
454 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 446 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
455 447
456 mxc_spi->tx_buf = transfer->tx_buf; 448 spi_imx->tx_buf = transfer->tx_buf;
457 mxc_spi->rx_buf = transfer->rx_buf; 449 spi_imx->rx_buf = transfer->rx_buf;
458 mxc_spi->count = transfer->len; 450 spi_imx->count = transfer->len;
459 mxc_spi->txfifo = 0; 451 spi_imx->txfifo = 0;
460 452
461 init_completion(&mxc_spi->xfer_done); 453 init_completion(&spi_imx->xfer_done);
462 454
463 mxc_spi_push(mxc_spi); 455 spi_imx_push(spi_imx);
464 456
465 mxc_spi->intctrl(mxc_spi, MXC_INT_TE); 457 spi_imx->intctrl(spi_imx, MXC_INT_TE);
466 458
467 wait_for_completion(&mxc_spi->xfer_done); 459 wait_for_completion(&spi_imx->xfer_done);
468 460
469 return transfer->len; 461 return transfer->len;
470} 462}
471 463
472static int mxc_spi_setup(struct spi_device *spi) 464static int spi_imx_setup(struct spi_device *spi)
473{ 465{
474 if (!spi->bits_per_word) 466 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
475 spi->bits_per_word = 8; 467 int gpio = spi_imx->chipselect[spi->chip_select];
476 468
477 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, 469 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
478 spi->mode, spi->bits_per_word, spi->max_speed_hz); 470 spi->mode, spi->bits_per_word, spi->max_speed_hz);
479 471
480 mxc_spi_chipselect(spi, BITBANG_CS_INACTIVE); 472 if (gpio >= 0)
473 gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
474
475 spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
481 476
482 return 0; 477 return 0;
483} 478}
484 479
485static void mxc_spi_cleanup(struct spi_device *spi) 480static void spi_imx_cleanup(struct spi_device *spi)
486{ 481{
487} 482}
488 483
489static int __init mxc_spi_probe(struct platform_device *pdev) 484static int __init spi_imx_probe(struct platform_device *pdev)
490{ 485{
491 struct spi_imx_master *mxc_platform_info; 486 struct spi_imx_master *mxc_platform_info;
492 struct spi_master *master; 487 struct spi_master *master;
493 struct mxc_spi_data *mxc_spi; 488 struct spi_imx_data *spi_imx;
494 struct resource *res; 489 struct resource *res;
495 int i, ret; 490 int i, ret;
496 491
@@ -500,7 +495,7 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
500 return -EINVAL; 495 return -EINVAL;
501 } 496 }
502 497
503 master = spi_alloc_master(&pdev->dev, sizeof(struct mxc_spi_data)); 498 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
504 if (!master) 499 if (!master)
505 return -ENOMEM; 500 return -ENOMEM;
506 501
@@ -509,32 +504,32 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
509 master->bus_num = pdev->id; 504 master->bus_num = pdev->id;
510 master->num_chipselect = mxc_platform_info->num_chipselect; 505 master->num_chipselect = mxc_platform_info->num_chipselect;
511 506
512 mxc_spi = spi_master_get_devdata(master); 507 spi_imx = spi_master_get_devdata(master);
513 mxc_spi->bitbang.master = spi_master_get(master); 508 spi_imx->bitbang.master = spi_master_get(master);
514 mxc_spi->chipselect = mxc_platform_info->chipselect; 509 spi_imx->chipselect = mxc_platform_info->chipselect;
515 510
516 for (i = 0; i < master->num_chipselect; i++) { 511 for (i = 0; i < master->num_chipselect; i++) {
517 if (mxc_spi->chipselect[i] < 0) 512 if (spi_imx->chipselect[i] < 0)
518 continue; 513 continue;
519 ret = gpio_request(mxc_spi->chipselect[i], DRIVER_NAME); 514 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
520 if (ret) { 515 if (ret) {
521 i--; 516 i--;
522 while (i > 0) 517 while (i > 0)
523 if (mxc_spi->chipselect[i] >= 0) 518 if (spi_imx->chipselect[i] >= 0)
524 gpio_free(mxc_spi->chipselect[i--]); 519 gpio_free(spi_imx->chipselect[i--]);
525 dev_err(&pdev->dev, "can't get cs gpios"); 520 dev_err(&pdev->dev, "can't get cs gpios");
526 goto out_master_put; 521 goto out_master_put;
527 } 522 }
528 gpio_direction_output(mxc_spi->chipselect[i], 1);
529 } 523 }
530 524
531 mxc_spi->bitbang.chipselect = mxc_spi_chipselect; 525 spi_imx->bitbang.chipselect = spi_imx_chipselect;
532 mxc_spi->bitbang.setup_transfer = mxc_spi_setupxfer; 526 spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
533 mxc_spi->bitbang.txrx_bufs = mxc_spi_transfer; 527 spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
534 mxc_spi->bitbang.master->setup = mxc_spi_setup; 528 spi_imx->bitbang.master->setup = spi_imx_setup;
535 mxc_spi->bitbang.master->cleanup = mxc_spi_cleanup; 529 spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
530 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
536 531
537 init_completion(&mxc_spi->xfer_done); 532 init_completion(&spi_imx->xfer_done);
538 533
539 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 534 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
540 if (!res) { 535 if (!res) {
@@ -549,58 +544,58 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
549 goto out_gpio_free; 544 goto out_gpio_free;
550 } 545 }
551 546
552 mxc_spi->base = ioremap(res->start, resource_size(res)); 547 spi_imx->base = ioremap(res->start, resource_size(res));
553 if (!mxc_spi->base) { 548 if (!spi_imx->base) {
554 ret = -EINVAL; 549 ret = -EINVAL;
555 goto out_release_mem; 550 goto out_release_mem;
556 } 551 }
557 552
558 mxc_spi->irq = platform_get_irq(pdev, 0); 553 spi_imx->irq = platform_get_irq(pdev, 0);
559 if (!mxc_spi->irq) { 554 if (!spi_imx->irq) {
560 ret = -EINVAL; 555 ret = -EINVAL;
561 goto out_iounmap; 556 goto out_iounmap;
562 } 557 }
563 558
564 ret = request_irq(mxc_spi->irq, mxc_spi_isr, 0, DRIVER_NAME, mxc_spi); 559 ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
565 if (ret) { 560 if (ret) {
566 dev_err(&pdev->dev, "can't get irq%d: %d\n", mxc_spi->irq, ret); 561 dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
567 goto out_iounmap; 562 goto out_iounmap;
568 } 563 }
569 564
570 if (cpu_is_mx31() || cpu_is_mx35()) { 565 if (cpu_is_mx31() || cpu_is_mx35()) {
571 mxc_spi->intctrl = mx31_intctrl; 566 spi_imx->intctrl = mx31_intctrl;
572 mxc_spi->config = mx31_config; 567 spi_imx->config = mx31_config;
573 mxc_spi->trigger = mx31_trigger; 568 spi_imx->trigger = mx31_trigger;
574 mxc_spi->rx_available = mx31_rx_available; 569 spi_imx->rx_available = mx31_rx_available;
575 } else if (cpu_is_mx27() || cpu_is_mx21()) { 570 } else if (cpu_is_mx27() || cpu_is_mx21()) {
576 mxc_spi->intctrl = mx27_intctrl; 571 spi_imx->intctrl = mx27_intctrl;
577 mxc_spi->config = mx27_config; 572 spi_imx->config = mx27_config;
578 mxc_spi->trigger = mx27_trigger; 573 spi_imx->trigger = mx27_trigger;
579 mxc_spi->rx_available = mx27_rx_available; 574 spi_imx->rx_available = mx27_rx_available;
580 } else if (cpu_is_mx1()) { 575 } else if (cpu_is_mx1()) {
581 mxc_spi->intctrl = mx1_intctrl; 576 spi_imx->intctrl = mx1_intctrl;
582 mxc_spi->config = mx1_config; 577 spi_imx->config = mx1_config;
583 mxc_spi->trigger = mx1_trigger; 578 spi_imx->trigger = mx1_trigger;
584 mxc_spi->rx_available = mx1_rx_available; 579 spi_imx->rx_available = mx1_rx_available;
585 } else 580 } else
586 BUG(); 581 BUG();
587 582
588 mxc_spi->clk = clk_get(&pdev->dev, NULL); 583 spi_imx->clk = clk_get(&pdev->dev, NULL);
589 if (IS_ERR(mxc_spi->clk)) { 584 if (IS_ERR(spi_imx->clk)) {
590 dev_err(&pdev->dev, "unable to get clock\n"); 585 dev_err(&pdev->dev, "unable to get clock\n");
591 ret = PTR_ERR(mxc_spi->clk); 586 ret = PTR_ERR(spi_imx->clk);
592 goto out_free_irq; 587 goto out_free_irq;
593 } 588 }
594 589
595 clk_enable(mxc_spi->clk); 590 clk_enable(spi_imx->clk);
596 mxc_spi->spi_clk = clk_get_rate(mxc_spi->clk); 591 spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
597 592
598 if (!cpu_is_mx31() || !cpu_is_mx35()) 593 if (!cpu_is_mx31() || !cpu_is_mx35())
599 writel(1, mxc_spi->base + MXC_RESET); 594 writel(1, spi_imx->base + MXC_RESET);
600 595
601 mxc_spi->intctrl(mxc_spi, 0); 596 spi_imx->intctrl(spi_imx, 0);
602 597
603 ret = spi_bitbang_start(&mxc_spi->bitbang); 598 ret = spi_bitbang_start(&spi_imx->bitbang);
604 if (ret) { 599 if (ret) {
605 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); 600 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
606 goto out_clk_put; 601 goto out_clk_put;
@@ -611,18 +606,18 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
611 return ret; 606 return ret;
612 607
613out_clk_put: 608out_clk_put:
614 clk_disable(mxc_spi->clk); 609 clk_disable(spi_imx->clk);
615 clk_put(mxc_spi->clk); 610 clk_put(spi_imx->clk);
616out_free_irq: 611out_free_irq:
617 free_irq(mxc_spi->irq, mxc_spi); 612 free_irq(spi_imx->irq, spi_imx);
618out_iounmap: 613out_iounmap:
619 iounmap(mxc_spi->base); 614 iounmap(spi_imx->base);
620out_release_mem: 615out_release_mem:
621 release_mem_region(res->start, resource_size(res)); 616 release_mem_region(res->start, resource_size(res));
622out_gpio_free: 617out_gpio_free:
623 for (i = 0; i < master->num_chipselect; i++) 618 for (i = 0; i < master->num_chipselect; i++)
624 if (mxc_spi->chipselect[i] >= 0) 619 if (spi_imx->chipselect[i] >= 0)
625 gpio_free(mxc_spi->chipselect[i]); 620 gpio_free(spi_imx->chipselect[i]);
626out_master_put: 621out_master_put:
627 spi_master_put(master); 622 spi_master_put(master);
628 kfree(master); 623 kfree(master);
@@ -630,24 +625,24 @@ out_master_put:
630 return ret; 625 return ret;
631} 626}
632 627
633static int __exit mxc_spi_remove(struct platform_device *pdev) 628static int __exit spi_imx_remove(struct platform_device *pdev)
634{ 629{
635 struct spi_master *master = platform_get_drvdata(pdev); 630 struct spi_master *master = platform_get_drvdata(pdev);
636 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 631 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
637 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(master); 632 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
638 int i; 633 int i;
639 634
640 spi_bitbang_stop(&mxc_spi->bitbang); 635 spi_bitbang_stop(&spi_imx->bitbang);
641 636
642 writel(0, mxc_spi->base + MXC_CSPICTRL); 637 writel(0, spi_imx->base + MXC_CSPICTRL);
643 clk_disable(mxc_spi->clk); 638 clk_disable(spi_imx->clk);
644 clk_put(mxc_spi->clk); 639 clk_put(spi_imx->clk);
645 free_irq(mxc_spi->irq, mxc_spi); 640 free_irq(spi_imx->irq, spi_imx);
646 iounmap(mxc_spi->base); 641 iounmap(spi_imx->base);
647 642
648 for (i = 0; i < master->num_chipselect; i++) 643 for (i = 0; i < master->num_chipselect; i++)
649 if (mxc_spi->chipselect[i] >= 0) 644 if (spi_imx->chipselect[i] >= 0)
650 gpio_free(mxc_spi->chipselect[i]); 645 gpio_free(spi_imx->chipselect[i]);
651 646
652 spi_master_put(master); 647 spi_master_put(master);
653 648
@@ -658,27 +653,27 @@ static int __exit mxc_spi_remove(struct platform_device *pdev)
658 return 0; 653 return 0;
659} 654}
660 655
661static struct platform_driver mxc_spi_driver = { 656static struct platform_driver spi_imx_driver = {
662 .driver = { 657 .driver = {
663 .name = DRIVER_NAME, 658 .name = DRIVER_NAME,
664 .owner = THIS_MODULE, 659 .owner = THIS_MODULE,
665 }, 660 },
666 .probe = mxc_spi_probe, 661 .probe = spi_imx_probe,
667 .remove = __exit_p(mxc_spi_remove), 662 .remove = __exit_p(spi_imx_remove),
668}; 663};
669 664
670static int __init mxc_spi_init(void) 665static int __init spi_imx_init(void)
671{ 666{
672 return platform_driver_register(&mxc_spi_driver); 667 return platform_driver_register(&spi_imx_driver);
673} 668}
674 669
675static void __exit mxc_spi_exit(void) 670static void __exit spi_imx_exit(void)
676{ 671{
677 platform_driver_unregister(&mxc_spi_driver); 672 platform_driver_unregister(&spi_imx_driver);
678} 673}
679 674
680module_init(mxc_spi_init); 675module_init(spi_imx_init);
681module_exit(mxc_spi_exit); 676module_exit(spi_imx_exit);
682 677
683MODULE_DESCRIPTION("SPI Master Controller driver"); 678MODULE_DESCRIPTION("SPI Master Controller driver");
684MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 679MODULE_AUTHOR("Sascha Hauer, Pengutronix");
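The probe() path above binds intctrl/config/trigger/rx_available to mx31/mx27/mx1 variants once, so the hot paths never check the SoC type again. A condensed, self-contained sketch of that "fill an ops struct at probe, call through it afterwards" pattern; all names here are illustrative, not the driver's:

#include <stdio.h>

struct soc_ops {
        void (*intctrl)(unsigned int mask);
        void (*trigger)(void);
};

static void mx31_intctrl(unsigned int mask) { printf("mx31 intctrl %#x\n", mask); }
static void mx31_trigger(void)              { printf("mx31 trigger\n"); }
static void mx27_intctrl(unsigned int mask) { printf("mx27 intctrl %#x\n", mask); }
static void mx27_trigger(void)              { printf("mx27 trigger\n"); }

enum soc { SOC_MX31, SOC_MX27 };

/* Resolve the variant once at probe time... */
static int bind_ops(struct soc_ops *ops, enum soc soc)
{
        switch (soc) {
        case SOC_MX31:
                ops->intctrl = mx31_intctrl;
                ops->trigger = mx31_trigger;
                return 0;
        case SOC_MX27:
                ops->intctrl = mx27_intctrl;
                ops->trigger = mx27_trigger;
                return 0;
        }
        return -1;
}

int main(void)
{
        struct soc_ops ops;

        /* ...and later callers just use the pointers. */
        if (bind_ops(&ops, SOC_MX27) == 0) {
                ops.intctrl(0x1);
                ops.trigger();
        }
        return 0;
}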
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index f921bd1109e1..5d23983f02fc 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -537,7 +537,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
537 return status; 537 return status;
538} 538}
539 539
540static struct file_operations spidev_fops = { 540static const struct file_operations spidev_fops = {
541 .owner = THIS_MODULE, 541 .owner = THIS_MODULE,
542 /* REVISIT switch to aio primitives, so that userspace 542 /* REVISIT switch to aio primitives, so that userspace
543 * gets more complete API coverage. It'll simplify things 543 * gets more complete API coverage. It'll simplify things
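The spidev hunk, like several hunks below, only adds const to a file_operations definition so the method table lands in read-only data. The pattern in miniature, with a reduced ops struct standing in for the kernel's:

#include <stdio.h>

/* Reduced stand-in for struct file_operations. */
struct fops {
        int (*open)(const char *name);
};

static int demo_open(const char *name)
{
        printf("open %s\n", name);
        return 0;
}

/*
 * const moves the table into read-only data: the function pointers
 * cannot be patched at run time, accidentally or otherwise.
 */
static const struct fops demo_fops = {
        .open = demo_open,
};

int main(void)
{
        return demo_fops.open("demo");
}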
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index ac8577358ba0..c24e4e0367a2 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio)
102 struct dst_node *n = q->queuedata; 102 struct dst_node *n = q->queuedata;
103 int err = -EIO; 103 int err = -EIO;
104 104
105 if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { 105 if (bio_empty_barrier(bio) && !blk_queue_discard(q)) {
106 /* 106 /*
107 * This is a dirty^Wnice hack, but if we complete this 107 * This is a dirty^Wnice hack, but if we complete this
108 * operation with -EOPNOTSUPP like intended, XFS 108 * operation with -EOPNOTSUPP like intended, XFS
@@ -847,7 +847,7 @@ static dst_command_func dst_commands[] = {
847/* 847/*
848 * Configuration parser. 848 * Configuration parser.
849 */ 849 */
850static void cn_dst_callback(struct cn_msg *msg) 850static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
851{ 851{
852 struct dst_ctl *ctl; 852 struct dst_ctl *ctl;
853 int err; 853 int err;
@@ -855,6 +855,11 @@ static void cn_dst_callback(struct cn_msg *msg)
855 struct dst_node *n = NULL, *tmp; 855 struct dst_node *n = NULL, *tmp;
856 unsigned int hash; 856 unsigned int hash;
857 857
858 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
859 err = -EPERM;
860 goto out;
861 }
862
858 if (msg->len < sizeof(struct dst_ctl)) { 863 if (msg->len < sizeof(struct dst_ctl)) {
859 err = -EBADMSG; 864 err = -EBADMSG;
860 goto out; 865 goto out;
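The connector callbacks in this series gain a struct netlink_skb_parms argument and reject senders lacking CAP_SYS_ADMIN before parsing anything. A user-space model of that ordering — privilege check first, message validation second; the capability test is a stand-in for cap_raised(nsp->eff_cap, CAP_SYS_ADMIN):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct msg {
        size_t len;
        const void *data;
};

/* Hypothetical stand-in for cap_raised(nsp->eff_cap, CAP_SYS_ADMIN). */
static int sender_is_admin(int eff_cap)
{
        return eff_cap & 0x1;
}

static int handle_msg(const struct msg *m, int sender_caps)
{
        /* Reject unprivileged senders before touching the payload. */
        if (!sender_is_admin(sender_caps))
                return -EPERM;

        if (m->len < sizeof(int))
                return -EBADMSG;

        printf("processing %zu bytes\n", m->len);
        return 0;
}

int main(void)
{
        struct msg m = { .len = 8, .data = NULL };

        printf("unprivileged: %d\n", handle_msg(&m, 0));
        printf("privileged:   %d\n", handle_msg(&m, 1));
        return 0;
}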
diff --git a/drivers/staging/iio/light/tsl2561.c b/drivers/staging/iio/light/tsl2561.c
index ea8a5efc19bc..fc2107f4c049 100644
--- a/drivers/staging/iio/light/tsl2561.c
+++ b/drivers/staging/iio/light/tsl2561.c
@@ -239,10 +239,6 @@ static int __devexit tsl2561_remove(struct i2c_client *client)
239 return tsl2561_powerdown(client); 239 return tsl2561_powerdown(client);
240} 240}
241 241
242static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END };
243
244I2C_CLIENT_INSMOD;
245
246static const struct i2c_device_id tsl2561_id[] = { 242static const struct i2c_device_id tsl2561_id[] = {
247 { "tsl2561", 0 }, 243 { "tsl2561", 0 },
248 { } 244 { }
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
index 90f962ee5fd8..5d04bf5b021a 100644
--- a/drivers/staging/pohmelfs/config.c
+++ b/drivers/staging/pohmelfs/config.c
@@ -527,10 +527,13 @@ out_unlock:
527 return err; 527 return err;
528} 528}
529 529
530static void pohmelfs_cn_callback(struct cn_msg *msg) 530static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
531{ 531{
532 int err; 532 int err;
533 533
534 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
535 return;
536
534 switch (msg->flags) { 537 switch (msg->flags) {
535 case POHMELFS_FLAGS_ADD: 538 case POHMELFS_FLAGS_ADD:
536 case POHMELFS_FLAGS_DEL: 539 case POHMELFS_FLAGS_DEL:
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 333ee02e7b2b..864f0ba6a344 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -993,7 +993,7 @@ skip_io_on_zombie:
993 return retval; 993 return retval;
994} 994}
995 995
996static struct file_operations fops = { 996static const struct file_operations fops = {
997 .owner = THIS_MODULE, 997 .owner = THIS_MODULE,
998 .read = usbtmc_read, 998 .read = usbtmc_read,
999 .write = usbtmc_write, 999 .write = usbtmc_write,
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index c44367fea185..bf0f6520c6df 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -30,6 +30,7 @@
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/compiler.h> 31#include <linux/compiler.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/poll.h> 35#include <linux/poll.h>
35#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 29500154d00c..2d867fd22413 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -875,7 +875,7 @@ printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
875} 875}
876 876
877/* used after endpoint configuration */ 877/* used after endpoint configuration */
878static struct file_operations printer_io_operations = { 878static const struct file_operations printer_io_operations = {
879 .owner = THIS_MODULE, 879 .owner = THIS_MODULE,
880 .open = printer_open, 880 .open = printer_open,
881 .read = printer_read, 881 .read = printer_read,
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index cf2d45946c57..2273c815941f 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -134,7 +134,7 @@ static int pzl_open(struct inode *inode, struct file *file)
134 return single_open(file, pzl_print, inode->i_private); 134 return single_open(file, pzl_print, inode->i_private);
135} 135}
136 136
137static struct file_operations di_fops = { 137static const struct file_operations di_fops = {
138 .open = di_open, 138 .open = di_open,
139 .read = seq_read, 139 .read = seq_read,
140 .llseek = seq_lseek, 140 .llseek = seq_lseek,
@@ -142,7 +142,7 @@ static struct file_operations di_fops = {
142 .owner = THIS_MODULE, 142 .owner = THIS_MODULE,
143}; 143};
144 144
145static struct file_operations asl_fops = { 145static const struct file_operations asl_fops = {
146 .open = asl_open, 146 .open = asl_open,
147 .read = seq_read, 147 .read = seq_read,
148 .llseek = seq_lseek, 148 .llseek = seq_lseek,
@@ -150,7 +150,7 @@ static struct file_operations asl_fops = {
150 .owner = THIS_MODULE, 150 .owner = THIS_MODULE,
151}; 151};
152 152
153static struct file_operations pzl_fops = { 153static const struct file_operations pzl_fops = {
154 .open = pzl_open, 154 .open = pzl_open,
155 .read = seq_read, 155 .read = seq_read,
156 .llseek = seq_lseek, 156 .llseek = seq_lseek,
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index d645f3899fe1..32d0199d0c32 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -429,8 +429,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
429 return read_count; 429 return read_count;
430} 430}
431 431
432static struct 432static const struct file_operations usb_rio_fops = {
433file_operations usb_rio_fops = {
434 .owner = THIS_MODULE, 433 .owner = THIS_MODULE,
435 .read = read_rio, 434 .read = read_rio,
436 .write = write_rio, 435 .write = write_rio,
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
index 4a42993700c1..2eecec0c13c9 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/uwb/uwb-debug.c
@@ -205,7 +205,7 @@ static ssize_t command_write(struct file *file, const char __user *buf,
205 return ret < 0 ? ret : len; 205 return ret < 0 ? ret : len;
206} 206}
207 207
208static struct file_operations command_fops = { 208static const struct file_operations command_fops = {
209 .open = command_open, 209 .open = command_open,
210 .write = command_write, 210 .write = command_write,
211 .read = NULL, 211 .read = NULL,
@@ -255,7 +255,7 @@ static int reservations_open(struct inode *inode, struct file *file)
255 return single_open(file, reservations_print, inode->i_private); 255 return single_open(file, reservations_print, inode->i_private);
256} 256}
257 257
258static struct file_operations reservations_fops = { 258static const struct file_operations reservations_fops = {
259 .open = reservations_open, 259 .open = reservations_open,
260 .read = seq_read, 260 .read = seq_read,
261 .llseek = seq_lseek, 261 .llseek = seq_lseek,
@@ -283,7 +283,7 @@ static int drp_avail_open(struct inode *inode, struct file *file)
283 return single_open(file, drp_avail_print, inode->i_private); 283 return single_open(file, drp_avail_print, inode->i_private);
284} 284}
285 285
286static struct file_operations drp_avail_fops = { 286static const struct file_operations drp_avail_fops = {
287 .open = drp_avail_open, 287 .open = drp_avail_open,
288 .read = seq_read, 288 .read = seq_read,
289 .llseek = seq_lseek, 289 .llseek = seq_lseek,
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 42e1005e2916..d065894ce38f 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -26,7 +26,6 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/device.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
31#include <linux/clk.h> 30#include <linux/clk.h>
32#include <video/da8xx-fb.h> 31#include <video/da8xx-fb.h>
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index f2de5a1acd6d..5c5a1ad1d397 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -27,8 +27,6 @@
27#include <mach/msm_iomap.h> 27#include <mach/msm_iomap.h>
28#include <mach/irqs.h> 28#include <mach/irqs.h>
29#include <mach/board.h> 29#include <mach/board.h>
30#include <linux/delay.h>
31
32#include <mach/msm_fb.h> 30#include <mach/msm_fb.h>
33#include "mddi_hw.h" 31#include "mddi_hw.h"
34 32
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index d5e59556f9e2..70dadf9d2334 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -93,7 +93,7 @@ struct blizzard_reg_list {
93}; 93};
94 94
95/* These need to be saved / restored separately from the rest. */ 95/* These need to be saved / restored separately from the rest. */
96static struct blizzard_reg_list blizzard_pll_regs[] = { 96static const struct blizzard_reg_list blizzard_pll_regs[] = {
97 { 97 {
98 .start = 0x04, /* Don't save PLL ctrl (0x0C) */ 98 .start = 0x04, /* Don't save PLL ctrl (0x0C) */
99 .end = 0x0a, 99 .end = 0x0a,
@@ -104,7 +104,7 @@ static struct blizzard_reg_list blizzard_pll_regs[] = {
104 }, 104 },
105}; 105};
106 106
107static struct blizzard_reg_list blizzard_gen_regs[] = { 107static const struct blizzard_reg_list blizzard_gen_regs[] = {
108 { 108 {
109 .start = 0x18, /* SDRAM control */ 109 .start = 0x18, /* SDRAM control */
110 .end = 0x20, 110 .end = 0x20,
@@ -191,7 +191,7 @@ struct blizzard_struct {
191 191
192 struct omapfb_device *fbdev; 192 struct omapfb_device *fbdev;
193 struct lcd_ctrl_extif *extif; 193 struct lcd_ctrl_extif *extif;
194 struct lcd_ctrl *int_ctrl; 194 const struct lcd_ctrl *int_ctrl;
195 195
196 void (*power_up)(struct device *dev); 196 void (*power_up)(struct device *dev);
197 void (*power_down)(struct device *dev); 197 void (*power_down)(struct device *dev);
@@ -1372,7 +1372,7 @@ static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
1372 (1 << OMAPFB_COLOR_YUV420); 1372 (1 << OMAPFB_COLOR_YUV420);
1373} 1373}
1374 1374
1375static void _save_regs(struct blizzard_reg_list *list, int cnt) 1375static void _save_regs(const struct blizzard_reg_list *list, int cnt)
1376{ 1376{
1377 int i; 1377 int i;
1378 1378
@@ -1383,7 +1383,7 @@ static void _save_regs(struct blizzard_reg_list *list, int cnt)
1383 } 1383 }
1384} 1384}
1385 1385
1386static void _restore_regs(struct blizzard_reg_list *list, int cnt) 1386static void _restore_regs(const struct blizzard_reg_list *list, int cnt)
1387{ 1387{
1388 int i; 1388 int i;
1389 1389
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 125e605b8c68..0d0c8c8b9b56 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -393,7 +393,7 @@ static void omapfb_sync(struct fb_info *fbi)
393 * Set fb_info.fix fields and also updates fbdev. 393 * Set fb_info.fix fields and also updates fbdev.
394 * When calling this fb_info.var must be set up already. 394 * When calling this fb_info.var must be set up already.
395 */ 395 */
396static void set_fb_fix(struct fb_info *fbi) 396static void set_fb_fix(struct fb_info *fbi, int from_init)
397{ 397{
398 struct fb_fix_screeninfo *fix = &fbi->fix; 398 struct fb_fix_screeninfo *fix = &fbi->fix;
399 struct fb_var_screeninfo *var = &fbi->var; 399 struct fb_var_screeninfo *var = &fbi->var;
@@ -403,10 +403,16 @@ static void set_fb_fix(struct fb_info *fbi)
403 403
404 rg = &plane->fbdev->mem_desc.region[plane->idx]; 404 rg = &plane->fbdev->mem_desc.region[plane->idx];
405 fbi->screen_base = rg->vaddr; 405 fbi->screen_base = rg->vaddr;
406 mutex_lock(&fbi->mm_lock); 406
407 fix->smem_start = rg->paddr; 407 if (!from_init) {
408 fix->smem_len = rg->size; 408 mutex_lock(&fbi->mm_lock);
409 mutex_unlock(&fbi->mm_lock); 409 fix->smem_start = rg->paddr;
410 fix->smem_len = rg->size;
411 mutex_unlock(&fbi->mm_lock);
412 } else {
413 fix->smem_start = rg->paddr;
414 fix->smem_len = rg->size;
415 }
410 416
411 fix->type = FB_TYPE_PACKED_PIXELS; 417 fix->type = FB_TYPE_PACKED_PIXELS;
412 bpp = var->bits_per_pixel; 418 bpp = var->bits_per_pixel;
@@ -704,7 +710,7 @@ static int omapfb_set_par(struct fb_info *fbi)
704 int r = 0; 710 int r = 0;
705 711
706 omapfb_rqueue_lock(fbdev); 712 omapfb_rqueue_lock(fbdev);
707 set_fb_fix(fbi); 713 set_fb_fix(fbi, 0);
708 r = ctrl_change_mode(fbi); 714 r = ctrl_change_mode(fbi);
709 omapfb_rqueue_unlock(fbdev); 715 omapfb_rqueue_unlock(fbdev);
710 716
@@ -904,7 +910,7 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
904 if (old_size != size) { 910 if (old_size != size) {
905 if (size) { 911 if (size) {
906 memcpy(&fbi->var, new_var, sizeof(fbi->var)); 912 memcpy(&fbi->var, new_var, sizeof(fbi->var));
907 set_fb_fix(fbi); 913 set_fb_fix(fbi, 0);
908 } else { 914 } else {
909 /* 915 /*
910 * Set these explicitly to indicate that the 916 * Set these explicitly to indicate that the
@@ -1504,7 +1510,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
1504 var->bits_per_pixel = fbdev->panel->bpp; 1510 var->bits_per_pixel = fbdev->panel->bpp;
1505 1511
1506 set_fb_var(info, var); 1512 set_fb_var(info, var);
1507 set_fb_fix(info); 1513 set_fb_fix(info, 1);
1508 1514
1509 r = fb_alloc_cmap(&info->cmap, 16, 0); 1515 r = fb_alloc_cmap(&info->cmap, 16, 0);
1510 if (r != 0) 1516 if (r != 0)
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index e98baf6916b8..e35232a18571 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -67,11 +67,14 @@ static DEFINE_MUTEX(uvfb_lock);
67 * find the kernel part of the task struct, copy the registers and 67 * find the kernel part of the task struct, copy the registers and
68 * the buffer contents and then complete the task. 68 * the buffer contents and then complete the task.
69 */ 69 */
70static void uvesafb_cn_callback(struct cn_msg *msg) 70static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
71{ 71{
72 struct uvesafb_task *utask; 72 struct uvesafb_task *utask;
73 struct uvesafb_ktask *task; 73 struct uvesafb_ktask *task;
74 74
75 if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
76 return;
77
75 if (msg->seq >= UVESAFB_TASKS_MAX) 78 if (msg->seq >= UVESAFB_TASKS_MAX)
76 return; 79 return;
77 80
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index df52cb355f7d..406caa6a71cb 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -24,19 +24,6 @@
24#include "../w1_int.h" 24#include "../w1_int.h"
25 25
26/** 26/**
27 * Address is selected using 2 pins, resulting in 4 possible addresses.
28 * 0x18, 0x19, 0x1a, 0x1b
29 * However, the chip cannot be detected without doing an i2c write,
30 * so use the force module parameter.
31 */
32static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
33
34/**
35 * Insmod parameters
36 */
37I2C_CLIENT_INSMOD_1(ds2482);
38
39/**
40 * The DS2482 registers - there are 3 registers that are addressed by a read 27 * The DS2482 registers - there are 3 registers that are addressed by a read
41 * pointer. The read pointer is set by the last command executed. 28 * pointer. The read pointer is set by the last command executed.
42 * 29 *
@@ -96,8 +83,6 @@ static const u8 ds2482_chan_rd[8] =
96 83
97static int ds2482_probe(struct i2c_client *client, 84static int ds2482_probe(struct i2c_client *client,
98 const struct i2c_device_id *id); 85 const struct i2c_device_id *id);
99static int ds2482_detect(struct i2c_client *client, int kind,
100 struct i2c_board_info *info);
101static int ds2482_remove(struct i2c_client *client); 86static int ds2482_remove(struct i2c_client *client);
102 87
103 88
@@ -117,8 +102,6 @@ static struct i2c_driver ds2482_driver = {
117 .probe = ds2482_probe, 102 .probe = ds2482_probe,
118 .remove = ds2482_remove, 103 .remove = ds2482_remove,
119 .id_table = ds2482_id, 104 .id_table = ds2482_id,
120 .detect = ds2482_detect,
121 .address_data = &addr_data,
122}; 105};
123 106
124/* 107/*
@@ -425,19 +408,6 @@ static u8 ds2482_w1_reset_bus(void *data)
425} 408}
426 409
427 410
428static int ds2482_detect(struct i2c_client *client, int kind,
429 struct i2c_board_info *info)
430{
431 if (!i2c_check_functionality(client->adapter,
432 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
433 I2C_FUNC_SMBUS_BYTE))
434 return -ENODEV;
435
436 strlcpy(info->type, "ds2482", I2C_NAME_SIZE);
437
438 return 0;
439}
440
441static int ds2482_probe(struct i2c_client *client, 411static int ds2482_probe(struct i2c_client *client,
442 const struct i2c_device_id *id) 412 const struct i2c_device_id *id)
443{ 413{
@@ -446,6 +416,11 @@ static int ds2482_probe(struct i2c_client *client,
446 int temp1; 416 int temp1;
447 int idx; 417 int idx;
448 418
419 if (!i2c_check_functionality(client->adapter,
420 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
421 I2C_FUNC_SMBUS_BYTE))
422 return -ENODEV;
423
449 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { 424 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) {
450 err = -ENOMEM; 425 err = -ENOMEM;
451 goto exit; 426 goto exit;
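With the legacy detect()/I2C_CLIENT_INSMOD path removed, ds2482 now verifies adapter functionality at the top of probe(), before any allocation, which keeps the error path trivial. A standalone sketch of that ordering, using invented FUNC_* bits rather than the real I2C_FUNC_* flags:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define FUNC_SMBUS_BYTE 0x1     /* hypothetical stand-in for an I2C_FUNC_* bit */

struct adapter { unsigned int funcs; };
struct device_data { int reg; };

static int probe(struct adapter *adap)
{
        struct device_data *data;

        /* Bail out before allocating anything if the bus can't do SMBus byte I/O. */
        if (!(adap->funcs & FUNC_SMBUS_BYTE))
                return -ENODEV;

        data = calloc(1, sizeof(*data));
        if (!data)
                return -ENOMEM;

        printf("probed\n");
        free(data);
        return 0;
}

int main(void)
{
        struct adapter dumb = { .funcs = 0 };
        struct adapter ok = { .funcs = FUNC_SMBUS_BYTE };

        printf("limited adapter: %d\n", probe(&dumb));
        printf("smbus adapter:   %d\n", probe(&ok));
        return 0;
}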
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 52ccb3d3a963..45c126fea31d 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -306,7 +306,7 @@ static int w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rm
306 return error; 306 return error;
307} 307}
308 308
309static void w1_cn_callback(struct cn_msg *msg) 309static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
310{ 310{
311 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1); 311 struct w1_netlink_msg *m = (struct w1_netlink_msg *)(msg + 1);
312 struct w1_netlink_cmd *cmd; 312 struct w1_netlink_cmd *cmd;
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index a9592d981b10..6c4269b836b7 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -43,6 +43,7 @@
43#include <linux/fs.h> 43#include <linux/fs.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/sched.h>
46#include <linux/spinlock.h> 47#include <linux/spinlock.h>
47#include <linux/mount.h> 48#include <linux/mount.h>
48#include <linux/pagemap.h> 49#include <linux/pagemap.h>
diff --git a/firmware/Makefile b/firmware/Makefile
index 5ea80b19785b..a6c7c3e47e42 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -67,10 +67,13 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
67fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \ 67fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
68 e100/d102e_ucode.bin 68 e100/d102e_ucode.bin
69fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin 69fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
70fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis 70fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \
71 cis/DP83903.cis cis/NE2K.cis \
72 cis/tamarack.cis
71fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis 73fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis
72fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis 74fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis
73fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis 75fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
76 cis/COMpad2.cis cis/COMpad4.cis
74fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin 77fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
75fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ 78fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
76 advansys/3550.bin advansys/38C0800.bin 79 advansys/3550.bin advansys/38C0800.bin
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 3f8c4f6bc43f..c437e14f0b11 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -597,6 +597,9 @@ Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
597 597
598File: cis/LA-PCM.cis 598File: cis/LA-PCM.cis
599 cis/PCMLM28.cis 599 cis/PCMLM28.cis
600 cis/DP83903.cis
601 cis/NE2K.cis
602 cis/tamarack.cis
600 603
601Licence: GPL 604Licence: GPL
602 605
@@ -628,6 +631,8 @@ Driver: SERIAL_8250_CS - Serial PCMCIA adapter
628 631
629File: cis/MT5634ZLX.cis 632File: cis/MT5634ZLX.cis
630 cis/RS-COM-2P.cis 633 cis/RS-COM-2P.cis
634 cis/COMpad2.cis
635 cis/COMpad4.cis
631 636
632Licence: GPL 637Licence: GPL
633 638
diff --git a/firmware/cis/COMpad2.cis.ihex b/firmware/cis/COMpad2.cis.ihex
new file mode 100644
index 000000000000..1671c5e48caa
--- /dev/null
+++ b/firmware/cis/COMpad2.cis.ihex
@@ -0,0 +1,11 @@
1:1000000001030000FF151F0401414456414E5445B1
2:10001000434800434F4D7061642D33322F38350013
3:10002000312E300000FF210202011A0501050001F6
4:10003000031B0EC18118AA61E80207E8030730B864
5:100040009E1B08820108AA6030030F1B0883010869
6:10005000AA6040030F1B08840108AA6050030F1B0D
7:0D00600008850108AA6060030F1400FF006E
8:00000001FF
9#
10# Replacement CIS for Advantech COMpad-32/85
11#
diff --git a/firmware/cis/COMpad4.cis.ihex b/firmware/cis/COMpad4.cis.ihex
new file mode 100644
index 000000000000..27bbec1921b3
--- /dev/null
+++ b/firmware/cis/COMpad4.cis.ihex
@@ -0,0 +1,9 @@
1:1000000001030000FF151F0401414456414E5445B1
2:10001000434800434F4D7061642D33322F383542D1
3:100020002D34000000FF210202011A050102000127
4:10003000011B0BC18118AA6040021F30B89E1B082B
5:0C004000820108AA6040031F1400FF00AA
6:00000001FF
7#
8# Replacement CIS for Advantech COMpad-32/85B-4
9#
diff --git a/firmware/cis/DP83903.cis.ihex b/firmware/cis/DP83903.cis.ihex
new file mode 100644
index 000000000000..6d73ea3cf1b8
--- /dev/null
+++ b/firmware/cis/DP83903.cis.ihex
@@ -0,0 +1,14 @@
1:1000000001030000FF152904014D756C74696675C4
2:100010006E6374696F6E20436172640000004E531A
3:1000200043204D46204C414E2F4D6F64656D00FFBF
4:1000300020047501000021020000060B02004900A7
5:100040000000006A000000FF00130343495321022F
6:1000500006001A060517201077021B0C970179017C
7:10006000556530FFFF284000FF001303434953212B
8:100070000202001A060507401077021B09870119C2
9:0800800001552330FFFFFF00D2
10:00000001FF
11#
12# This CIS is for cards based on the National Semiconductor
13# DP83903 Multiple Function Interface Chip
14#
diff --git a/firmware/cis/NE2K.cis.ihex b/firmware/cis/NE2K.cis.ihex
new file mode 100644
index 000000000000..1bb40fc4759f
--- /dev/null
+++ b/firmware/cis/NE2K.cis.ihex
@@ -0,0 +1,8 @@
1:1000000001030000FF1515040150434D4349410011
2:1000100045746865726E6574000000FF2102060079
3:100020001A050120F803031B09E001190155653089
4:06003000FFFF1400FF00B9
5:00000001FF
6#
7# Replacement CIS for various busted NE2000-compatible cards
8#
diff --git a/firmware/cis/tamarack.cis.ihex b/firmware/cis/tamarack.cis.ihex
new file mode 100644
index 000000000000..1e86547fb361
--- /dev/null
+++ b/firmware/cis/tamarack.cis.ihex
@@ -0,0 +1,10 @@
1:100000000103D400FF17034100FF152404015441EC
2:100010004D415241434B0045746865726E657400F2
3:10002000410030303437343331313830303100FF33
4:10003000210206001A050120F803031B14E08119B0
5:100040003F554D5D06864626E551000F100F30FFE7
6:05005000FF1400FF0099
7:00000001FF
8#
9# Replacement CIS for Surecom, Tamarack NE2000 cards
10#
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
deleted file mode 100644
index 5c4f6b499e90..000000000000
--- a/fs/afs/cache.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* AFS local cache management interface
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/fscache.h>
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 106be66dafd2..6ece2a13bf71 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -18,10 +18,10 @@
18#include <linux/key.h> 18#include <linux/key.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/fscache.h>
21 22
22#include "afs.h" 23#include "afs.h"
23#include "afs_vl.h" 24#include "afs_vl.h"
24#include "cache.h"
25 25
26#define AFS_CELL_MAX_ADDRS 15 26#define AFS_CELL_MAX_ADDRS 15
27 27
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index d11c51fc2a3f..2ca7a7cafdbf 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -8,8 +8,10 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/cred.h>
11#include <linux/file.h> 12#include <linux/file.h>
12#include <linux/poll.h> 13#include <linux/poll.h>
14#include <linux/sched.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/init.h> 16#include <linux/init.h>
15#include <linux/fs.h> 17#include <linux/fs.h>
diff --git a/fs/bio.c b/fs/bio.c
index 76738005c8e8..402cb84a92a1 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -249,6 +249,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
249 249
250 mempool_free(p, bs->bio_pool); 250 mempool_free(p, bs->bio_pool);
251} 251}
252EXPORT_SYMBOL(bio_free);
252 253
253void bio_init(struct bio *bio) 254void bio_init(struct bio *bio)
254{ 255{
@@ -257,6 +258,7 @@ void bio_init(struct bio *bio)
257 bio->bi_comp_cpu = -1; 258 bio->bi_comp_cpu = -1;
258 atomic_set(&bio->bi_cnt, 1); 259 atomic_set(&bio->bi_cnt, 1);
259} 260}
261EXPORT_SYMBOL(bio_init);
260 262
261/** 263/**
262 * bio_alloc_bioset - allocate a bio for I/O 264 * bio_alloc_bioset - allocate a bio for I/O
@@ -311,6 +313,7 @@ err_free:
311 mempool_free(p, bs->bio_pool); 313 mempool_free(p, bs->bio_pool);
312 return NULL; 314 return NULL;
313} 315}
316EXPORT_SYMBOL(bio_alloc_bioset);
314 317
315static void bio_fs_destructor(struct bio *bio) 318static void bio_fs_destructor(struct bio *bio)
316{ 319{
@@ -337,6 +340,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
337 340
338 return bio; 341 return bio;
339} 342}
343EXPORT_SYMBOL(bio_alloc);
340 344
341static void bio_kmalloc_destructor(struct bio *bio) 345static void bio_kmalloc_destructor(struct bio *bio)
342{ 346{
@@ -380,6 +384,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
380 384
381 return bio; 385 return bio;
382} 386}
387EXPORT_SYMBOL(bio_kmalloc);
383 388
384void zero_fill_bio(struct bio *bio) 389void zero_fill_bio(struct bio *bio)
385{ 390{
@@ -416,6 +421,7 @@ void bio_put(struct bio *bio)
416 bio->bi_destructor(bio); 421 bio->bi_destructor(bio);
417 } 422 }
418} 423}
424EXPORT_SYMBOL(bio_put);
419 425
420inline int bio_phys_segments(struct request_queue *q, struct bio *bio) 426inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
421{ 427{
@@ -424,6 +430,7 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
424 430
425 return bio->bi_phys_segments; 431 return bio->bi_phys_segments;
426} 432}
433EXPORT_SYMBOL(bio_phys_segments);
427 434
428/** 435/**
429 * __bio_clone - clone a bio 436 * __bio_clone - clone a bio
@@ -451,6 +458,7 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
451 bio->bi_size = bio_src->bi_size; 458 bio->bi_size = bio_src->bi_size;
452 bio->bi_idx = bio_src->bi_idx; 459 bio->bi_idx = bio_src->bi_idx;
453} 460}
461EXPORT_SYMBOL(__bio_clone);
454 462
455/** 463/**
456 * bio_clone - clone a bio 464 * bio_clone - clone a bio
@@ -482,6 +490,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
482 490
483 return b; 491 return b;
484} 492}
493EXPORT_SYMBOL(bio_clone);
485 494
486/** 495/**
487 * bio_get_nr_vecs - return approx number of vecs 496 * bio_get_nr_vecs - return approx number of vecs
@@ -505,6 +514,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
505 514
506 return nr_pages; 515 return nr_pages;
507} 516}
517EXPORT_SYMBOL(bio_get_nr_vecs);
508 518
509static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page 519static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
510 *page, unsigned int len, unsigned int offset, 520 *page, unsigned int len, unsigned int offset,
@@ -635,6 +645,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
635 return __bio_add_page(q, bio, page, len, offset, 645 return __bio_add_page(q, bio, page, len, offset,
636 queue_max_hw_sectors(q)); 646 queue_max_hw_sectors(q));
637} 647}
648EXPORT_SYMBOL(bio_add_pc_page);
638 649
639/** 650/**
640 * bio_add_page - attempt to add page to bio 651 * bio_add_page - attempt to add page to bio
@@ -655,6 +666,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
655 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 666 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
656 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); 667 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
657} 668}
669EXPORT_SYMBOL(bio_add_page);
658 670
659struct bio_map_data { 671struct bio_map_data {
660 struct bio_vec *iovecs; 672 struct bio_vec *iovecs;
@@ -776,6 +788,7 @@ int bio_uncopy_user(struct bio *bio)
776 bio_put(bio); 788 bio_put(bio);
777 return ret; 789 return ret;
778} 790}
791EXPORT_SYMBOL(bio_uncopy_user);
779 792
780/** 793/**
781 * bio_copy_user_iov - copy user data to bio 794 * bio_copy_user_iov - copy user data to bio
@@ -920,6 +933,7 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
920 933
921 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); 934 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
922} 935}
936EXPORT_SYMBOL(bio_copy_user);
923 937
924static struct bio *__bio_map_user_iov(struct request_queue *q, 938static struct bio *__bio_map_user_iov(struct request_queue *q,
925 struct block_device *bdev, 939 struct block_device *bdev,
@@ -1050,6 +1064,7 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
1050 1064
1051 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); 1065 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
1052} 1066}
1067EXPORT_SYMBOL(bio_map_user);
1053 1068
1054/** 1069/**
1055 * bio_map_user_iov - map user sg_iovec table into bio 1070 * bio_map_user_iov - map user sg_iovec table into bio
@@ -1117,13 +1132,13 @@ void bio_unmap_user(struct bio *bio)
1117 __bio_unmap_user(bio); 1132 __bio_unmap_user(bio);
1118 bio_put(bio); 1133 bio_put(bio);
1119} 1134}
1135EXPORT_SYMBOL(bio_unmap_user);
1120 1136
1121static void bio_map_kern_endio(struct bio *bio, int err) 1137static void bio_map_kern_endio(struct bio *bio, int err)
1122{ 1138{
1123 bio_put(bio); 1139 bio_put(bio);
1124} 1140}
1125 1141
1126
1127static struct bio *__bio_map_kern(struct request_queue *q, void *data, 1142static struct bio *__bio_map_kern(struct request_queue *q, void *data,
1128 unsigned int len, gfp_t gfp_mask) 1143 unsigned int len, gfp_t gfp_mask)
1129{ 1144{
@@ -1189,6 +1204,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1189 bio_put(bio); 1204 bio_put(bio);
1190 return ERR_PTR(-EINVAL); 1205 return ERR_PTR(-EINVAL);
1191} 1206}
1207EXPORT_SYMBOL(bio_map_kern);
1192 1208
1193static void bio_copy_kern_endio(struct bio *bio, int err) 1209static void bio_copy_kern_endio(struct bio *bio, int err)
1194{ 1210{
@@ -1250,6 +1266,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1250 1266
1251 return bio; 1267 return bio;
1252} 1268}
1269EXPORT_SYMBOL(bio_copy_kern);
1253 1270
1254/* 1271/*
1255 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions 1272 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
@@ -1400,6 +1417,7 @@ void bio_endio(struct bio *bio, int error)
1400 if (bio->bi_end_io) 1417 if (bio->bi_end_io)
1401 bio->bi_end_io(bio, error); 1418 bio->bi_end_io(bio, error);
1402} 1419}
1420EXPORT_SYMBOL(bio_endio);
1403 1421
1404void bio_pair_release(struct bio_pair *bp) 1422void bio_pair_release(struct bio_pair *bp)
1405{ 1423{
@@ -1410,6 +1428,7 @@ void bio_pair_release(struct bio_pair *bp)
1410 mempool_free(bp, bp->bio2.bi_private); 1428 mempool_free(bp, bp->bio2.bi_private);
1411 } 1429 }
1412} 1430}
1431EXPORT_SYMBOL(bio_pair_release);
1413 1432
1414static void bio_pair_end_1(struct bio *bi, int err) 1433static void bio_pair_end_1(struct bio *bi, int err)
1415{ 1434{
@@ -1477,6 +1496,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1477 1496
1478 return bp; 1497 return bp;
1479} 1498}
1499EXPORT_SYMBOL(bio_split);
1480 1500
1481/** 1501/**
1482 * bio_sector_offset - Find hardware sector offset in bio 1502 * bio_sector_offset - Find hardware sector offset in bio
@@ -1547,6 +1567,7 @@ void bioset_free(struct bio_set *bs)
1547 1567
1548 kfree(bs); 1568 kfree(bs);
1549} 1569}
1570EXPORT_SYMBOL(bioset_free);
1550 1571
1551/** 1572/**
1552 * bioset_create - Create a bio_set 1573 * bioset_create - Create a bio_set
@@ -1592,6 +1613,7 @@ bad:
1592 bioset_free(bs); 1613 bioset_free(bs);
1593 return NULL; 1614 return NULL;
1594} 1615}
1616EXPORT_SYMBOL(bioset_create);
1595 1617
1596static void __init biovec_init_slabs(void) 1618static void __init biovec_init_slabs(void)
1597{ 1619{
@@ -1636,29 +1658,4 @@ static int __init init_bio(void)
1636 1658
1637 return 0; 1659 return 0;
1638} 1660}
1639
1640subsys_initcall(init_bio); 1661subsys_initcall(init_bio);
1641
1642EXPORT_SYMBOL(bio_alloc);
1643EXPORT_SYMBOL(bio_kmalloc);
1644EXPORT_SYMBOL(bio_put);
1645EXPORT_SYMBOL(bio_free);
1646EXPORT_SYMBOL(bio_endio);
1647EXPORT_SYMBOL(bio_init);
1648EXPORT_SYMBOL(__bio_clone);
1649EXPORT_SYMBOL(bio_clone);
1650EXPORT_SYMBOL(bio_phys_segments);
1651EXPORT_SYMBOL(bio_add_page);
1652EXPORT_SYMBOL(bio_add_pc_page);
1653EXPORT_SYMBOL(bio_get_nr_vecs);
1654EXPORT_SYMBOL(bio_map_user);
1655EXPORT_SYMBOL(bio_unmap_user);
1656EXPORT_SYMBOL(bio_map_kern);
1657EXPORT_SYMBOL(bio_copy_kern);
1658EXPORT_SYMBOL(bio_pair_release);
1659EXPORT_SYMBOL(bio_split);
1660EXPORT_SYMBOL(bio_copy_user);
1661EXPORT_SYMBOL(bio_uncopy_user);
1662EXPORT_SYMBOL(bioset_create);
1663EXPORT_SYMBOL(bioset_free);
1664EXPORT_SYMBOL(bio_alloc_bioset);
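The bio.c hunks only relocate each EXPORT_SYMBOL() to sit directly after the function it exports instead of a block at the end of the file. A compile-outside-the-kernel illustration of the placement; the macro here is a user-space stand-in, not the kernel's:

#include <stdio.h>

/* User-space stand-in so the placement pattern compiles without kernel headers. */
#define EXPORT_SYMBOL(sym) extern typeof(sym) sym

int demo_add(int a, int b)
{
        return a + b;
}
/* The export sits right next to the definition, so review and grep see them together. */
EXPORT_SYMBOL(demo_add);

int main(void)
{
        printf("%d\n", demo_add(2, 3));
        return 0;
}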
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index f128427b995b..69b355ae7f49 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -27,7 +27,7 @@
27#include "btrfs_inode.h" 27#include "btrfs_inode.h"
28#include "xattr.h" 28#include "xattr.h"
29 29
30#ifdef CONFIG_FS_POSIX_ACL 30#ifdef CONFIG_BTRFS_POSIX_ACL
31 31
32static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) 32static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
33{ 33{
@@ -313,7 +313,7 @@ struct xattr_handler btrfs_xattr_acl_access_handler = {
313 .set = btrfs_xattr_acl_access_set, 313 .set = btrfs_xattr_acl_access_set,
314}; 314};
315 315
316#else /* CONFIG_FS_POSIX_ACL */ 316#else /* CONFIG_BTRFS_POSIX_ACL */
317 317
318int btrfs_acl_chmod(struct inode *inode) 318int btrfs_acl_chmod(struct inode *inode)
319{ 319{
@@ -325,4 +325,4 @@ int btrfs_init_acl(struct inode *inode, struct inode *dir)
325 return 0; 325 return 0;
326} 326}
327 327
328#endif /* CONFIG_FS_POSIX_ACL */ 328#endif /* CONFIG_BTRFS_POSIX_ACL */
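The acl.c change only renames the guard to CONFIG_BTRFS_POSIX_ACL; the surrounding pattern is the usual one of a real implementation under the option and no-op stubs otherwise, so callers build either way. A compact sketch under an invented CONFIG_DEMO_ACL option:

#include <stdio.h>

/* Flip this define to model building with or without the Kconfig option. */
#define CONFIG_DEMO_ACL 1

#ifdef CONFIG_DEMO_ACL
static int demo_acl_chmod(int inode)
{
        printf("updating ACL xattr for inode %d\n", inode);
        return 0;
}
#else /* CONFIG_DEMO_ACL */
static inline int demo_acl_chmod(int inode)
{
        return 0;       /* ACLs compiled out: callers still link and succeed */
}
#endif /* CONFIG_DEMO_ACL */

int main(void)
{
        return demo_acl_chmod(42);
}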
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 82ee56bba299..a54d354cefcb 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -128,6 +128,14 @@ struct btrfs_inode {
128 u64 last_unlink_trans; 128 u64 last_unlink_trans;
129 129
130 /* 130 /*
131 * These two counters are for delalloc metadata reservations. We keep
132 * track of how many extents we've accounted for vs how many extents we
133 * have.
134 */
135 int delalloc_reserved_extents;
136 int delalloc_extents;
137
138 /*
131 * ordered_data_close is set by truncate when a file that used 139 * ordered_data_close is set by truncate when a file that used
132 * to have good data has been truncated to zero. When it is set 140 * to have good data has been truncated to zero. When it is set
133 * the btrfs file release call will add this inode to the 141 * the btrfs file release call will add this inode to the
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80599b4e42bd..dd8ced9814c4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -675,18 +675,19 @@ struct btrfs_space_info {
675 current allocations */ 675 current allocations */
676 u64 bytes_readonly; /* total bytes that are read only */ 676 u64 bytes_readonly; /* total bytes that are read only */
677 u64 bytes_super; /* total bytes reserved for the super blocks */ 677 u64 bytes_super; /* total bytes reserved for the super blocks */
678 678 u64 bytes_root; /* the number of bytes needed to commit a
679 /* delalloc accounting */ 679 transaction */
680 u64 bytes_delalloc; /* number of bytes reserved for allocation,
681 this space is not necessarily reserved yet
682 by the allocator */
683 u64 bytes_may_use; /* number of bytes that may be used for 680 u64 bytes_may_use; /* number of bytes that may be used for
684 delalloc */ 681 delalloc/allocations */
682 u64 bytes_delalloc; /* number of bytes currently reserved for
683 delayed allocation */
685 684
686 int full; /* indicates that we cannot allocate any more 685 int full; /* indicates that we cannot allocate any more
687 chunks for this space */ 686 chunks for this space */
688 int force_alloc; /* set if we need to force a chunk alloc for 687 int force_alloc; /* set if we need to force a chunk alloc for
689 this space */ 688 this space */
689 int force_delalloc; /* make people start doing filemap_flush until
690 we're under a threshold */
690 691
691 struct list_head list; 692 struct list_head list;
692 693
@@ -695,6 +696,9 @@ struct btrfs_space_info {
695 spinlock_t lock; 696 spinlock_t lock;
696 struct rw_semaphore groups_sem; 697 struct rw_semaphore groups_sem;
697 atomic_t caching_threads; 698 atomic_t caching_threads;
699
700 int allocating_chunk;
701 wait_queue_head_t wait;
698}; 702};
699 703
700/* 704/*
@@ -2022,7 +2026,12 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
2022void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); 2026void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
2023void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 2027void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
2024 2028
2025int btrfs_check_metadata_free_space(struct btrfs_root *root); 2029int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items);
2030int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items);
2031int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2032 struct inode *inode, int num_items);
2033int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
2034 struct inode *inode, int num_items);
2026int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, 2035int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2027 u64 bytes); 2036 u64 bytes);
2028void btrfs_free_reserved_data_space(struct btrfs_root *root, 2037void btrfs_free_reserved_data_space(struct btrfs_root *root,
@@ -2326,7 +2335,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
2326int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 2335int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
2327 int skip_pinned); 2336 int skip_pinned);
2328int btrfs_check_file(struct btrfs_root *root, struct inode *inode); 2337int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
2329extern struct file_operations btrfs_file_operations; 2338extern const struct file_operations btrfs_file_operations;
2330int btrfs_drop_extents(struct btrfs_trans_handle *trans, 2339int btrfs_drop_extents(struct btrfs_trans_handle *trans,
2331 struct btrfs_root *root, struct inode *inode, 2340 struct btrfs_root *root, struct inode *inode,
2332 u64 start, u64 end, u64 locked_end, 2341 u64 start, u64 end, u64 locked_end,
@@ -2357,7 +2366,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options);
2357int btrfs_sync_fs(struct super_block *sb, int wait); 2366int btrfs_sync_fs(struct super_block *sb, int wait);
2358 2367
2359/* acl.c */ 2368/* acl.c */
2360#ifdef CONFIG_FS_POSIX_ACL 2369#ifdef CONFIG_BTRFS_POSIX_ACL
2361int btrfs_check_acl(struct inode *inode, int mask); 2370int btrfs_check_acl(struct inode *inode, int mask);
2362#else 2371#else
2363#define btrfs_check_acl NULL 2372#define btrfs_check_acl NULL
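The ctree.h hunk replaces the old single free-space check with paired btrfs_reserve_metadata_space()/btrfs_unreserve_metadata_space() calls (plus delalloc variants). A sketch of how a credit-style API of this shape is typically used — reserve worst case up front, release on every exit path; the names and the 8-item pool are hypothetical, not the real btrfs call sites:

#include <errno.h>
#include <stdio.h>

static long reserved_items;

/* Hypothetical credit API modelled on the reserve/unreserve pairing. */
static int reserve_metadata(int num_items)
{
        if (reserved_items + num_items > 8)     /* pretend the pool holds 8 items */
                return -ENOSPC;
        reserved_items += num_items;
        return 0;
}

static void unreserve_metadata(int num_items)
{
        reserved_items -= num_items;
}

static int do_operation(void)
{
        int ret = reserve_metadata(3);  /* worst case: 3 tree items touched */

        if (ret)
                return ret;

        printf("operation runs with %ld items reserved\n", reserved_items);

        /* Success or failure, the credits go back once the operation ends. */
        unreserve_metadata(3);
        return 0;
}

int main(void)
{
        return do_operation();
}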
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 644e796fd643..af0435f79fa6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -822,14 +822,14 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
822 822
823int btrfs_write_tree_block(struct extent_buffer *buf) 823int btrfs_write_tree_block(struct extent_buffer *buf)
824{ 824{
825 return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start, 825 return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
826 buf->start + buf->len - 1, WB_SYNC_ALL); 826 buf->start + buf->len - 1);
827} 827}
828 828
829int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) 829int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
830{ 830{
831 return btrfs_wait_on_page_writeback_range(buf->first_page->mapping, 831 return filemap_fdatawait_range(buf->first_page->mapping,
832 buf->start, buf->start + buf->len - 1); 832 buf->start, buf->start + buf->len - 1);
833} 833}
834 834
835struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, 835struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
@@ -1630,7 +1630,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1630 fs_info->sb = sb; 1630 fs_info->sb = sb;
1631 fs_info->max_extent = (u64)-1; 1631 fs_info->max_extent = (u64)-1;
1632 fs_info->max_inline = 8192 * 1024; 1632 fs_info->max_inline = 8192 * 1024;
1633 fs_info->metadata_ratio = 8; 1633 fs_info->metadata_ratio = 0;
1634 1634
1635 fs_info->thread_pool_size = min_t(unsigned long, 1635 fs_info->thread_pool_size = min_t(unsigned long,
1636 num_online_cpus() + 2, 8); 1636 num_online_cpus() + 2, 8);
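The disk-io.c hunk switches to the generic filemap_fdatawrite_range()/filemap_fdatawait_range() helpers to flush and wait on just the extent buffer's byte range. A user-space analogue of that write-range-then-wait-range pairing using sync_file_range(); this is only an analogue, not the kernel helpers themselves:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char buf[] = "dirty bytes";
        int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);

        if (fd < 0)
                return 1;

        pwrite(fd, buf, sizeof(buf), 4096);

        /* Kick off writeback for [4096, 4096 + sizeof(buf)) ... */
        sync_file_range(fd, 4096, sizeof(buf), SYNC_FILE_RANGE_WRITE);
        /* ... then wait for exactly that range to reach storage. */
        sync_file_range(fd, 4096, sizeof(buf),
                        SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WAIT_AFTER);

        printf("range synced\n");
        close(fd);
        return 0;
}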
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 993f93ff7ba6..359a754c782c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -68,6 +68,8 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
68 struct extent_buffer **must_clean); 68 struct extent_buffer **must_clean);
69static int find_next_key(struct btrfs_path *path, int level, 69static int find_next_key(struct btrfs_path *path, int level,
70 struct btrfs_key *key); 70 struct btrfs_key *key);
71static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
72 int dump_block_groups);
71 73
72static noinline int 74static noinline int
73block_group_cache_done(struct btrfs_block_group_cache *cache) 75block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -2765,67 +2767,346 @@ void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2765 alloc_target); 2767 alloc_target);
2766} 2768}
2767 2769
2770static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
2771{
2772 u64 num_bytes;
2773 int level;
2774
2775 level = BTRFS_MAX_LEVEL - 2;
2776 /*
2777 * NOTE: these calculations are absolutely the worst possible case.
2778 * This assumes that _every_ item we insert will require a new leaf, and
2779 * that the tree has grown to its maximum level size.
2780 */
2781
2782 /*
 2783	 * for every item we insert we could insert both an extent item and an
 2784	 * extent ref item. Then for every item we insert, we will need to cow
2785 * both the original leaf, plus the leaf to the left and right of it.
2786 *
2787 * Unless we are talking about the extent root, then we just want the
2788 * number of items * 2, since we just need the extent item plus its ref.
2789 */
2790 if (root == root->fs_info->extent_root)
2791 num_bytes = num_items * 2;
2792 else
2793 num_bytes = (num_items + (2 * num_items)) * 3;
2794
2795 /*
2796 * num_bytes is total number of leaves we could need times the leaf
2797 * size, and then for every leaf we could end up cow'ing 2 nodes per
2798 * level, down to the leaf level.
2799 */
2800 num_bytes = (num_bytes * root->leafsize) +
2801 (num_bytes * (level * 2)) * root->nodesize;
2802
2803 return num_bytes;
2804}
2805
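To put numbers on calculate_bytes_needed() above: assuming 4 KiB leaf and node sizes and BTRFS_MAX_LEVEL of 8 (so level = 6), a single item on a non-extent root starts as (1 + 2*1) * 3 = 9 worst-case leaves, giving 9 * 4096 + (9 * 12) * 4096 = 479,232 bytes, roughly 468 KiB reserved per item. A tiny program reproducing that arithmetic under those assumptions:

#include <stdio.h>

int main(void)
{
        /* Assumed values: 4 KiB leaves/nodes, BTRFS_MAX_LEVEL == 8. */
        unsigned long leafsize = 4096, nodesize = 4096;
        int level = 8 - 2;
        unsigned long num_items = 1;

        /* Non-extent-root branch of calculate_bytes_needed(). */
        unsigned long num_bytes = (num_items + 2 * num_items) * 3;

        num_bytes = num_bytes * leafsize + (num_bytes * (level * 2)) * nodesize;
        printf("%lu bytes reserved per item (worst case)\n", num_bytes);
        return 0;
}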
2768/* 2806/*
2769 * for now this just makes sure we have at least 5% of our metadata space free 2807 * Unreserve metadata space for delalloc. If we have less reserved credits than
2770 * for use. 2808 * we have extents, this function does nothing.
2771 */ 2809 */
2772int btrfs_check_metadata_free_space(struct btrfs_root *root) 2810int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2811 struct inode *inode, int num_items)
2773{ 2812{
2774 struct btrfs_fs_info *info = root->fs_info; 2813 struct btrfs_fs_info *info = root->fs_info;
2775 struct btrfs_space_info *meta_sinfo; 2814 struct btrfs_space_info *meta_sinfo;
2776 u64 alloc_target, thresh; 2815 u64 num_bytes;
2777 int committed = 0, ret; 2816 u64 alloc_target;
2817 bool bug = false;
2778 2818
2779 /* get the space info for where the metadata will live */ 2819 /* get the space info for where the metadata will live */
2780 alloc_target = btrfs_get_alloc_profile(root, 0); 2820 alloc_target = btrfs_get_alloc_profile(root, 0);
2781 meta_sinfo = __find_space_info(info, alloc_target); 2821 meta_sinfo = __find_space_info(info, alloc_target);
2782 if (!meta_sinfo)
2783 goto alloc;
2784 2822
2785again: 2823 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2824 num_items);
2825
2786 spin_lock(&meta_sinfo->lock); 2826 spin_lock(&meta_sinfo->lock);
2787 if (!meta_sinfo->full) 2827 if (BTRFS_I(inode)->delalloc_reserved_extents <=
2788 thresh = meta_sinfo->total_bytes * 80; 2828 BTRFS_I(inode)->delalloc_extents) {
2789 else 2829 spin_unlock(&meta_sinfo->lock);
2790 thresh = meta_sinfo->total_bytes * 95; 2830 return 0;
2831 }
2832
2833 BTRFS_I(inode)->delalloc_reserved_extents--;
2834 BUG_ON(BTRFS_I(inode)->delalloc_reserved_extents < 0);
2835
2836 if (meta_sinfo->bytes_delalloc < num_bytes) {
2837 bug = true;
2838 meta_sinfo->bytes_delalloc = 0;
2839 } else {
2840 meta_sinfo->bytes_delalloc -= num_bytes;
2841 }
2842 spin_unlock(&meta_sinfo->lock);
2791 2843
2844 BUG_ON(bug);
2845
2846 return 0;
2847}
2848
2849static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
2850{
2851 u64 thresh;
2852
2853 thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2854 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2855 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2856 meta_sinfo->bytes_may_use;
2857
2858 thresh = meta_sinfo->total_bytes - thresh;
2859 thresh *= 80;
2792 do_div(thresh, 100); 2860 do_div(thresh, 100);
2861 if (thresh <= meta_sinfo->bytes_delalloc)
2862 meta_sinfo->force_delalloc = 1;
2863 else
2864 meta_sinfo->force_delalloc = 0;
2865}
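
For a quick feel of the force_delalloc test above: writeback is forced once delalloc reservations reach 80% of whatever remains after everything else is accounted. The counters below are invented purely to show the comparison:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical space_info counters, in bytes */
	uint64_t total     = 1024ULL << 20;	/* 1 GiB of metadata space */
	uint64_t accounted = 600ULL << 20;	/* used + reserved + pinned + ... */
	uint64_t delalloc  = 350ULL << 20;

	uint64_t thresh = (total - accounted) * 80 / 100;	/* 80% of what is left */

	printf("force_delalloc = %d\n", delalloc >= thresh);	/* prints 1 here */
	return 0;
}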
2793 2866
2794 if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + 2867static int maybe_allocate_chunk(struct btrfs_root *root,
2795 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + 2868 struct btrfs_space_info *info)
2796 meta_sinfo->bytes_super > thresh) { 2869{
2797 struct btrfs_trans_handle *trans; 2870 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
2798 if (!meta_sinfo->full) { 2871 struct btrfs_trans_handle *trans;
2799 meta_sinfo->force_alloc = 1; 2872 bool wait = false;
2873 int ret = 0;
2874 u64 min_metadata;
2875 u64 free_space;
2876
2877 free_space = btrfs_super_total_bytes(disk_super);
2878 /*
2879 * we allow the metadata to grow to a max of either 5gb or 5% of the
2880 * space in the volume.
2881 */
2882 min_metadata = min((u64)5 * 1024 * 1024 * 1024,
2883 div64_u64(free_space * 5, 100));
2884 if (info->total_bytes >= min_metadata) {
2885 spin_unlock(&info->lock);
2886 return 0;
2887 }
2888
2889 if (info->full) {
2890 spin_unlock(&info->lock);
2891 return 0;
2892 }
2893
2894 if (!info->allocating_chunk) {
2895 info->force_alloc = 1;
2896 info->allocating_chunk = 1;
2897 init_waitqueue_head(&info->wait);
2898 } else {
2899 wait = true;
2900 }
2901
2902 spin_unlock(&info->lock);
2903
2904 if (wait) {
2905 wait_event(info->wait,
2906 !info->allocating_chunk);
2907 return 1;
2908 }
2909
2910 trans = btrfs_start_transaction(root, 1);
2911 if (!trans) {
2912 ret = -ENOMEM;
2913 goto out;
2914 }
2915
2916 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2917 4096 + 2 * 1024 * 1024,
2918 info->flags, 0);
2919 btrfs_end_transaction(trans, root);
2920 if (ret)
2921 goto out;
2922out:
2923 spin_lock(&info->lock);
2924 info->allocating_chunk = 0;
2925 spin_unlock(&info->lock);
2926 wake_up(&info->wait);
2927
2928 if (ret)
2929 return 0;
2930 return 1;
2931}
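
The cap enforced above is simply the smaller of 5 GiB and 5% of the filesystem; a minimal sketch of that expression, with an arbitrarily chosen filesystem size:

#include <stdio.h>
#include <stdint.h>

/* smaller of 5 GiB and 5% of the filesystem, as computed in maybe_allocate_chunk() */
static uint64_t metadata_cap(uint64_t fs_bytes)
{
	uint64_t five_gb  = 5ULL * 1024 * 1024 * 1024;
	uint64_t five_pct = fs_bytes * 5 / 100;

	return five_gb < five_pct ? five_gb : five_pct;
}

int main(void)
{
	uint64_t fs_bytes = 200ULL << 30;	/* pretend 200 GiB filesystem */

	/* 5% of 200 GiB is 10 GiB, so the 5 GiB cap wins here */
	printf("metadata may grow to %llu bytes\n",
	       (unsigned long long)metadata_cap(fs_bytes));
	return 0;
}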
2932
2933/*
2934 * Reserve metadata space for delalloc.
2935 */
2936int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
2937 struct inode *inode, int num_items)
2938{
2939 struct btrfs_fs_info *info = root->fs_info;
2940 struct btrfs_space_info *meta_sinfo;
2941 u64 num_bytes;
2942 u64 used;
2943 u64 alloc_target;
2944 int flushed = 0;
2945 int force_delalloc;
2946
2947 /* get the space info for where the metadata will live */
2948 alloc_target = btrfs_get_alloc_profile(root, 0);
2949 meta_sinfo = __find_space_info(info, alloc_target);
2950
2951 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2952 num_items);
2953again:
2954 spin_lock(&meta_sinfo->lock);
2955
2956 force_delalloc = meta_sinfo->force_delalloc;
2957
2958 if (unlikely(!meta_sinfo->bytes_root))
2959 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
2960
2961 if (!flushed)
2962 meta_sinfo->bytes_delalloc += num_bytes;
2963
2964 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2965 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2966 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2967 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
2968
2969 if (used > meta_sinfo->total_bytes) {
2970 flushed++;
2971
2972 if (flushed == 1) {
2973 if (maybe_allocate_chunk(root, meta_sinfo))
2974 goto again;
2975 flushed++;
2976 } else {
2800 spin_unlock(&meta_sinfo->lock); 2977 spin_unlock(&meta_sinfo->lock);
2801alloc: 2978 }
2802 trans = btrfs_start_transaction(root, 1);
2803 if (!trans)
2804 return -ENOMEM;
2805 2979
2806 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 2980 if (flushed == 2) {
2807 2 * 1024 * 1024, alloc_target, 0); 2981 filemap_flush(inode->i_mapping);
2808 btrfs_end_transaction(trans, root); 2982 goto again;
2809 if (!meta_sinfo) { 2983 } else if (flushed == 3) {
2810 meta_sinfo = __find_space_info(info, 2984 btrfs_start_delalloc_inodes(root);
2811 alloc_target); 2985 btrfs_wait_ordered_extents(root, 0);
2812 }
2813 goto again; 2986 goto again;
2814 } 2987 }
2988 spin_lock(&meta_sinfo->lock);
2989 meta_sinfo->bytes_delalloc -= num_bytes;
2815 spin_unlock(&meta_sinfo->lock); 2990 spin_unlock(&meta_sinfo->lock);
2991 printk(KERN_ERR "enospc, has %d, reserved %d\n",
2992 BTRFS_I(inode)->delalloc_extents,
2993 BTRFS_I(inode)->delalloc_reserved_extents);
2994 dump_space_info(meta_sinfo, 0, 0);
2995 return -ENOSPC;
2996 }
2816 2997
2817 if (!committed) { 2998 BTRFS_I(inode)->delalloc_reserved_extents++;
2818 committed = 1; 2999 check_force_delalloc(meta_sinfo);
2819 trans = btrfs_join_transaction(root, 1); 3000 spin_unlock(&meta_sinfo->lock);
2820 if (!trans) 3001
2821 return -ENOMEM; 3002 if (!flushed && force_delalloc)
2822 ret = btrfs_commit_transaction(trans, root); 3003 filemap_flush(inode->i_mapping);
2823 if (ret) 3004
2824 return ret; 3005 return 0;
3006}
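
The reservation path above escalates through three recovery attempts before returning -ENOSPC. The sketch below restates that control flow only; try_alloc_chunk, flush_this_inode, flush_all_delalloc and space_available are placeholders, not btrfs APIs:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* placeholder recovery actions; each returns true if it made progress */
static bool try_alloc_chunk(void)     { return false; }
static void flush_this_inode(void)    { }
static void flush_all_delalloc(void)  { }
static bool space_available(void)     { return false; }

static int reserve_with_escalation(void)
{
	int flushed = 0;

	while (!space_available()) {
		flushed++;
		if (flushed == 1) {
			if (try_alloc_chunk())
				continue;	/* a new chunk may be enough, retry */
			flushed++;		/* no chunk possible, fall through */
		}
		if (flushed == 2) {
			flush_this_inode();	/* kick writeback on this inode */
			continue;
		}
		if (flushed == 3) {
			flush_all_delalloc();	/* flush every delalloc inode and wait */
			continue;
		}
		return -ENOSPC;			/* out of tricks */
	}
	return 0;
}

int main(void)
{
	printf("reserve: %d\n", reserve_with_escalation());
	return 0;
}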
3007
3008/*
3009 * unreserve num_items number of items worth of metadata space. This needs to
3010 * be paired with btrfs_reserve_metadata_space.
3011 *
3012 * NOTE: if you have the option, run this _AFTER_ you do a
3013 * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
3014 * operations which will result in more metadata being used, so we want to make sure we
3015 * can do that without issue.
3016 */
3017int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
3018{
3019 struct btrfs_fs_info *info = root->fs_info;
3020 struct btrfs_space_info *meta_sinfo;
3021 u64 num_bytes;
3022 u64 alloc_target;
3023 bool bug = false;
3024
3025 /* get the space info for where the metadata will live */
3026 alloc_target = btrfs_get_alloc_profile(root, 0);
3027 meta_sinfo = __find_space_info(info, alloc_target);
3028
3029 num_bytes = calculate_bytes_needed(root, num_items);
3030
3031 spin_lock(&meta_sinfo->lock);
3032 if (meta_sinfo->bytes_may_use < num_bytes) {
3033 bug = true;
3034 meta_sinfo->bytes_may_use = 0;
3035 } else {
3036 meta_sinfo->bytes_may_use -= num_bytes;
3037 }
3038 spin_unlock(&meta_sinfo->lock);
3039
3040 BUG_ON(bug);
3041
3042 return 0;
3043}
3044
3045/*
3046 * Reserve some metadata space for use. We'll calculate the worst case number
3047 * of bytes that would be needed to modify num_items number of items. If we
3048 * have space, fantastic, if not, you get -ENOSPC. Please call
3049 * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
3050 * items you reserved, since whatever metadata you needed should have already
3051 * been allocated.
3052 *
3053 * This will commit the transaction to make more space if we don't have enough
3054 * metadata space. The only time we don't do this is if we're reserving space
3055 * inside of a transaction, then we will just return -ENOSPC and it is the
3056 * caller's responsibility to handle it properly.
3057 */
3058int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
3059{
3060 struct btrfs_fs_info *info = root->fs_info;
3061 struct btrfs_space_info *meta_sinfo;
3062 u64 num_bytes;
3063 u64 used;
3064 u64 alloc_target;
3065 int retries = 0;
3066
3067 /* get the space info for where the metadata will live */
3068 alloc_target = btrfs_get_alloc_profile(root, 0);
3069 meta_sinfo = __find_space_info(info, alloc_target);
3070
3071 num_bytes = calculate_bytes_needed(root, num_items);
3072again:
3073 spin_lock(&meta_sinfo->lock);
3074
3075 if (unlikely(!meta_sinfo->bytes_root))
3076 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3077
3078 if (!retries)
3079 meta_sinfo->bytes_may_use += num_bytes;
3080
3081 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3082 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3083 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3084 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3085
3086 if (used > meta_sinfo->total_bytes) {
3087 retries++;
3088 if (retries == 1) {
3089 if (maybe_allocate_chunk(root, meta_sinfo))
3090 goto again;
3091 retries++;
3092 } else {
3093 spin_unlock(&meta_sinfo->lock);
3094 }
3095
3096 if (retries == 2) {
3097 btrfs_start_delalloc_inodes(root);
3098 btrfs_wait_ordered_extents(root, 0);
2825 goto again; 3099 goto again;
2826 } 3100 }
3101 spin_lock(&meta_sinfo->lock);
3102 meta_sinfo->bytes_may_use -= num_bytes;
3103 spin_unlock(&meta_sinfo->lock);
3104
3105 dump_space_info(meta_sinfo, 0, 0);
2827 return -ENOSPC; 3106 return -ENOSPC;
2828 } 3107 }
3108
3109 check_force_delalloc(meta_sinfo);
2829 spin_unlock(&meta_sinfo->lock); 3110 spin_unlock(&meta_sinfo->lock);
2830 3111
2831 return 0; 3112 return 0;
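
To make the pairing contract from the comments above concrete, here is a hedged caller-side sketch; the helper names are only stand-ins for the reserve/unreserve and transaction functions introduced in this patch, and the item count is made up:

#include <stdio.h>

/* stand-ins for btrfs_reserve_metadata_space()/btrfs_unreserve_metadata_space()
 * and the transaction helpers; real callers pass a struct btrfs_root */
static int  reserve_items(int n)    { printf("reserve %d items\n", n); return 0; }
static void unreserve_items(int n)  { printf("unreserve %d items\n", n); }
static void start_transaction(void) { printf("start transaction\n"); }
static void end_transaction(void)   { printf("end transaction, delayed refs run here\n"); }
static int  do_modification(void)   { return 0; }	/* the actual tree change */

int main(void)
{
	int items = 2;	/* purely illustrative count */
	int ret;

	ret = reserve_items(items);	/* would be -ENOSPC on failure */
	if (ret)
		return 1;

	start_transaction();
	ret = do_modification();
	end_transaction();

	/* release the same count, and only after the transaction has ended */
	unreserve_items(items);
	return ret;
}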
@@ -2888,7 +3169,7 @@ alloc:
2888 spin_unlock(&data_sinfo->lock); 3169 spin_unlock(&data_sinfo->lock);
2889 3170
2890 /* commit the current transaction and try again */ 3171 /* commit the current transaction and try again */
2891 if (!committed) { 3172 if (!committed && !root->fs_info->open_ioctl_trans) {
2892 committed = 1; 3173 committed = 1;
2893 trans = btrfs_join_transaction(root, 1); 3174 trans = btrfs_join_transaction(root, 1);
2894 if (!trans) 3175 if (!trans)
@@ -2916,7 +3197,7 @@ alloc:
2916 BTRFS_I(inode)->reserved_bytes += bytes; 3197 BTRFS_I(inode)->reserved_bytes += bytes;
2917 spin_unlock(&data_sinfo->lock); 3198 spin_unlock(&data_sinfo->lock);
2918 3199
2919 return btrfs_check_metadata_free_space(root); 3200 return 0;
2920} 3201}
2921 3202
2922/* 3203/*
@@ -3015,17 +3296,15 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3015 BUG_ON(!space_info); 3296 BUG_ON(!space_info);
3016 3297
3017 spin_lock(&space_info->lock); 3298 spin_lock(&space_info->lock);
3018 if (space_info->force_alloc) { 3299 if (space_info->force_alloc)
3019 force = 1; 3300 force = 1;
3020 space_info->force_alloc = 0;
3021 }
3022 if (space_info->full) { 3301 if (space_info->full) {
3023 spin_unlock(&space_info->lock); 3302 spin_unlock(&space_info->lock);
3024 goto out; 3303 goto out;
3025 } 3304 }
3026 3305
3027 thresh = space_info->total_bytes - space_info->bytes_readonly; 3306 thresh = space_info->total_bytes - space_info->bytes_readonly;
3028 thresh = div_factor(thresh, 6); 3307 thresh = div_factor(thresh, 8);
3029 if (!force && 3308 if (!force &&
3030 (space_info->bytes_used + space_info->bytes_pinned + 3309 (space_info->bytes_used + space_info->bytes_pinned +
3031 space_info->bytes_reserved + alloc_bytes) < thresh) { 3310 space_info->bytes_reserved + alloc_bytes) < thresh) {
@@ -3039,7 +3318,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3039 * we keep a reasonable number of metadata chunks allocated in the 3318 * we keep a reasonable number of metadata chunks allocated in the
3040 * FS as well. 3319 * FS as well.
3041 */ 3320 */
3042 if (flags & BTRFS_BLOCK_GROUP_DATA) { 3321 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3043 fs_info->data_chunk_allocations++; 3322 fs_info->data_chunk_allocations++;
3044 if (!(fs_info->data_chunk_allocations % 3323 if (!(fs_info->data_chunk_allocations %
3045 fs_info->metadata_ratio)) 3324 fs_info->metadata_ratio))
@@ -3047,8 +3326,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3047 } 3326 }
3048 3327
3049 ret = btrfs_alloc_chunk(trans, extent_root, flags); 3328 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3329 spin_lock(&space_info->lock);
3050 if (ret) 3330 if (ret)
3051 space_info->full = 1; 3331 space_info->full = 1;
3332 space_info->force_alloc = 0;
3333 spin_unlock(&space_info->lock);
3052out: 3334out:
3053 mutex_unlock(&extent_root->fs_info->chunk_mutex); 3335 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3054 return ret; 3336 return ret;
@@ -4063,21 +4345,32 @@ loop:
4063 return ret; 4345 return ret;
4064} 4346}
4065 4347
4066static void dump_space_info(struct btrfs_space_info *info, u64 bytes) 4348static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4349 int dump_block_groups)
4067{ 4350{
4068 struct btrfs_block_group_cache *cache; 4351 struct btrfs_block_group_cache *cache;
4069 4352
4353 spin_lock(&info->lock);
4070 printk(KERN_INFO "space_info has %llu free, is %sfull\n", 4354 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4071 (unsigned long long)(info->total_bytes - info->bytes_used - 4355 (unsigned long long)(info->total_bytes - info->bytes_used -
4072 info->bytes_pinned - info->bytes_reserved), 4356 info->bytes_pinned - info->bytes_reserved -
4357 info->bytes_super),
4073 (info->full) ? "" : "not "); 4358 (info->full) ? "" : "not ");
4074 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu," 4359 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
4075 " may_use=%llu, used=%llu\n", 4360 " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
4361 "\n",
4076 (unsigned long long)info->total_bytes, 4362 (unsigned long long)info->total_bytes,
4077 (unsigned long long)info->bytes_pinned, 4363 (unsigned long long)info->bytes_pinned,
4078 (unsigned long long)info->bytes_delalloc, 4364 (unsigned long long)info->bytes_delalloc,
4079 (unsigned long long)info->bytes_may_use, 4365 (unsigned long long)info->bytes_may_use,
4080 (unsigned long long)info->bytes_used); 4366 (unsigned long long)info->bytes_used,
4367 (unsigned long long)info->bytes_root,
4368 (unsigned long long)info->bytes_super,
4369 (unsigned long long)info->bytes_reserved);
4370 spin_unlock(&info->lock);
4371
4372 if (!dump_block_groups)
4373 return;
4081 4374
4082 down_read(&info->groups_sem); 4375 down_read(&info->groups_sem);
4083 list_for_each_entry(cache, &info->block_groups, list) { 4376 list_for_each_entry(cache, &info->block_groups, list) {
@@ -4145,7 +4438,7 @@ again:
4145 printk(KERN_ERR "btrfs allocation failed flags %llu, " 4438 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4146 "wanted %llu\n", (unsigned long long)data, 4439 "wanted %llu\n", (unsigned long long)data,
4147 (unsigned long long)num_bytes); 4440 (unsigned long long)num_bytes);
4148 dump_space_info(sinfo, num_bytes); 4441 dump_space_info(sinfo, num_bytes, 1);
4149 } 4442 }
4150 4443
4151 return ret; 4444 return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0cb88f8146ea..de1793ba004a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -280,6 +280,14 @@ static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
280 return NULL; 280 return NULL;
281} 281}
282 282
283static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
284 struct extent_state *other)
285{
286 if (tree->ops && tree->ops->merge_extent_hook)
287 tree->ops->merge_extent_hook(tree->mapping->host, new,
288 other);
289}
290
283/* 291/*
284 * utility function to look for merge candidates inside a given range. 292 * utility function to look for merge candidates inside a given range.
285 * Any extents with matching state are merged together into a single 293 * Any extents with matching state are merged together into a single
@@ -303,6 +311,7 @@ static int merge_state(struct extent_io_tree *tree,
303 other = rb_entry(other_node, struct extent_state, rb_node); 311 other = rb_entry(other_node, struct extent_state, rb_node);
304 if (other->end == state->start - 1 && 312 if (other->end == state->start - 1 &&
305 other->state == state->state) { 313 other->state == state->state) {
314 merge_cb(tree, state, other);
306 state->start = other->start; 315 state->start = other->start;
307 other->tree = NULL; 316 other->tree = NULL;
308 rb_erase(&other->rb_node, &tree->state); 317 rb_erase(&other->rb_node, &tree->state);
@@ -314,33 +323,37 @@ static int merge_state(struct extent_io_tree *tree,
314 other = rb_entry(other_node, struct extent_state, rb_node); 323 other = rb_entry(other_node, struct extent_state, rb_node);
315 if (other->start == state->end + 1 && 324 if (other->start == state->end + 1 &&
316 other->state == state->state) { 325 other->state == state->state) {
326 merge_cb(tree, state, other);
317 other->start = state->start; 327 other->start = state->start;
318 state->tree = NULL; 328 state->tree = NULL;
319 rb_erase(&state->rb_node, &tree->state); 329 rb_erase(&state->rb_node, &tree->state);
320 free_extent_state(state); 330 free_extent_state(state);
331 state = NULL;
321 } 332 }
322 } 333 }
334
323 return 0; 335 return 0;
324} 336}
325 337
326static void set_state_cb(struct extent_io_tree *tree, 338static int set_state_cb(struct extent_io_tree *tree,
327 struct extent_state *state, 339 struct extent_state *state,
328 unsigned long bits) 340 unsigned long bits)
329{ 341{
330 if (tree->ops && tree->ops->set_bit_hook) { 342 if (tree->ops && tree->ops->set_bit_hook) {
331 tree->ops->set_bit_hook(tree->mapping->host, state->start, 343 return tree->ops->set_bit_hook(tree->mapping->host,
332 state->end, state->state, bits); 344 state->start, state->end,
345 state->state, bits);
333 } 346 }
347
348 return 0;
334} 349}
335 350
336static void clear_state_cb(struct extent_io_tree *tree, 351static void clear_state_cb(struct extent_io_tree *tree,
337 struct extent_state *state, 352 struct extent_state *state,
338 unsigned long bits) 353 unsigned long bits)
339{ 354{
340 if (tree->ops && tree->ops->clear_bit_hook) { 355 if (tree->ops && tree->ops->clear_bit_hook)
341 tree->ops->clear_bit_hook(tree->mapping->host, state->start, 356 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
342 state->end, state->state, bits);
343 }
344} 357}
345 358
346/* 359/*
@@ -358,6 +371,7 @@ static int insert_state(struct extent_io_tree *tree,
358 int bits) 371 int bits)
359{ 372{
360 struct rb_node *node; 373 struct rb_node *node;
374 int ret;
361 375
362 if (end < start) { 376 if (end < start) {
363 printk(KERN_ERR "btrfs end < start %llu %llu\n", 377 printk(KERN_ERR "btrfs end < start %llu %llu\n",
@@ -365,11 +379,14 @@ static int insert_state(struct extent_io_tree *tree,
365 (unsigned long long)start); 379 (unsigned long long)start);
366 WARN_ON(1); 380 WARN_ON(1);
367 } 381 }
368 if (bits & EXTENT_DIRTY)
369 tree->dirty_bytes += end - start + 1;
370 state->start = start; 382 state->start = start;
371 state->end = end; 383 state->end = end;
372 set_state_cb(tree, state, bits); 384 ret = set_state_cb(tree, state, bits);
385 if (ret)
386 return ret;
387
388 if (bits & EXTENT_DIRTY)
389 tree->dirty_bytes += end - start + 1;
373 state->state |= bits; 390 state->state |= bits;
374 node = tree_insert(&tree->state, end, &state->rb_node); 391 node = tree_insert(&tree->state, end, &state->rb_node);
375 if (node) { 392 if (node) {
@@ -387,6 +404,15 @@ static int insert_state(struct extent_io_tree *tree,
387 return 0; 404 return 0;
388} 405}
389 406
407static int split_cb(struct extent_io_tree *tree, struct extent_state *orig,
408 u64 split)
409{
410 if (tree->ops && tree->ops->split_extent_hook)
411 return tree->ops->split_extent_hook(tree->mapping->host,
412 orig, split);
413 return 0;
414}
415
390/* 416/*
391 * split a given extent state struct in two, inserting the preallocated 417 * split a given extent state struct in two, inserting the preallocated
392 * struct 'prealloc' as the newly created second half. 'split' indicates an 418 * struct 'prealloc' as the newly created second half. 'split' indicates an
@@ -405,6 +431,9 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
405 struct extent_state *prealloc, u64 split) 431 struct extent_state *prealloc, u64 split)
406{ 432{
407 struct rb_node *node; 433 struct rb_node *node;
434
435 split_cb(tree, orig, split);
436
408 prealloc->start = orig->start; 437 prealloc->start = orig->start;
409 prealloc->end = split - 1; 438 prealloc->end = split - 1;
410 prealloc->state = orig->state; 439 prealloc->state = orig->state;
@@ -542,8 +571,8 @@ hit_next:
542 if (err) 571 if (err)
543 goto out; 572 goto out;
544 if (state->end <= end) { 573 if (state->end <= end) {
545 set |= clear_state_bit(tree, state, bits, 574 set |= clear_state_bit(tree, state, bits, wake,
546 wake, delete); 575 delete);
547 if (last_end == (u64)-1) 576 if (last_end == (u64)-1)
548 goto out; 577 goto out;
549 start = last_end + 1; 578 start = last_end + 1;
@@ -561,12 +590,11 @@ hit_next:
561 prealloc = alloc_extent_state(GFP_ATOMIC); 590 prealloc = alloc_extent_state(GFP_ATOMIC);
562 err = split_state(tree, state, prealloc, end + 1); 591 err = split_state(tree, state, prealloc, end + 1);
563 BUG_ON(err == -EEXIST); 592 BUG_ON(err == -EEXIST);
564
565 if (wake) 593 if (wake)
566 wake_up(&state->wq); 594 wake_up(&state->wq);
567 595
568 set |= clear_state_bit(tree, prealloc, bits, 596 set |= clear_state_bit(tree, prealloc, bits, wake, delete);
569 wake, delete); 597
570 prealloc = NULL; 598 prealloc = NULL;
571 goto out; 599 goto out;
572 } 600 }
@@ -667,16 +695,23 @@ out:
667 return 0; 695 return 0;
668} 696}
669 697
670static void set_state_bits(struct extent_io_tree *tree, 698static int set_state_bits(struct extent_io_tree *tree,
671 struct extent_state *state, 699 struct extent_state *state,
672 int bits) 700 int bits)
673{ 701{
702 int ret;
703
704 ret = set_state_cb(tree, state, bits);
705 if (ret)
706 return ret;
707
674 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { 708 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
675 u64 range = state->end - state->start + 1; 709 u64 range = state->end - state->start + 1;
676 tree->dirty_bytes += range; 710 tree->dirty_bytes += range;
677 } 711 }
678 set_state_cb(tree, state, bits);
679 state->state |= bits; 712 state->state |= bits;
713
714 return 0;
680} 715}
681 716
682static void cache_state(struct extent_state *state, 717static void cache_state(struct extent_state *state,
@@ -758,7 +793,10 @@ hit_next:
758 goto out; 793 goto out;
759 } 794 }
760 795
761 set_state_bits(tree, state, bits); 796 err = set_state_bits(tree, state, bits);
797 if (err)
798 goto out;
799
762 cache_state(state, cached_state); 800 cache_state(state, cached_state);
763 merge_state(tree, state); 801 merge_state(tree, state);
764 if (last_end == (u64)-1) 802 if (last_end == (u64)-1)
@@ -805,7 +843,9 @@ hit_next:
805 if (err) 843 if (err)
806 goto out; 844 goto out;
807 if (state->end <= end) { 845 if (state->end <= end) {
808 set_state_bits(tree, state, bits); 846 err = set_state_bits(tree, state, bits);
847 if (err)
848 goto out;
809 cache_state(state, cached_state); 849 cache_state(state, cached_state);
810 merge_state(tree, state); 850 merge_state(tree, state);
811 if (last_end == (u64)-1) 851 if (last_end == (u64)-1)
@@ -829,11 +869,13 @@ hit_next:
829 this_end = last_start - 1; 869 this_end = last_start - 1;
830 err = insert_state(tree, prealloc, start, this_end, 870 err = insert_state(tree, prealloc, start, this_end,
831 bits); 871 bits);
832 cache_state(prealloc, cached_state);
833 prealloc = NULL;
834 BUG_ON(err == -EEXIST); 872 BUG_ON(err == -EEXIST);
835 if (err) 873 if (err) {
874 prealloc = NULL;
836 goto out; 875 goto out;
876 }
877 cache_state(prealloc, cached_state);
878 prealloc = NULL;
837 start = this_end + 1; 879 start = this_end + 1;
838 goto search_again; 880 goto search_again;
839 } 881 }
@@ -852,7 +894,11 @@ hit_next:
852 err = split_state(tree, state, prealloc, end + 1); 894 err = split_state(tree, state, prealloc, end + 1);
853 BUG_ON(err == -EEXIST); 895 BUG_ON(err == -EEXIST);
854 896
855 set_state_bits(tree, prealloc, bits); 897 err = set_state_bits(tree, prealloc, bits);
898 if (err) {
899 prealloc = NULL;
900 goto out;
901 }
856 cache_state(prealloc, cached_state); 902 cache_state(prealloc, cached_state);
857 merge_state(tree, prealloc); 903 merge_state(tree, prealloc);
858 prealloc = NULL; 904 prealloc = NULL;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 14ed16fd862d..4794ec891fed 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -60,8 +60,13 @@ struct extent_io_ops {
60 struct extent_state *state, int uptodate); 60 struct extent_state *state, int uptodate);
61 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, 61 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
62 unsigned long old, unsigned long bits); 62 unsigned long old, unsigned long bits);
63 int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, 63 int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
64 unsigned long old, unsigned long bits); 64 unsigned long bits);
65 int (*merge_extent_hook)(struct inode *inode,
66 struct extent_state *new,
67 struct extent_state *other);
68 int (*split_extent_hook)(struct inode *inode,
69 struct extent_state *orig, u64 split);
65 int (*write_cache_pages_lock_hook)(struct page *page); 70 int (*write_cache_pages_lock_hook)(struct page *page);
66}; 71};
67 72
@@ -79,10 +84,14 @@ struct extent_state {
79 u64 start; 84 u64 start;
80 u64 end; /* inclusive */ 85 u64 end; /* inclusive */
81 struct rb_node rb_node; 86 struct rb_node rb_node;
87
88 /* ADD NEW ELEMENTS AFTER THIS */
82 struct extent_io_tree *tree; 89 struct extent_io_tree *tree;
83 wait_queue_head_t wq; 90 wait_queue_head_t wq;
84 atomic_t refs; 91 atomic_t refs;
85 unsigned long state; 92 unsigned long state;
93 u64 split_start;
94 u64 split_end;
86 95
87 /* for use by the FS */ 96 /* for use by the FS */
88 u64 private; 97 u64 private;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a3492a3ad96b..f19e1259a971 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -123,7 +123,10 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 123 root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
124 124
125 end_of_last_block = start_pos + num_bytes - 1; 125 end_of_last_block = start_pos + num_bytes - 1;
126 btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); 126 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
127 if (err)
128 return err;
129
127 for (i = 0; i < num_pages; i++) { 130 for (i = 0; i < num_pages; i++) {
128 struct page *p = pages[i]; 131 struct page *p = pages[i];
129 SetPageUptodate(p); 132 SetPageUptodate(p);
@@ -917,21 +920,35 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
917 start_pos = pos; 920 start_pos = pos;
918 921
919 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 922 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
923
924 /* do the reserve before the mutex lock in case we have to do some
925 * flushing. We wouldn't deadlock, but this is more polite.
926 */
927 err = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
928 if (err)
929 goto out_nolock;
930
931 mutex_lock(&inode->i_mutex);
932
920 current->backing_dev_info = inode->i_mapping->backing_dev_info; 933 current->backing_dev_info = inode->i_mapping->backing_dev_info;
921 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 934 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
922 if (err) 935 if (err)
923 goto out_nolock; 936 goto out;
937
924 if (count == 0) 938 if (count == 0)
925 goto out_nolock; 939 goto out;
926 940
927 err = file_remove_suid(file); 941 err = file_remove_suid(file);
928 if (err) 942 if (err)
929 goto out_nolock; 943 goto out;
944
930 file_update_time(file); 945 file_update_time(file);
931 946
932 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 947 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
933 948
934 mutex_lock(&inode->i_mutex); 949 /* generic_write_checks can change our pos */
950 start_pos = pos;
951
935 BTRFS_I(inode)->sequence++; 952 BTRFS_I(inode)->sequence++;
936 first_index = pos >> PAGE_CACHE_SHIFT; 953 first_index = pos >> PAGE_CACHE_SHIFT;
937 last_index = (pos + count) >> PAGE_CACHE_SHIFT; 954 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
@@ -1005,9 +1022,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1005 } 1022 }
1006 1023
1007 if (will_write) { 1024 if (will_write) {
1008 btrfs_fdatawrite_range(inode->i_mapping, pos, 1025 filemap_fdatawrite_range(inode->i_mapping, pos,
1009 pos + write_bytes - 1, 1026 pos + write_bytes - 1);
1010 WB_SYNC_ALL);
1011 } else { 1027 } else {
1012 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1028 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
1013 num_pages); 1029 num_pages);
@@ -1028,6 +1044,7 @@ out:
1028 mutex_unlock(&inode->i_mutex); 1044 mutex_unlock(&inode->i_mutex);
1029 if (ret) 1045 if (ret)
1030 err = ret; 1046 err = ret;
1047 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1031 1048
1032out_nolock: 1049out_nolock:
1033 kfree(pages); 1050 kfree(pages);
@@ -1196,7 +1213,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
1196 return 0; 1213 return 0;
1197} 1214}
1198 1215
1199struct file_operations btrfs_file_operations = { 1216const struct file_operations btrfs_file_operations = {
1200 .llseek = generic_file_llseek, 1217 .llseek = generic_file_llseek,
1201 .read = do_sync_read, 1218 .read = do_sync_read,
1202 .aio_read = generic_file_aio_read, 1219 .aio_read = generic_file_aio_read,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e9b76bcd1c12..112e5aa85892 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -62,7 +62,7 @@ static const struct inode_operations btrfs_special_inode_operations;
62static const struct inode_operations btrfs_file_inode_operations; 62static const struct inode_operations btrfs_file_inode_operations;
63static const struct address_space_operations btrfs_aops; 63static const struct address_space_operations btrfs_aops;
64static const struct address_space_operations btrfs_symlink_aops; 64static const struct address_space_operations btrfs_symlink_aops;
65static struct file_operations btrfs_dir_file_operations; 65static const struct file_operations btrfs_dir_file_operations;
66static struct extent_io_ops btrfs_extent_io_ops; 66static struct extent_io_ops btrfs_extent_io_ops;
67 67
68static struct kmem_cache *btrfs_inode_cachep; 68static struct kmem_cache *btrfs_inode_cachep;
@@ -1159,6 +1159,83 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1159 return ret; 1159 return ret;
1160} 1160}
1161 1161
1162static int btrfs_split_extent_hook(struct inode *inode,
1163 struct extent_state *orig, u64 split)
1164{
1165 struct btrfs_root *root = BTRFS_I(inode)->root;
1166 u64 size;
1167
1168 if (!(orig->state & EXTENT_DELALLOC))
1169 return 0;
1170
1171 size = orig->end - orig->start + 1;
1172 if (size > root->fs_info->max_extent) {
1173 u64 num_extents;
1174 u64 new_size;
1175
1176 new_size = orig->end - split + 1;
1177 num_extents = div64_u64(size + root->fs_info->max_extent - 1,
1178 root->fs_info->max_extent);
1179
1180 /*
1181 * if we break a large extent up then leave delalloc_extents be,
1182 * since we've already accounted for the large extent.
1183 */
1184 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1185 root->fs_info->max_extent) < num_extents)
1186 return 0;
1187 }
1188
1189 BTRFS_I(inode)->delalloc_extents++;
1190
1191 return 0;
1192}
1193
1194/*
1195 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1196 * extents so we can keep track of new extents that are just merged onto old
1197 * extents, such as when we are doing sequential writes, so we can properly
1198 * account for the metadata space we'll need.
1199 */
1200static int btrfs_merge_extent_hook(struct inode *inode,
1201 struct extent_state *new,
1202 struct extent_state *other)
1203{
1204 struct btrfs_root *root = BTRFS_I(inode)->root;
1205 u64 new_size, old_size;
1206 u64 num_extents;
1207
1208 /* not delalloc, ignore it */
1209 if (!(other->state & EXTENT_DELALLOC))
1210 return 0;
1211
1212 old_size = other->end - other->start + 1;
1213 if (new->start < other->start)
1214 new_size = other->end - new->start + 1;
1215 else
1216 new_size = new->end - other->start + 1;
1217
1218 /* we're not bigger than the max, unreserve the space and go */
1219 if (new_size <= root->fs_info->max_extent) {
1220 BTRFS_I(inode)->delalloc_extents--;
1221 return 0;
1222 }
1223
1224 /*
1225 * If we grew by another max_extent, just return, we want to keep that
1226 * reserved amount.
1227 */
1228 num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
1229 root->fs_info->max_extent);
1230 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1231 root->fs_info->max_extent) > num_extents)
1232 return 0;
1233
1234 BTRFS_I(inode)->delalloc_extents--;
1235
1236 return 0;
1237}
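
Both hooks rely on the same ceiling division to decide how many max_extent-sized pieces a delalloc range spans. The sketch below demonstrates that rounding with an assumed 128 MiB max_extent; the sizes are arbitrary:

#include <stdio.h>
#include <stdint.h>

/* same rounding the split/merge hooks use: how many max_extent pieces */
static uint64_t extent_pieces(uint64_t size, uint64_t max_extent)
{
	return (size + max_extent - 1) / max_extent;	/* ceiling division */
}

int main(void)
{
	uint64_t max_extent = 128ULL << 20;	/* assumed 128 MiB limit */
	uint64_t old_size   = 200ULL << 20;	/* 200 MiB delalloc range */
	uint64_t new_size   = 260ULL << 20;	/* after merging a neighbour */

	/* 200 MiB -> 2 pieces, 260 MiB -> 3 pieces: the merge keeps its
	 * reservation because the piece count grew, mirroring the early
	 * return in the merge hook above */
	printf("%llu -> %llu pieces\n",
	       (unsigned long long)extent_pieces(old_size, max_extent),
	       (unsigned long long)extent_pieces(new_size, max_extent));
	return 0;
}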
1238
1162/* 1239/*
1163 * extent_io.c set_bit_hook, used to track delayed allocation 1240 * extent_io.c set_bit_hook, used to track delayed allocation
1164 * bytes in this file, and to maintain the list of inodes that 1241 * bytes in this file, and to maintain the list of inodes that
@@ -1167,6 +1244,7 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1167static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, 1244static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1168 unsigned long old, unsigned long bits) 1245 unsigned long old, unsigned long bits)
1169{ 1246{
1247
1170 /* 1248 /*
1171 * set_bit and clear bit hooks normally require _irqsave/restore 1249 * set_bit and clear bit hooks normally require _irqsave/restore
1172 * but in this case, we are only testeing for the DELALLOC 1250 * but in this case, we are only testeing for the DELALLOC
@@ -1174,6 +1252,8 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1174 */ 1252 */
1175 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1253 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1176 struct btrfs_root *root = BTRFS_I(inode)->root; 1254 struct btrfs_root *root = BTRFS_I(inode)->root;
1255
1256 BTRFS_I(inode)->delalloc_extents++;
1177 btrfs_delalloc_reserve_space(root, inode, end - start + 1); 1257 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1178 spin_lock(&root->fs_info->delalloc_lock); 1258 spin_lock(&root->fs_info->delalloc_lock);
1179 BTRFS_I(inode)->delalloc_bytes += end - start + 1; 1259 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
@@ -1190,22 +1270,27 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1190/* 1270/*
1191 * extent_io.c clear_bit_hook, see set_bit_hook for why 1271 * extent_io.c clear_bit_hook, see set_bit_hook for why
1192 */ 1272 */
1193static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, 1273static int btrfs_clear_bit_hook(struct inode *inode,
1194 unsigned long old, unsigned long bits) 1274 struct extent_state *state, unsigned long bits)
1195{ 1275{
1196 /* 1276 /*
1197 * set_bit and clear bit hooks normally require _irqsave/restore 1277 * set_bit and clear bit hooks normally require _irqsave/restore
1198 * but in this case, we are only testeing for the DELALLOC 1278 * but in this case, we are only testeing for the DELALLOC
1199 * bit, which is only set or cleared with irqs on 1279 * bit, which is only set or cleared with irqs on
1200 */ 1280 */
1201 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1281 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1202 struct btrfs_root *root = BTRFS_I(inode)->root; 1282 struct btrfs_root *root = BTRFS_I(inode)->root;
1203 1283
1284 BTRFS_I(inode)->delalloc_extents--;
1285 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1286
1204 spin_lock(&root->fs_info->delalloc_lock); 1287 spin_lock(&root->fs_info->delalloc_lock);
1205 if (end - start + 1 > root->fs_info->delalloc_bytes) { 1288 if (state->end - state->start + 1 >
1289 root->fs_info->delalloc_bytes) {
1206 printk(KERN_INFO "btrfs warning: delalloc account " 1290 printk(KERN_INFO "btrfs warning: delalloc account "
1207 "%llu %llu\n", 1291 "%llu %llu\n",
1208 (unsigned long long)end - start + 1, 1292 (unsigned long long)
1293 state->end - state->start + 1,
1209 (unsigned long long) 1294 (unsigned long long)
1210 root->fs_info->delalloc_bytes); 1295 root->fs_info->delalloc_bytes);
1211 btrfs_delalloc_free_space(root, inode, (u64)-1); 1296 btrfs_delalloc_free_space(root, inode, (u64)-1);
@@ -1213,9 +1298,12 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1213 BTRFS_I(inode)->delalloc_bytes = 0; 1298 BTRFS_I(inode)->delalloc_bytes = 0;
1214 } else { 1299 } else {
1215 btrfs_delalloc_free_space(root, inode, 1300 btrfs_delalloc_free_space(root, inode,
1216 end - start + 1); 1301 state->end -
1217 root->fs_info->delalloc_bytes -= end - start + 1; 1302 state->start + 1);
1218 BTRFS_I(inode)->delalloc_bytes -= end - start + 1; 1303 root->fs_info->delalloc_bytes -= state->end -
1304 state->start + 1;
1305 BTRFS_I(inode)->delalloc_bytes -= state->end -
1306 state->start + 1;
1219 } 1307 }
1220 if (BTRFS_I(inode)->delalloc_bytes == 0 && 1308 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1221 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1309 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
@@ -2950,7 +3038,12 @@ again:
2950 goto again; 3038 goto again;
2951 } 3039 }
2952 3040
2953 btrfs_set_extent_delalloc(inode, page_start, page_end); 3041 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3042 if (ret) {
3043 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3044 goto out_unlock;
3045 }
3046
2954 ret = 0; 3047 ret = 0;
2955 if (offset != PAGE_CACHE_SIZE) { 3048 if (offset != PAGE_CACHE_SIZE) {
2956 kaddr = kmap(page); 3049 kaddr = kmap(page);
@@ -2981,15 +3074,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
2981 u64 last_byte; 3074 u64 last_byte;
2982 u64 cur_offset; 3075 u64 cur_offset;
2983 u64 hole_size; 3076 u64 hole_size;
2984 int err; 3077 int err = 0;
2985 3078
2986 if (size <= hole_start) 3079 if (size <= hole_start)
2987 return 0; 3080 return 0;
2988 3081
2989 err = btrfs_check_metadata_free_space(root);
2990 if (err)
2991 return err;
2992
2993 btrfs_truncate_page(inode->i_mapping, inode->i_size); 3082 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2994 3083
2995 while (1) { 3084 while (1) {
@@ -3024,12 +3113,18 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3024 cur_offset, &hint_byte, 1); 3113 cur_offset, &hint_byte, 1);
3025 if (err) 3114 if (err)
3026 break; 3115 break;
3116
3117 err = btrfs_reserve_metadata_space(root, 1);
3118 if (err)
3119 break;
3120
3027 err = btrfs_insert_file_extent(trans, root, 3121 err = btrfs_insert_file_extent(trans, root,
3028 inode->i_ino, cur_offset, 0, 3122 inode->i_ino, cur_offset, 0,
3029 0, hole_size, 0, hole_size, 3123 0, hole_size, 0, hole_size,
3030 0, 0, 0); 3124 0, 0, 0);
3031 btrfs_drop_extent_cache(inode, hole_start, 3125 btrfs_drop_extent_cache(inode, hole_start,
3032 last_byte - 1, 0); 3126 last_byte - 1, 0);
3127 btrfs_unreserve_metadata_space(root, 1);
3033 } 3128 }
3034 free_extent_map(em); 3129 free_extent_map(em);
3035 cur_offset = last_byte; 3130 cur_offset = last_byte;
@@ -3990,11 +4085,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3990 if (!new_valid_dev(rdev)) 4085 if (!new_valid_dev(rdev))
3991 return -EINVAL; 4086 return -EINVAL;
3992 4087
3993 err = btrfs_check_metadata_free_space(root); 4088 /*
4089 * 2 for inode item and ref
4090 * 2 for dir items
4091 * 1 for xattr if selinux is on
4092 */
4093 err = btrfs_reserve_metadata_space(root, 5);
3994 if (err) 4094 if (err)
3995 goto fail; 4095 return err;
3996 4096
3997 trans = btrfs_start_transaction(root, 1); 4097 trans = btrfs_start_transaction(root, 1);
4098 if (!trans)
4099 goto fail;
3998 btrfs_set_trans_block_group(trans, dir); 4100 btrfs_set_trans_block_group(trans, dir);
3999 4101
4000 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4102 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4032,6 +4134,7 @@ out_unlock:
4032 nr = trans->blocks_used; 4134 nr = trans->blocks_used;
4033 btrfs_end_transaction_throttle(trans, root); 4135 btrfs_end_transaction_throttle(trans, root);
4034fail: 4136fail:
4137 btrfs_unreserve_metadata_space(root, 5);
4035 if (drop_inode) { 4138 if (drop_inode) {
4036 inode_dec_link_count(inode); 4139 inode_dec_link_count(inode);
4037 iput(inode); 4140 iput(inode);
@@ -4052,10 +4155,18 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4052 u64 objectid; 4155 u64 objectid;
4053 u64 index = 0; 4156 u64 index = 0;
4054 4157
4055 err = btrfs_check_metadata_free_space(root); 4158 /*
4159 * 2 for inode item and ref
4160 * 2 for dir items
4161 * 1 for xattr if selinux is on
4162 */
4163 err = btrfs_reserve_metadata_space(root, 5);
4056 if (err) 4164 if (err)
4057 goto fail; 4165 return err;
4166
4058 trans = btrfs_start_transaction(root, 1); 4167 trans = btrfs_start_transaction(root, 1);
4168 if (!trans)
4169 goto fail;
4059 btrfs_set_trans_block_group(trans, dir); 4170 btrfs_set_trans_block_group(trans, dir);
4060 4171
4061 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4172 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -4096,6 +4207,7 @@ out_unlock:
4096 nr = trans->blocks_used; 4207 nr = trans->blocks_used;
4097 btrfs_end_transaction_throttle(trans, root); 4208 btrfs_end_transaction_throttle(trans, root);
4098fail: 4209fail:
4210 btrfs_unreserve_metadata_space(root, 5);
4099 if (drop_inode) { 4211 if (drop_inode) {
4100 inode_dec_link_count(inode); 4212 inode_dec_link_count(inode);
4101 iput(inode); 4213 iput(inode);
@@ -4118,10 +4230,16 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4118 if (inode->i_nlink == 0) 4230 if (inode->i_nlink == 0)
4119 return -ENOENT; 4231 return -ENOENT;
4120 4232
4121 btrfs_inc_nlink(inode); 4233 /*
4122 err = btrfs_check_metadata_free_space(root); 4234 * 1 item for inode ref
4235 * 2 items for dir items
4236 */
4237 err = btrfs_reserve_metadata_space(root, 3);
4123 if (err) 4238 if (err)
4124 goto fail; 4239 return err;
4240
4241 btrfs_inc_nlink(inode);
4242
4125 err = btrfs_set_inode_index(dir, &index); 4243 err = btrfs_set_inode_index(dir, &index);
4126 if (err) 4244 if (err)
4127 goto fail; 4245 goto fail;
@@ -4145,6 +4263,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4145 nr = trans->blocks_used; 4263 nr = trans->blocks_used;
4146 btrfs_end_transaction_throttle(trans, root); 4264 btrfs_end_transaction_throttle(trans, root);
4147fail: 4265fail:
4266 btrfs_unreserve_metadata_space(root, 3);
4148 if (drop_inode) { 4267 if (drop_inode) {
4149 inode_dec_link_count(inode); 4268 inode_dec_link_count(inode);
4150 iput(inode); 4269 iput(inode);
@@ -4164,17 +4283,21 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4164 u64 index = 0; 4283 u64 index = 0;
4165 unsigned long nr = 1; 4284 unsigned long nr = 1;
4166 4285
4167 err = btrfs_check_metadata_free_space(root); 4286 /*
4287 * 2 items for inode and ref
4288 * 2 items for dir items
4289 * 1 for xattr if selinux is on
4290 */
4291 err = btrfs_reserve_metadata_space(root, 5);
4168 if (err) 4292 if (err)
4169 goto out_unlock; 4293 return err;
4170 4294
4171 trans = btrfs_start_transaction(root, 1); 4295 trans = btrfs_start_transaction(root, 1);
4172 btrfs_set_trans_block_group(trans, dir); 4296 if (!trans) {
4173 4297 err = -ENOMEM;
4174 if (IS_ERR(trans)) {
4175 err = PTR_ERR(trans);
4176 goto out_unlock; 4298 goto out_unlock;
4177 } 4299 }
4300 btrfs_set_trans_block_group(trans, dir);
4178 4301
4179 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 4302 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4180 if (err) { 4303 if (err) {
@@ -4223,6 +4346,7 @@ out_fail:
4223 btrfs_end_transaction_throttle(trans, root); 4346 btrfs_end_transaction_throttle(trans, root);
4224 4347
4225out_unlock: 4348out_unlock:
4349 btrfs_unreserve_metadata_space(root, 5);
4226 if (drop_on_err) 4350 if (drop_on_err)
4227 iput(inode); 4351 iput(inode);
4228 btrfs_btree_balance_dirty(root, nr); 4352 btrfs_btree_balance_dirty(root, nr);
@@ -4747,6 +4871,13 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4747 goto out; 4871 goto out;
4748 } 4872 }
4749 4873
4874 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
4875 if (ret) {
4876 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4877 ret = VM_FAULT_SIGBUS;
4878 goto out;
4879 }
4880
4750 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 4881 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4751again: 4882again:
4752 lock_page(page); 4883 lock_page(page);
@@ -4778,7 +4909,23 @@ again:
4778 goto again; 4909 goto again;
4779 } 4910 }
4780 4911
4781 btrfs_set_extent_delalloc(inode, page_start, page_end); 4912 /*
4913 * XXX - page_mkwrite gets called every time the page is dirtied, even
4914 * if it was already dirty, so for space accounting reasons we need to
4915 * clear any delalloc bits for the range we are fixing to save. There
4916 * is probably a better way to do this, but for now keep consistent with
4917 * prepare_pages in the normal write path.
4918 */
4919 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
4920 EXTENT_DIRTY | EXTENT_DELALLOC, GFP_NOFS);
4921
4922 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
4923 if (ret) {
4924 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4925 ret = VM_FAULT_SIGBUS;
4926 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4927 goto out_unlock;
4928 }
4782 ret = 0; 4929 ret = 0;
4783 4930
4784 /* page is wholly or partially inside EOF */ 4931 /* page is wholly or partially inside EOF */
@@ -4801,6 +4948,7 @@ again:
4801 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 4948 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4802 4949
4803out_unlock: 4950out_unlock:
4951 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
4804 if (!ret) 4952 if (!ret)
4805 return VM_FAULT_LOCKED; 4953 return VM_FAULT_LOCKED;
4806 unlock_page(page); 4954 unlock_page(page);
@@ -4917,6 +5065,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
4917 return NULL; 5065 return NULL;
4918 ei->last_trans = 0; 5066 ei->last_trans = 0;
4919 ei->logged_trans = 0; 5067 ei->logged_trans = 0;
5068 ei->delalloc_extents = 0;
5069 ei->delalloc_reserved_extents = 0;
4920 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 5070 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4921 INIT_LIST_HEAD(&ei->i_orphan); 5071 INIT_LIST_HEAD(&ei->i_orphan);
4922 INIT_LIST_HEAD(&ei->ordered_operations); 5072 INIT_LIST_HEAD(&ei->ordered_operations);
@@ -5070,7 +5220,12 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5070 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 5220 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5071 return -ENOTEMPTY; 5221 return -ENOTEMPTY;
5072 5222
5073 ret = btrfs_check_metadata_free_space(root); 5223 /*
5224 * 2 items for dir items
5225 * 1 item for orphan entry
5226 * 1 item for ref
5227 */
5228 ret = btrfs_reserve_metadata_space(root, 4);
5074 if (ret) 5229 if (ret)
5075 return ret; 5230 return ret;
5076 5231
@@ -5185,6 +5340,8 @@ out_fail:
5185 5340
5186 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 5341 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5187 up_read(&root->fs_info->subvol_sem); 5342 up_read(&root->fs_info->subvol_sem);
5343
5344 btrfs_unreserve_metadata_space(root, 4);
5188 return ret; 5345 return ret;
5189} 5346}
5190 5347
@@ -5256,11 +5413,18 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5256 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 5413 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5257 return -ENAMETOOLONG; 5414 return -ENAMETOOLONG;
5258 5415
5259 err = btrfs_check_metadata_free_space(root); 5416 /*
5417 * 2 items for inode item and ref
5418 * 2 items for dir items
5419 * 1 item for xattr if selinux is on
5420 */
5421 err = btrfs_reserve_metadata_space(root, 5);
5260 if (err) 5422 if (err)
5261 goto out_fail; 5423 return err;
5262 5424
5263 trans = btrfs_start_transaction(root, 1); 5425 trans = btrfs_start_transaction(root, 1);
5426 if (!trans)
5427 goto out_fail;
5264 btrfs_set_trans_block_group(trans, dir); 5428 btrfs_set_trans_block_group(trans, dir);
5265 5429
5266 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); 5430 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
@@ -5341,6 +5505,7 @@ out_unlock:
5341 nr = trans->blocks_used; 5505 nr = trans->blocks_used;
5342 btrfs_end_transaction_throttle(trans, root); 5506 btrfs_end_transaction_throttle(trans, root);
5343out_fail: 5507out_fail:
5508 btrfs_unreserve_metadata_space(root, 5);
5344 if (drop_inode) { 5509 if (drop_inode) {
5345 inode_dec_link_count(inode); 5510 inode_dec_link_count(inode);
5346 iput(inode); 5511 iput(inode);
@@ -5362,6 +5527,11 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
5362 5527
5363 while (num_bytes > 0) { 5528 while (num_bytes > 0) {
5364 alloc_size = min(num_bytes, root->fs_info->max_extent); 5529 alloc_size = min(num_bytes, root->fs_info->max_extent);
5530
5531 ret = btrfs_reserve_metadata_space(root, 1);
5532 if (ret)
5533 goto out;
5534
5365 ret = btrfs_reserve_extent(trans, root, alloc_size, 5535 ret = btrfs_reserve_extent(trans, root, alloc_size,
5366 root->sectorsize, 0, alloc_hint, 5536 root->sectorsize, 0, alloc_hint,
5367 (u64)-1, &ins, 1); 5537 (u64)-1, &ins, 1);
@@ -5381,6 +5551,7 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
5381 num_bytes -= ins.offset; 5551 num_bytes -= ins.offset;
5382 cur_offset += ins.offset; 5552 cur_offset += ins.offset;
5383 alloc_hint = ins.objectid + ins.offset; 5553 alloc_hint = ins.objectid + ins.offset;
5554 btrfs_unreserve_metadata_space(root, 1);
5384 } 5555 }
5385out: 5556out:
5386 if (cur_offset > start) { 5557 if (cur_offset > start) {
@@ -5544,7 +5715,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
5544 .permission = btrfs_permission, 5715 .permission = btrfs_permission,
5545}; 5716};
5546 5717
5547static struct file_operations btrfs_dir_file_operations = { 5718static const struct file_operations btrfs_dir_file_operations = {
5548 .llseek = generic_file_llseek, 5719 .llseek = generic_file_llseek,
5549 .read = generic_read_dir, 5720 .read = generic_read_dir,
5550 .readdir = btrfs_real_readdir, 5721 .readdir = btrfs_real_readdir,
@@ -5566,6 +5737,8 @@ static struct extent_io_ops btrfs_extent_io_ops = {
5566 .readpage_io_failed_hook = btrfs_io_failed_hook, 5737 .readpage_io_failed_hook = btrfs_io_failed_hook,
5567 .set_bit_hook = btrfs_set_bit_hook, 5738 .set_bit_hook = btrfs_set_bit_hook,
5568 .clear_bit_hook = btrfs_clear_bit_hook, 5739 .clear_bit_hook = btrfs_clear_bit_hook,
5740 .merge_extent_hook = btrfs_merge_extent_hook,
5741 .split_extent_hook = btrfs_split_extent_hook,
5569}; 5742};
5570 5743
5571/* 5744/*
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a8577a7f26ab..9a780c8d0ac8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -239,7 +239,13 @@ static noinline int create_subvol(struct btrfs_root *root,
239 u64 index = 0; 239 u64 index = 0;
240 unsigned long nr = 1; 240 unsigned long nr = 1;
241 241
242 ret = btrfs_check_metadata_free_space(root); 242 /*
243 * 1 - inode item
244 * 2 - refs
245 * 1 - root item
246 * 2 - dir items
247 */
248 ret = btrfs_reserve_metadata_space(root, 6);
243 if (ret) 249 if (ret)
244 return ret; 250 return ret;
245 251
@@ -340,6 +346,9 @@ fail:
340 err = btrfs_commit_transaction(trans, root); 346 err = btrfs_commit_transaction(trans, root);
341 if (err && !ret) 347 if (err && !ret)
342 ret = err; 348 ret = err;
349
350 btrfs_unreserve_metadata_space(root, 6);
351 btrfs_btree_balance_dirty(root, nr);
343 return ret; 352 return ret;
344} 353}
345 354
@@ -355,19 +364,27 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
355 if (!root->ref_cows) 364 if (!root->ref_cows)
356 return -EINVAL; 365 return -EINVAL;
357 366
358 ret = btrfs_check_metadata_free_space(root); 367 /*
368 * 1 - inode item
369 * 2 - refs
370 * 1 - root item
371 * 2 - dir items
372 */
373 ret = btrfs_reserve_metadata_space(root, 6);
359 if (ret) 374 if (ret)
360 goto fail_unlock; 375 goto fail_unlock;
361 376
362 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); 377 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
363 if (!pending_snapshot) { 378 if (!pending_snapshot) {
364 ret = -ENOMEM; 379 ret = -ENOMEM;
380 btrfs_unreserve_metadata_space(root, 6);
365 goto fail_unlock; 381 goto fail_unlock;
366 } 382 }
367 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS); 383 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
368 if (!pending_snapshot->name) { 384 if (!pending_snapshot->name) {
369 ret = -ENOMEM; 385 ret = -ENOMEM;
370 kfree(pending_snapshot); 386 kfree(pending_snapshot);
387 btrfs_unreserve_metadata_space(root, 6);
371 goto fail_unlock; 388 goto fail_unlock;
372 } 389 }
373 memcpy(pending_snapshot->name, name, namelen); 390 memcpy(pending_snapshot->name, name, namelen);
@@ -1215,15 +1232,15 @@ static long btrfs_ioctl_trans_start(struct file *file)
1215 struct inode *inode = fdentry(file)->d_inode; 1232 struct inode *inode = fdentry(file)->d_inode;
1216 struct btrfs_root *root = BTRFS_I(inode)->root; 1233 struct btrfs_root *root = BTRFS_I(inode)->root;
1217 struct btrfs_trans_handle *trans; 1234 struct btrfs_trans_handle *trans;
1218 int ret = 0; 1235 int ret;
1219 1236
1237 ret = -EPERM;
1220 if (!capable(CAP_SYS_ADMIN)) 1238 if (!capable(CAP_SYS_ADMIN))
1221 return -EPERM; 1239 goto out;
1222 1240
1223 if (file->private_data) { 1241 ret = -EINPROGRESS;
1224 ret = -EINPROGRESS; 1242 if (file->private_data)
1225 goto out; 1243 goto out;
1226 }
1227 1244
1228 ret = mnt_want_write(file->f_path.mnt); 1245 ret = mnt_want_write(file->f_path.mnt);
1229 if (ret) 1246 if (ret)
@@ -1233,12 +1250,19 @@ static long btrfs_ioctl_trans_start(struct file *file)
1233 root->fs_info->open_ioctl_trans++; 1250 root->fs_info->open_ioctl_trans++;
1234 mutex_unlock(&root->fs_info->trans_mutex); 1251 mutex_unlock(&root->fs_info->trans_mutex);
1235 1252
1253 ret = -ENOMEM;
1236 trans = btrfs_start_ioctl_transaction(root, 0); 1254 trans = btrfs_start_ioctl_transaction(root, 0);
1237 if (trans) 1255 if (!trans)
1238 file->private_data = trans; 1256 goto out_drop;
1239 else 1257
1240 ret = -ENOMEM; 1258 file->private_data = trans;
1241 /*printk(KERN_INFO "btrfs_ioctl_trans_start on %p\n", file);*/ 1259 return 0;
1260
1261out_drop:
1262 mutex_lock(&root->fs_info->trans_mutex);
1263 root->fs_info->open_ioctl_trans--;
1264 mutex_unlock(&root->fs_info->trans_mutex);
1265 mnt_drop_write(file->f_path.mnt);
1242out: 1266out:
1243 return ret; 1267 return ret;
1244} 1268}
@@ -1254,24 +1278,20 @@ long btrfs_ioctl_trans_end(struct file *file)
1254 struct inode *inode = fdentry(file)->d_inode; 1278 struct inode *inode = fdentry(file)->d_inode;
1255 struct btrfs_root *root = BTRFS_I(inode)->root; 1279 struct btrfs_root *root = BTRFS_I(inode)->root;
1256 struct btrfs_trans_handle *trans; 1280 struct btrfs_trans_handle *trans;
1257 int ret = 0;
1258 1281
1259 trans = file->private_data; 1282 trans = file->private_data;
1260 if (!trans) { 1283 if (!trans)
1261 ret = -EINVAL; 1284 return -EINVAL;
1262 goto out;
1263 }
1264 btrfs_end_transaction(trans, root);
1265 file->private_data = NULL; 1285 file->private_data = NULL;
1266 1286
1287 btrfs_end_transaction(trans, root);
1288
1267 mutex_lock(&root->fs_info->trans_mutex); 1289 mutex_lock(&root->fs_info->trans_mutex);
1268 root->fs_info->open_ioctl_trans--; 1290 root->fs_info->open_ioctl_trans--;
1269 mutex_unlock(&root->fs_info->trans_mutex); 1291 mutex_unlock(&root->fs_info->trans_mutex);
1270 1292
1271 mnt_drop_write(file->f_path.mnt); 1293 mnt_drop_write(file->f_path.mnt);
1272 1294 return 0;
1273out:
1274 return ret;
1275} 1295}
1276 1296
1277long btrfs_ioctl(struct file *file, unsigned int 1297long btrfs_ioctl(struct file *file, unsigned int
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b5d6d24726b0..897fba835f89 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -458,7 +458,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
458 * start IO on any dirty ones so the wait doesn't stall waiting 458 * start IO on any dirty ones so the wait doesn't stall waiting
459 * for pdflush to find them 459 * for pdflush to find them
460 */ 460 */
461 btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL); 461 filemap_fdatawrite_range(inode->i_mapping, start, end);
462 if (wait) { 462 if (wait) {
463 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, 463 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
464 &entry->flags)); 464 &entry->flags));
@@ -488,17 +488,15 @@ again:
488 /* start IO across the range first to instantiate any delalloc 488 /* start IO across the range first to instantiate any delalloc
489 * extents 489 * extents
490 */ 490 */
491 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); 491 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
492 492
493 /* The compression code will leave pages locked but return from 493 /* The compression code will leave pages locked but return from
494 * writepage without setting the page writeback. Starting again 494 * writepage without setting the page writeback. Starting again
495 * with WB_SYNC_ALL will end up waiting for the IO to actually start. 495 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
496 */ 496 */
497 btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); 497 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
498 498
499 btrfs_wait_on_page_writeback_range(inode->i_mapping, 499 filemap_fdatawait_range(inode->i_mapping, start, orig_end);
500 start >> PAGE_CACHE_SHIFT,
501 orig_end >> PAGE_CACHE_SHIFT);
502 500
503 end = orig_end; 501 end = orig_end;
504 found = 0; 502 found = 0;
@@ -716,89 +714,6 @@ out:
716} 714}
717 715
718 716
719/**
720 * taken from mm/filemap.c because it isn't exported
721 *
722 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
723 * @mapping: address space structure to write
724 * @start: offset in bytes where the range starts
725 * @end: offset in bytes where the range ends (inclusive)
726 * @sync_mode: enable synchronous operation
727 *
728 * Start writeback against all of a mapping's dirty pages that lie
729 * within the byte offsets <start, end> inclusive.
730 *
731 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
732 * opposed to a regular memory cleansing writeback. The difference between
733 * these two operations is that if a dirty page/buffer is encountered, it must
734 * be waited upon, and not just skipped over.
735 */
736int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
737 loff_t end, int sync_mode)
738{
739 struct writeback_control wbc = {
740 .sync_mode = sync_mode,
741 .nr_to_write = mapping->nrpages * 2,
742 .range_start = start,
743 .range_end = end,
744 };
745 return btrfs_writepages(mapping, &wbc);
746}
747
748/**
749 * taken from mm/filemap.c because it isn't exported
750 *
751 * wait_on_page_writeback_range - wait for writeback to complete
752 * @mapping: target address_space
753 * @start: beginning page index
754 * @end: ending page index
755 *
756 * Wait for writeback to complete against pages indexed by start->end
757 * inclusive
758 */
759int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
760 pgoff_t start, pgoff_t end)
761{
762 struct pagevec pvec;
763 int nr_pages;
764 int ret = 0;
765 pgoff_t index;
766
767 if (end < start)
768 return 0;
769
770 pagevec_init(&pvec, 0);
771 index = start;
772 while ((index <= end) &&
773 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
774 PAGECACHE_TAG_WRITEBACK,
775 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
776 unsigned i;
777
778 for (i = 0; i < nr_pages; i++) {
779 struct page *page = pvec.pages[i];
780
781 /* until radix tree lookup accepts end_index */
782 if (page->index > end)
783 continue;
784
785 wait_on_page_writeback(page);
786 if (PageError(page))
787 ret = -EIO;
788 }
789 pagevec_release(&pvec);
790 cond_resched();
791 }
792
793 /* Check for outstanding write errors */
794 if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
795 ret = -ENOSPC;
796 if (test_and_clear_bit(AS_EIO, &mapping->flags))
797 ret = -EIO;
798
799 return ret;
800}
801
802/* 717/*
803 * add a given inode to the list of inodes that must be fully on 718 * add a given inode to the list of inodes that must be fully on
804 * disk before a transaction commit finishes. 719 * disk before a transaction commit finishes.
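Editor's note: the ordered-data.c hunks drop btrfs's private copies of the filemap helpers in favour of the exported filemap_fdatawrite_range() and filemap_fdatawait_range(), both of which take inclusive byte offsets. (btrfs issues the write twice because, as the in-hunk comment says, the compression path can return from writepage before setting writeback; the sketch below keeps a single write for brevity, and the demo_ wrapper is illustrative only.)

#include <linux/fs.h>

/* Editor's sketch, not part of the patch. */
static int demo_flush_byte_range(struct inode *inode, loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	int ret;

	/* start writeback for dirty pages in [start, end] */
	ret = filemap_fdatawrite_range(mapping, start, end);
	if (ret)
		return ret;

	/* wait for the writeback started above to complete */
	return filemap_fdatawait_range(mapping, start, end);
}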
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 993a7ea45c70..f82e87488ca8 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -153,10 +153,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
153int btrfs_ordered_update_i_size(struct inode *inode, 153int btrfs_ordered_update_i_size(struct inode *inode,
154 struct btrfs_ordered_extent *ordered); 154 struct btrfs_ordered_extent *ordered);
155int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); 155int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
156int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
157 pgoff_t start, pgoff_t end);
158int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
159 loff_t end, int sync_mode);
160int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only); 156int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
161int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); 157int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
162int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 158int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 67035385444c..9de9b2236419 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -344,7 +344,9 @@ static int btrfs_fill_super(struct super_block *sb,
344 sb->s_export_op = &btrfs_export_ops; 344 sb->s_export_op = &btrfs_export_ops;
345 sb->s_xattr = btrfs_xattr_handlers; 345 sb->s_xattr = btrfs_xattr_handlers;
346 sb->s_time_gran = 1; 346 sb->s_time_gran = 1;
347#ifdef CONFIG_BTRFS_POSIX_ACL
347 sb->s_flags |= MS_POSIXACL; 348 sb->s_flags |= MS_POSIXACL;
349#endif
348 350
349 tree_root = open_ctree(sb, fs_devices, (char *)data); 351 tree_root = open_ctree(sb, fs_devices, (char *)data);
350 352
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 88f866f85e7a..0b8f36d4400a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -186,6 +186,9 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
186 h->alloc_exclude_start = 0; 186 h->alloc_exclude_start = 0;
187 h->delayed_ref_updates = 0; 187 h->delayed_ref_updates = 0;
188 188
189 if (!current->journal_info)
190 current->journal_info = h;
191
189 root->fs_info->running_transaction->use_count++; 192 root->fs_info->running_transaction->use_count++;
190 record_root_in_trans(h, root); 193 record_root_in_trans(h, root);
191 mutex_unlock(&root->fs_info->trans_mutex); 194 mutex_unlock(&root->fs_info->trans_mutex);
@@ -317,6 +320,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
317 wake_up(&cur_trans->writer_wait); 320 wake_up(&cur_trans->writer_wait);
318 put_transaction(cur_trans); 321 put_transaction(cur_trans);
319 mutex_unlock(&info->trans_mutex); 322 mutex_unlock(&info->trans_mutex);
323
324 if (current->journal_info == trans)
325 current->journal_info = NULL;
320 memset(trans, 0, sizeof(*trans)); 326 memset(trans, 0, sizeof(*trans));
321 kmem_cache_free(btrfs_trans_handle_cachep, trans); 327 kmem_cache_free(btrfs_trans_handle_cachep, trans);
322 328
@@ -743,6 +749,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
743 memcpy(&pending->root_key, &key, sizeof(key)); 749 memcpy(&pending->root_key, &key, sizeof(key));
744fail: 750fail:
745 kfree(new_root_item); 751 kfree(new_root_item);
752 btrfs_unreserve_metadata_space(root, 6);
746 return ret; 753 return ret;
747} 754}
748 755
@@ -1059,6 +1066,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1059 1066
1060 mutex_unlock(&root->fs_info->trans_mutex); 1067 mutex_unlock(&root->fs_info->trans_mutex);
1061 1068
1069 if (current->journal_info == trans)
1070 current->journal_info = NULL;
1071
1062 kmem_cache_free(btrfs_trans_handle_cachep, trans); 1072 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1063 return ret; 1073 return ret;
1064} 1074}
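Editor's note: the transaction.c hunks make btrfs record its running handle in current->journal_info and clear it again on end/commit, guarded so a nested handle never clobbers the outer one. A stripped-down sketch of that pattern (demo_ names are hypothetical):

#include <linux/sched.h>

/* Editor's sketch, not part of the patch. */
static void demo_note_transaction(void *handle)
{
	/* only the outermost handle claims the per-task slot */
	if (!current->journal_info)
		current->journal_info = handle;
}

static void demo_forget_transaction(void *handle)
{
	/* clear the slot only if it still points at our own handle */
	if (current->journal_info == handle)
		current->journal_info = NULL;
}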
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 23e7d36ff325..7eda483d7b5a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -446,8 +446,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
446 goto error; 446 goto error;
447 447
448 device->name = kstrdup(orig_dev->name, GFP_NOFS); 448 device->name = kstrdup(orig_dev->name, GFP_NOFS);
449 if (!device->name) 449 if (!device->name) {
450 kfree(device);
450 goto error; 451 goto error;
452 }
451 453
452 device->devid = orig_dev->devid; 454 device->devid = orig_dev->devid;
453 device->work.func = pending_bios_fn; 455 device->work.func = pending_bios_fn;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a9d3bf4d2689..b0fc93f95fd0 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -260,7 +260,7 @@ err:
260 * attributes are handled directly. 260 * attributes are handled directly.
261 */ 261 */
262struct xattr_handler *btrfs_xattr_handlers[] = { 262struct xattr_handler *btrfs_xattr_handlers[] = {
263#ifdef CONFIG_FS_POSIX_ACL 263#ifdef CONFIG_BTRFS_POSIX_ACL
264 &btrfs_xattr_acl_access_handler, 264 &btrfs_xattr_acl_access_handler,
265 &btrfs_xattr_acl_default_handler, 265 &btrfs_xattr_acl_default_handler,
266#endif 266#endif
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 0376ac66c44a..be4392ca2098 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -22,6 +22,7 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/major.h> 23#include <linux/major.h>
24#include <linux/time.h> 24#include <linux/time.h>
25#include <linux/sched.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/ioport.h> 27#include <linux/ioport.h>
27#include <linux/fcntl.h> 28#include <linux/fcntl.h>
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index d5c0ea2e8f2d..9f2d45d75b1a 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -26,20 +26,6 @@ config EXT4_FS
26 26
27 If unsure, say N. 27 If unsure, say N.
28 28
29config EXT4DEV_COMPAT
30 bool "Enable ext4dev compatibility"
31 depends on EXT4_FS
32 help
33 Starting with 2.6.28, the name of the ext4 filesystem was
34 renamed from ext4dev to ext4. Unfortunately there are some
35 legacy userspace programs (such as klibc's fstype) have
36 "ext4dev" hardcoded.
37
38 To enable backwards compatibility so that systems that are
39 still expecting to mount ext4 filesystems using ext4dev,
40 choose Y here. This feature will go away by 2.6.31, so
41 please arrange to get your userspace programs fixed!
42
43config EXT4_FS_XATTR 29config EXT4_FS_XATTR
44 bool "Ext4 extended attributes" 30 bool "Ext4 extended attributes"
45 depends on EXT4_FS 31 depends on EXT4_FS
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ec367bce7215..5c5bc5dafff8 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1146,8 +1146,8 @@ static int check_block_validity(struct inode *inode, const char *msg,
1146} 1146}
1147 1147
1148/* 1148/*
1149 * Return the number of dirty pages in the given inode starting at 1149 * Return the number of contiguous dirty pages in a given inode
1150 * page frame idx. 1150 * starting at page frame idx.
1151 */ 1151 */
1152static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, 1152static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1153 unsigned int max_pages) 1153 unsigned int max_pages)
@@ -1181,15 +1181,15 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1181 unlock_page(page); 1181 unlock_page(page);
1182 break; 1182 break;
1183 } 1183 }
1184 head = page_buffers(page); 1184 if (page_has_buffers(page)) {
1185 bh = head; 1185 bh = head = page_buffers(page);
1186 do { 1186 do {
1187 if (!buffer_delay(bh) && 1187 if (!buffer_delay(bh) &&
1188 !buffer_unwritten(bh)) { 1188 !buffer_unwritten(bh))
1189 done = 1; 1189 done = 1;
1190 break; 1190 bh = bh->b_this_page;
1191 } 1191 } while (!done && (bh != head));
1192 } while ((bh = bh->b_this_page) != head); 1192 }
1193 unlock_page(page); 1193 unlock_page(page);
1194 if (done) 1194 if (done)
1195 break; 1195 break;
@@ -3378,6 +3378,7 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3378 ssize_t ret; 3378 ssize_t ret;
3379 int orphan = 0; 3379 int orphan = 0;
3380 size_t count = iov_length(iov, nr_segs); 3380 size_t count = iov_length(iov, nr_segs);
3381 int retries = 0;
3381 3382
3382 if (rw == WRITE) { 3383 if (rw == WRITE) {
3383 loff_t final_size = offset + count; 3384 loff_t final_size = offset + count;
@@ -3400,9 +3401,12 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
3400 } 3401 }
3401 } 3402 }
3402 3403
3404retry:
3403 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3405 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3404 offset, nr_segs, 3406 offset, nr_segs,
3405 ext4_get_block, NULL); 3407 ext4_get_block, NULL);
3408 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3409 goto retry;
3406 3410
3407 if (orphan) { 3411 if (orphan) {
3408 int err; 3412 int err;
@@ -5612,14 +5616,12 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5612 */ 5616 */
5613void ext4_dirty_inode(struct inode *inode) 5617void ext4_dirty_inode(struct inode *inode)
5614{ 5618{
5615 handle_t *current_handle = ext4_journal_current_handle();
5616 handle_t *handle; 5619 handle_t *handle;
5617 5620
5618 handle = ext4_journal_start(inode, 2); 5621 handle = ext4_journal_start(inode, 2);
5619 if (IS_ERR(handle)) 5622 if (IS_ERR(handle))
5620 goto out; 5623 goto out;
5621 5624
5622 jbd_debug(5, "marking dirty. outer handle=%p\n", current_handle);
5623 ext4_mark_inode_dirty(handle, inode); 5625 ext4_mark_inode_dirty(handle, inode);
5624 5626
5625 ext4_journal_stop(handle); 5627 ext4_journal_stop(handle);
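Editor's note: the direct-IO hunk above adds a retry loop: when blockdev_direct_IO() fails with -ENOSPC, ext4_should_retry_alloc() decides, based on the retries counter, whether committing pending work and retrying is worthwhile. A condensed sketch of that loop, with demo_attempt_write() as a hypothetical stand-in for the real call:

#include <linux/errno.h>
#include <linux/fs.h>
#include "ext4.h"	/* ext4's private header, for ext4_should_retry_alloc() */

/* Editor's sketch, not part of the patch; demo_attempt_write() is hypothetical. */
static ssize_t demo_attempt_write(struct super_block *sb);

static ssize_t demo_write_with_retry(struct super_block *sb)
{
	int retries = 0;
	ssize_t ret;

retry:
	ret = demo_attempt_write(sb);
	if (ret == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
		goto retry;	/* space may appear once pending deletes commit */
	return ret;
}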
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 12e726a7073f..312211ee05af 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3966,27 +3966,6 @@ static struct file_system_type ext4_fs_type = {
3966 .fs_flags = FS_REQUIRES_DEV, 3966 .fs_flags = FS_REQUIRES_DEV,
3967}; 3967};
3968 3968
3969#ifdef CONFIG_EXT4DEV_COMPAT
3970static int ext4dev_get_sb(struct file_system_type *fs_type, int flags,
3971 const char *dev_name, void *data,struct vfsmount *mnt)
3972{
3973 printk(KERN_WARNING "EXT4-fs (%s): Update your userspace programs "
3974 "to mount using ext4\n", dev_name);
3975 printk(KERN_WARNING "EXT4-fs (%s): ext4dev backwards compatibility "
3976 "will go away by 2.6.31\n", dev_name);
3977 return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super,mnt);
3978}
3979
3980static struct file_system_type ext4dev_fs_type = {
3981 .owner = THIS_MODULE,
3982 .name = "ext4dev",
3983 .get_sb = ext4dev_get_sb,
3984 .kill_sb = kill_block_super,
3985 .fs_flags = FS_REQUIRES_DEV,
3986};
3987MODULE_ALIAS("ext4dev");
3988#endif
3989
3990static int __init init_ext4_fs(void) 3969static int __init init_ext4_fs(void)
3991{ 3970{
3992 int err; 3971 int err;
@@ -4011,13 +3990,6 @@ static int __init init_ext4_fs(void)
4011 err = register_filesystem(&ext4_fs_type); 3990 err = register_filesystem(&ext4_fs_type);
4012 if (err) 3991 if (err)
4013 goto out; 3992 goto out;
4014#ifdef CONFIG_EXT4DEV_COMPAT
4015 err = register_filesystem(&ext4dev_fs_type);
4016 if (err) {
4017 unregister_filesystem(&ext4_fs_type);
4018 goto out;
4019 }
4020#endif
4021 return 0; 3993 return 0;
4022out: 3994out:
4023 destroy_inodecache(); 3995 destroy_inodecache();
@@ -4036,9 +4008,6 @@ out4:
4036static void __exit exit_ext4_fs(void) 4008static void __exit exit_ext4_fs(void)
4037{ 4009{
4038 unregister_filesystem(&ext4_fs_type); 4010 unregister_filesystem(&ext4_fs_type);
4039#ifdef CONFIG_EXT4DEV_COMPAT
4040 unregister_filesystem(&ext4dev_fs_type);
4041#endif
4042 destroy_inodecache(); 4011 destroy_inodecache();
4043 exit_ext4_xattr(); 4012 exit_ext4_xattr();
4044 exit_ext4_mballoc(); 4013 exit_ext4_mballoc();
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 761af77491f5..b0ab5219becb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -770,7 +770,7 @@ static int jbd2_seq_info_release(struct inode *inode, struct file *file)
770 return seq_release(inode, file); 770 return seq_release(inode, file);
771} 771}
772 772
773static struct file_operations jbd2_seq_info_fops = { 773static const struct file_operations jbd2_seq_info_fops = {
774 .owner = THIS_MODULE, 774 .owner = THIS_MODULE,
775 .open = jbd2_seq_info_open, 775 .open = jbd2_seq_info_open,
776 .read = seq_read, 776 .read = seq_read,
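Editor's note: this hunk and the following ones (nfsd, nilfs2, ocfs2, omfs, plus the cgroup.h and fs.h declarations) constify file_operations tables that are never written at run time, so they can live in read-only data. The resulting shape is simply the one sketched below; demo_info_open() is a hypothetical open helper, not from this patch.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* Editor's sketch, not part of the patch. */
static int demo_info_open(struct inode *inode, struct file *file);

static const struct file_operations demo_info_fops = {
	.owner	 = THIS_MODULE,
	.open	 = demo_info_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};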
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 00388d2a3c99..5c01fc148ce8 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -176,7 +176,7 @@ static const struct file_operations exports_operations = {
176extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); 176extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
177extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); 177extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
178 178
179static struct file_operations pool_stats_operations = { 179static const struct file_operations pool_stats_operations = {
180 .open = nfsd_pool_stats_open, 180 .open = nfsd_pool_stats_open,
181 .read = seq_read, 181 .read = seq_read,
182 .llseek = seq_lseek, 182 .llseek = seq_lseek,
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 1a4fa04cf071..e097099bfc8f 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -697,7 +697,7 @@ not_empty:
697 return 0; 697 return 0;
698} 698}
699 699
700struct file_operations nilfs_dir_operations = { 700const struct file_operations nilfs_dir_operations = {
701 .llseek = generic_file_llseek, 701 .llseek = generic_file_llseek,
702 .read = generic_read_dir, 702 .read = generic_read_dir,
703 .readdir = nilfs_readdir, 703 .readdir = nilfs_readdir,
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 7d7b4983dee3..30292df443ce 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -134,7 +134,7 @@ static int nilfs_file_mmap(struct file *file, struct vm_area_struct *vma)
134 * We have mostly NULL's here: the current defaults are ok for 134 * We have mostly NULL's here: the current defaults are ok for
135 * the nilfs filesystem. 135 * the nilfs filesystem.
136 */ 136 */
137struct file_operations nilfs_file_operations = { 137const struct file_operations nilfs_file_operations = {
138 .llseek = generic_file_llseek, 138 .llseek = generic_file_llseek,
139 .read = do_sync_read, 139 .read = do_sync_read,
140 .write = do_sync_write, 140 .write = do_sync_write,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index b18c4998f8d0..f6326112d647 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -433,7 +433,7 @@ static const struct address_space_operations def_mdt_aops = {
433}; 433};
434 434
435static const struct inode_operations def_mdt_iops; 435static const struct inode_operations def_mdt_iops;
436static struct file_operations def_mdt_fops; 436static const struct file_operations def_mdt_fops;
437 437
438/* 438/*
439 * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile, 439 * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile,
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index bad7368782d0..4da6f67e9a91 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -294,9 +294,9 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *);
294/* 294/*
295 * Inodes and files operations 295 * Inodes and files operations
296 */ 296 */
297extern struct file_operations nilfs_dir_operations; 297extern const struct file_operations nilfs_dir_operations;
298extern const struct inode_operations nilfs_file_inode_operations; 298extern const struct inode_operations nilfs_file_inode_operations;
299extern struct file_operations nilfs_file_operations; 299extern const struct file_operations nilfs_file_operations;
300extern const struct address_space_operations nilfs_aops; 300extern const struct address_space_operations nilfs_aops;
301extern const struct inode_operations nilfs_dir_inode_operations; 301extern const struct inode_operations nilfs_dir_inode_operations;
302extern const struct inode_operations nilfs_special_inode_operations; 302extern const struct inode_operations nilfs_special_inode_operations;
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 09cc25d04611..c452d116b892 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -966,7 +966,7 @@ static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
966} 966}
967#endif /* CONFIG_DEBUG_FS */ 967#endif /* CONFIG_DEBUG_FS */
968 968
969static struct file_operations o2hb_debug_fops = { 969static const struct file_operations o2hb_debug_fops = {
970 .open = o2hb_debug_open, 970 .open = o2hb_debug_open,
971 .release = o2hb_debug_release, 971 .release = o2hb_debug_release,
972 .read = o2hb_debug_read, 972 .read = o2hb_debug_read,
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index cfb2be708abe..da794bc07a6c 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -207,7 +207,7 @@ static int nst_fop_release(struct inode *inode, struct file *file)
207 return seq_release_private(inode, file); 207 return seq_release_private(inode, file);
208} 208}
209 209
210static struct file_operations nst_seq_fops = { 210static const struct file_operations nst_seq_fops = {
211 .open = nst_fop_open, 211 .open = nst_fop_open,
212 .read = seq_read, 212 .read = seq_read,
213 .llseek = seq_lseek, 213 .llseek = seq_lseek,
@@ -388,7 +388,7 @@ static int sc_fop_release(struct inode *inode, struct file *file)
388 return seq_release_private(inode, file); 388 return seq_release_private(inode, file);
389} 389}
390 390
391static struct file_operations sc_seq_fops = { 391static const struct file_operations sc_seq_fops = {
392 .open = sc_fop_open, 392 .open = sc_fop_open,
393 .read = seq_read, 393 .read = seq_read,
394 .llseek = seq_lseek, 394 .llseek = seq_lseek,
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index ca46002ec10e..42b0bad7a612 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -478,7 +478,7 @@ bail:
478 return -ENOMEM; 478 return -ENOMEM;
479} 479}
480 480
481static struct file_operations debug_purgelist_fops = { 481static const struct file_operations debug_purgelist_fops = {
482 .open = debug_purgelist_open, 482 .open = debug_purgelist_open,
483 .release = debug_buffer_release, 483 .release = debug_buffer_release,
484 .read = debug_buffer_read, 484 .read = debug_buffer_read,
@@ -538,7 +538,7 @@ bail:
538 return -ENOMEM; 538 return -ENOMEM;
539} 539}
540 540
541static struct file_operations debug_mle_fops = { 541static const struct file_operations debug_mle_fops = {
542 .open = debug_mle_open, 542 .open = debug_mle_open,
543 .release = debug_buffer_release, 543 .release = debug_buffer_release,
544 .read = debug_buffer_read, 544 .read = debug_buffer_read,
@@ -741,7 +741,7 @@ static int debug_lockres_release(struct inode *inode, struct file *file)
741 return seq_release_private(inode, file); 741 return seq_release_private(inode, file);
742} 742}
743 743
744static struct file_operations debug_lockres_fops = { 744static const struct file_operations debug_lockres_fops = {
745 .open = debug_lockres_open, 745 .open = debug_lockres_open,
746 .release = debug_lockres_release, 746 .release = debug_lockres_release,
747 .read = seq_read, 747 .read = seq_read,
@@ -925,7 +925,7 @@ bail:
925 return -ENOMEM; 925 return -ENOMEM;
926} 926}
927 927
928static struct file_operations debug_state_fops = { 928static const struct file_operations debug_state_fops = {
929 .open = debug_state_open, 929 .open = debug_state_open,
930 .release = debug_buffer_release, 930 .release = debug_buffer_release,
931 .read = debug_buffer_read, 931 .read = debug_buffer_read,
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 4cc3c890a2cd..c0e48aeebb1c 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -373,7 +373,7 @@ static ssize_t ocfs2_debug_read(struct file *file, char __user *buf,
373} 373}
374#endif /* CONFIG_DEBUG_FS */ 374#endif /* CONFIG_DEBUG_FS */
375 375
376static struct file_operations ocfs2_osb_debug_fops = { 376static const struct file_operations ocfs2_osb_debug_fops = {
377 .open = ocfs2_osb_debug_open, 377 .open = ocfs2_osb_debug_open,
378 .release = ocfs2_debug_release, 378 .release = ocfs2_debug_release,
379 .read = ocfs2_debug_read, 379 .read = ocfs2_debug_read,
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 3680bae335b5..b42d62419034 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -498,7 +498,7 @@ const struct inode_operations omfs_dir_inops = {
498 .rmdir = omfs_rmdir, 498 .rmdir = omfs_rmdir,
499}; 499};
500 500
501struct file_operations omfs_dir_operations = { 501const struct file_operations omfs_dir_operations = {
502 .read = generic_read_dir, 502 .read = generic_read_dir,
503 .readdir = omfs_readdir, 503 .readdir = omfs_readdir,
504 .llseek = generic_file_llseek, 504 .llseek = generic_file_llseek,
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 4845fbb18e6e..399487c09364 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -322,7 +322,7 @@ static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
322 return generic_block_bmap(mapping, block, omfs_get_block); 322 return generic_block_bmap(mapping, block, omfs_get_block);
323} 323}
324 324
325struct file_operations omfs_file_operations = { 325const struct file_operations omfs_file_operations = {
326 .llseek = generic_file_llseek, 326 .llseek = generic_file_llseek,
327 .read = do_sync_read, 327 .read = do_sync_read,
328 .write = do_sync_write, 328 .write = do_sync_write,
diff --git a/fs/omfs/omfs.h b/fs/omfs/omfs.h
index df71039945ac..ebe2fdbe535e 100644
--- a/fs/omfs/omfs.h
+++ b/fs/omfs/omfs.h
@@ -44,14 +44,14 @@ extern int omfs_allocate_range(struct super_block *sb, int min_request,
44extern int omfs_clear_range(struct super_block *sb, u64 block, int count); 44extern int omfs_clear_range(struct super_block *sb, u64 block, int count);
45 45
46/* dir.c */ 46/* dir.c */
47extern struct file_operations omfs_dir_operations; 47extern const struct file_operations omfs_dir_operations;
48extern const struct inode_operations omfs_dir_inops; 48extern const struct inode_operations omfs_dir_inops;
49extern int omfs_make_empty(struct inode *inode, struct super_block *sb); 49extern int omfs_make_empty(struct inode *inode, struct super_block *sb);
50extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, 50extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header,
51 u64 fsblock); 51 u64 fsblock);
52 52
53/* file.c */ 53/* file.c */
54extern struct file_operations omfs_file_operations; 54extern const struct file_operations omfs_file_operations;
55extern const struct inode_operations omfs_file_inops; 55extern const struct inode_operations omfs_file_inops;
56extern const struct address_space_operations omfs_aops; 56extern const struct address_space_operations omfs_aops;
57extern void omfs_make_empty_table(struct buffer_head *bh, int offset); 57extern void omfs_make_empty_table(struct buffer_head *bh, int offset);
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 7b685e10cbad..f38fee0311a7 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -248,19 +248,11 @@ ssize_t part_stat_show(struct device *dev,
248 part_stat_read(p, merges[WRITE]), 248 part_stat_read(p, merges[WRITE]),
249 (unsigned long long)part_stat_read(p, sectors[WRITE]), 249 (unsigned long long)part_stat_read(p, sectors[WRITE]),
250 jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), 250 jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
251 part_in_flight(p), 251 p->in_flight,
252 jiffies_to_msecs(part_stat_read(p, io_ticks)), 252 jiffies_to_msecs(part_stat_read(p, io_ticks)),
253 jiffies_to_msecs(part_stat_read(p, time_in_queue))); 253 jiffies_to_msecs(part_stat_read(p, time_in_queue)));
254} 254}
255 255
256ssize_t part_inflight_show(struct device *dev,
257 struct device_attribute *attr, char *buf)
258{
259 struct hd_struct *p = dev_to_part(dev);
260
261 return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
262}
263
264#ifdef CONFIG_FAIL_MAKE_REQUEST 256#ifdef CONFIG_FAIL_MAKE_REQUEST
265ssize_t part_fail_show(struct device *dev, 257ssize_t part_fail_show(struct device *dev,
266 struct device_attribute *attr, char *buf) 258 struct device_attribute *attr, char *buf)
@@ -289,7 +281,6 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
289static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 281static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
290static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); 282static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
291static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 283static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
292static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
293#ifdef CONFIG_FAIL_MAKE_REQUEST 284#ifdef CONFIG_FAIL_MAKE_REQUEST
294static struct device_attribute dev_attr_fail = 285static struct device_attribute dev_attr_fail =
295 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); 286 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@@ -301,7 +292,6 @@ static struct attribute *part_attrs[] = {
301 &dev_attr_size.attr, 292 &dev_attr_size.attr,
302 &dev_attr_alignment_offset.attr, 293 &dev_attr_alignment_offset.attr,
303 &dev_attr_stat.attr, 294 &dev_attr_stat.attr,
304 &dev_attr_inflight.attr,
305#ifdef CONFIG_FAIL_MAKE_REQUEST 295#ifdef CONFIG_FAIL_MAKE_REQUEST
306 &dev_attr_fail.attr, 296 &dev_attr_fail.attr,
307#endif 297#endif
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 56013371f9f3..a44a7897fd4d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -23,7 +23,6 @@
23#include <asm/io.h> 23#include <asm/io.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/mm.h>
27#include <linux/memory.h> 26#include <linux/memory.h>
28#include <asm/sections.h> 27#include <asm/sections.h>
29 28
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 2281c2cbfe2b..5033ce0d254b 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -94,6 +94,7 @@ static const struct file_operations proc_kpagecount_operations = {
94#define KPF_COMPOUND_TAIL 16 94#define KPF_COMPOUND_TAIL 16
95#define KPF_HUGE 17 95#define KPF_HUGE 17
96#define KPF_UNEVICTABLE 18 96#define KPF_UNEVICTABLE 18
97#define KPF_HWPOISON 19
97#define KPF_NOPAGE 20 98#define KPF_NOPAGE 20
98 99
99#define KPF_KSM 21 100#define KPF_KSM 21
@@ -180,6 +181,10 @@ static u64 get_uflags(struct page *page)
180 u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); 181 u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
181 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); 182 u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
182 183
184#ifdef CONFIG_MEMORY_FAILURE
185 u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
186#endif
187
183#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 188#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
184 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached); 189 u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
185#endif 190#endif
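Editor's note: with KPF_HWPOISON (bit 19) now exported through /proc/kpageflags when CONFIG_MEMORY_FAILURE is enabled, a user-space tool can test it per page frame. A small sketch, assuming the usual kpageflags layout of one 64-bit flag word per pfn and root access:

/* Editor's sketch, not part of the patch: report the hwpoison bit for one pfn. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KPF_HWPOISON 19		/* matches the new definition above */

int main(int argc, char **argv)
{
	uint64_t pfn = argc > 1 ? strtoull(argv[1], NULL, 0) : 0;
	uint64_t flags = 0;
	int fd = open("/proc/kpageflags", O_RDONLY);

	/* one 64-bit flag word per page frame, indexed by pfn */
	if (fd < 0 || pread(fd, &flags, sizeof(flags),
			    pfn * sizeof(flags)) != (ssize_t)sizeof(flags)) {
		perror("/proc/kpageflags");
		return 1;
	}
	printf("pfn %llu: hwpoison=%llu\n", (unsigned long long)pfn,
	       (unsigned long long)((flags >> KPF_HWPOISON) & 1));
	close(fd);
	return 0;
}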
diff --git a/fs/select.c b/fs/select.c
index a201fc370223..fd38ce2e32e3 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/sched.h>
18#include <linux/syscalls.h> 19#include <linux/syscalls.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 9cca3785cab8..66d6106a2067 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_GENERIC_GPIO_H 1#ifndef _ASM_GENERIC_GPIO_H
2#define _ASM_GENERIC_GPIO_H 2#define _ASM_GENERIC_GPIO_H
3 3
4#include <linux/kernel.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/errno.h> 6#include <linux/errno.h>
6 7
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index ef47dfd8e5e9..b29e20168b5f 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -61,6 +61,9 @@ struct drm_crtc_helper_funcs {
61 /* Move the crtc on the current fb to the given position *optional* */ 61 /* Move the crtc on the current fb to the given position *optional* */
62 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, 62 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
63 struct drm_framebuffer *old_fb); 63 struct drm_framebuffer *old_fb);
64
65 /* reload the current crtc LUT */
66 void (*load_lut)(struct drm_crtc *crtc);
64}; 67};
65 68
66struct drm_encoder_helper_funcs { 69struct drm_encoder_helper_funcs {
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 4aa5740ce59f..58c892a2cbfa 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -39,6 +39,8 @@ struct drm_fb_helper_crtc {
39struct drm_fb_helper_funcs { 39struct drm_fb_helper_funcs {
40 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, 40 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
41 u16 blue, int regno); 41 u16 blue, int regno);
42 void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green,
43 u16 *blue, int regno);
42}; 44};
43 45
44/* mode specified on the command line */ 46/* mode specified on the command line */
@@ -71,6 +73,7 @@ struct drm_fb_helper {
71}; 73};
72 74
73int drm_fb_helper_single_fb_probe(struct drm_device *dev, 75int drm_fb_helper_single_fb_probe(struct drm_device *dev,
76 int preferred_bpp,
74 int (*fb_create)(struct drm_device *dev, 77 int (*fb_create)(struct drm_device *dev,
75 uint32_t fb_width, 78 uint32_t fb_width,
76 uint32_t fb_height, 79 uint32_t fb_height,
@@ -98,9 +101,11 @@ int drm_fb_helper_setcolreg(unsigned regno,
98void drm_fb_helper_restore(void); 101void drm_fb_helper_restore(void);
99void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb, 102void drm_fb_helper_fill_var(struct fb_info *info, struct drm_framebuffer *fb,
100 uint32_t fb_width, uint32_t fb_height); 103 uint32_t fb_width, uint32_t fb_height);
101void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch); 104void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
105 uint32_t depth);
102 106
103int drm_fb_helper_add_connector(struct drm_connector *connector); 107int drm_fb_helper_add_connector(struct drm_connector *connector);
104int drm_fb_helper_parse_command_line(struct drm_device *dev); 108int drm_fb_helper_parse_command_line(struct drm_device *dev);
109int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
105 110
106#endif 111#endif
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3f6e545609be..e6f3b120f51a 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -80,7 +80,7 @@
80 {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \ 80 {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200}, \
81 {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 81 {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
82 {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 82 {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
83 {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 83 {0x1002, 0x515E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
84 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 84 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
85 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 85 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
86 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 86 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
@@ -113,7 +113,7 @@
113 {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 113 {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
114 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 114 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
115 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 115 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
116 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 116 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|RADEON_SINGLE_CRTC}, \
117 {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ 117 {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
118 {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 118 {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
119 {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ 119 {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index cff4a101f266..3f384d4b163a 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -126,6 +126,7 @@ header-y += nfs_mount.h
126header-y += nl80211.h 126header-y += nl80211.h
127header-y += param.h 127header-y += param.h
128header-y += pci_regs.h 128header-y += pci_regs.h
129header-y += perf_event.h
129header-y += pfkeyv2.h 130header-y += pfkeyv2.h
130header-y += pg.h 131header-y += pg.h
131header-y += phantom.h 132header-y += phantom.h
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 086e5c362d3a..817b23705c91 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -397,7 +397,7 @@ struct atmdev_ops { /* only send is required */
397 int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, 397 int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
398 void __user *optval,int optlen); 398 void __user *optval,int optlen);
399 int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, 399 int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
400 void __user *optval,int optlen); 400 void __user *optval,unsigned int optlen);
401 int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); 401 int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
402 int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); 402 int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
403 void (*phy_put)(struct atm_dev *dev,unsigned char value, 403 void (*phy_put)(struct atm_dev *dev,unsigned char value,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e23a86cae5ac..25119041e034 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -82,7 +82,6 @@ enum rq_cmd_type_bits {
82enum { 82enum {
83 REQ_LB_OP_EJECT = 0x40, /* eject request */ 83 REQ_LB_OP_EJECT = 0x40, /* eject request */
84 REQ_LB_OP_FLUSH = 0x41, /* flush request */ 84 REQ_LB_OP_FLUSH = 0x41, /* flush request */
85 REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
86}; 85};
87 86
88/* 87/*
@@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
261typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 260typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
262typedef int (prep_rq_fn) (struct request_queue *, struct request *); 261typedef int (prep_rq_fn) (struct request_queue *, struct request *);
263typedef void (unplug_fn) (struct request_queue *); 262typedef void (unplug_fn) (struct request_queue *);
264typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
265 263
266struct bio_vec; 264struct bio_vec;
267struct bvec_merge_data { 265struct bvec_merge_data {
@@ -313,6 +311,7 @@ struct queue_limits {
313 unsigned int alignment_offset; 311 unsigned int alignment_offset;
314 unsigned int io_min; 312 unsigned int io_min;
315 unsigned int io_opt; 313 unsigned int io_opt;
314 unsigned int max_discard_sectors;
316 315
317 unsigned short logical_block_size; 316 unsigned short logical_block_size;
318 unsigned short max_hw_segments; 317 unsigned short max_hw_segments;
@@ -340,7 +339,6 @@ struct request_queue
340 make_request_fn *make_request_fn; 339 make_request_fn *make_request_fn;
341 prep_rq_fn *prep_rq_fn; 340 prep_rq_fn *prep_rq_fn;
342 unplug_fn *unplug_fn; 341 unplug_fn *unplug_fn;
343 prepare_discard_fn *prepare_discard_fn;
344 merge_bvec_fn *merge_bvec_fn; 342 merge_bvec_fn *merge_bvec_fn;
345 prepare_flush_fn *prepare_flush_fn; 343 prepare_flush_fn *prepare_flush_fn;
346 softirq_done_fn *softirq_done_fn; 344 softirq_done_fn *softirq_done_fn;
@@ -460,6 +458,7 @@ struct request_queue
460#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 458#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
461#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 459#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
462#define QUEUE_FLAG_CQ 16 /* hardware does queuing */ 460#define QUEUE_FLAG_CQ 16 /* hardware does queuing */
461#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
463 462
464#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 463#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
465 (1 << QUEUE_FLAG_CLUSTER) | \ 464 (1 << QUEUE_FLAG_CLUSTER) | \
@@ -591,6 +590,7 @@ enum {
591#define blk_queue_flushing(q) ((q)->ordseq) 590#define blk_queue_flushing(q) ((q)->ordseq)
592#define blk_queue_stackable(q) \ 591#define blk_queue_stackable(q) \
593 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 592 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
593#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
594 594
595#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) 595#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
596#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) 596#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
@@ -929,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
929extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); 929extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
930extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); 930extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
931extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 931extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
932extern void blk_queue_max_discard_sectors(struct request_queue *q,
933 unsigned int max_discard_sectors);
932extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); 934extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
933extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); 935extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
934extern void blk_queue_alignment_offset(struct request_queue *q, 936extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -955,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
955extern void blk_queue_dma_alignment(struct request_queue *, int); 957extern void blk_queue_dma_alignment(struct request_queue *, int);
956extern void blk_queue_update_dma_alignment(struct request_queue *, int); 958extern void blk_queue_update_dma_alignment(struct request_queue *, int);
957extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); 959extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
958extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
959extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 960extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
960extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 961extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
961extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 962extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
@@ -1080,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
1080 return q->limits.physical_block_size; 1081 return q->limits.physical_block_size;
1081} 1082}
1082 1083
1084static inline int bdev_physical_block_size(struct block_device *bdev)
1085{
1086 return queue_physical_block_size(bdev_get_queue(bdev));
1087}
1088
1083static inline unsigned int queue_io_min(struct request_queue *q) 1089static inline unsigned int queue_io_min(struct request_queue *q)
1084{ 1090{
1085 return q->limits.io_min; 1091 return q->limits.io_min;
1086} 1092}
1087 1093
1094static inline int bdev_io_min(struct block_device *bdev)
1095{
1096 return queue_io_min(bdev_get_queue(bdev));
1097}
1098
1088static inline unsigned int queue_io_opt(struct request_queue *q) 1099static inline unsigned int queue_io_opt(struct request_queue *q)
1089{ 1100{
1090 return q->limits.io_opt; 1101 return q->limits.io_opt;
1091} 1102}
1092 1103
1104static inline int bdev_io_opt(struct block_device *bdev)
1105{
1106 return queue_io_opt(bdev_get_queue(bdev));
1107}
1108
1093static inline int queue_alignment_offset(struct request_queue *q) 1109static inline int queue_alignment_offset(struct request_queue *q)
1094{ 1110{
1095 if (q && q->limits.misaligned) 1111 if (q->limits.misaligned)
1096 return -1; 1112 return -1;
1097 1113
1098 if (q && q->limits.alignment_offset) 1114 return q->limits.alignment_offset;
1099 return q->limits.alignment_offset;
1100
1101 return 0;
1102} 1115}
1103 1116
1104static inline int queue_sector_alignment_offset(struct request_queue *q, 1117static inline int queue_sector_alignment_offset(struct request_queue *q,
@@ -1108,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q,
1108 & (q->limits.io_min - 1); 1121 & (q->limits.io_min - 1);
1109} 1122}
1110 1123
1124static inline int bdev_alignment_offset(struct block_device *bdev)
1125{
1126 struct request_queue *q = bdev_get_queue(bdev);
1127
1128 if (q->limits.misaligned)
1129 return -1;
1130
1131 if (bdev != bdev->bd_contains)
1132 return bdev->bd_part->alignment_offset;
1133
1134 return q->limits.alignment_offset;
1135}
1136
1111static inline int queue_dma_alignment(struct request_queue *q) 1137static inline int queue_dma_alignment(struct request_queue *q)
1112{ 1138{
1113 return q ? q->dma_alignment : 511; 1139 return q ? q->dma_alignment : 511;
@@ -1146,7 +1172,11 @@ static inline void put_dev_sector(Sector p)
1146} 1172}
1147 1173
1148struct work_struct; 1174struct work_struct;
1175struct delayed_work;
1149int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1176int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1177int kblockd_schedule_delayed_work(struct request_queue *q,
1178 struct delayed_work *work,
1179 unsigned long delay);
1150 1180
1151#define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1181#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1152 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 1182 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
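Editor's note: the blkdev.h hunks add a per-queue max_discard_sectors limit, a QUEUE_FLAG_DISCARD bit with a blk_queue_discard() test, and bdev_* wrappers around the topology fields. How a driver might opt in is sketched below; the queue_flag_set_unlocked() call and the limit value are illustrative assumptions, not taken from this patch.

#include <linux/blkdev.h>

/* Editor's sketch, not part of the patch. */
static void demo_enable_discard(struct request_queue *q)
{
	/* largest single discard this device accepts, in 512-byte sectors */
	blk_queue_max_discard_sectors(q, 8 * 1024);

	/* lets callers check blk_queue_discard(q) before building discard bios */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}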
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7e4350ece0f8..3b73b9992b26 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -198,6 +198,7 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
198 char __user *arg); 198 char __user *arg);
199extern int blk_trace_startstop(struct request_queue *q, int start); 199extern int blk_trace_startstop(struct request_queue *q, int start);
200extern int blk_trace_remove(struct request_queue *q); 200extern int blk_trace_remove(struct request_queue *q);
201extern void blk_trace_remove_sysfs(struct device *dev);
201extern int blk_trace_init_sysfs(struct device *dev); 202extern int blk_trace_init_sysfs(struct device *dev);
202 203
203extern struct attribute_group blk_trace_attr_group; 204extern struct attribute_group blk_trace_attr_group;
@@ -211,6 +212,7 @@ extern struct attribute_group blk_trace_attr_group;
211# define blk_trace_startstop(q, start) (-ENOTTY) 212# define blk_trace_startstop(q, start) (-ENOTTY)
212# define blk_trace_remove(q) (-ENOTTY) 213# define blk_trace_remove(q) (-ENOTTY)
213# define blk_add_trace_msg(q, fmt, ...) do { } while (0) 214# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
215# define blk_trace_remove_sysfs(dev) do { } while (0)
214static inline int blk_trace_init_sysfs(struct device *dev) 216static inline int blk_trace_init_sysfs(struct device *dev)
215{ 217{
216 return 0; 218 return 0;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b62bb9294d0c..0008dee66514 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -37,7 +37,7 @@ extern void cgroup_exit(struct task_struct *p, int run_callbacks);
37extern int cgroupstats_build(struct cgroupstats *stats, 37extern int cgroupstats_build(struct cgroupstats *stats,
38 struct dentry *dentry); 38 struct dentry *dentry);
39 39
40extern struct file_operations proc_cgroup_operations; 40extern const struct file_operations proc_cgroup_operations;
41 41
42/* Define the enumeration of all cgroup subsystems */ 42/* Define the enumeration of all cgroup subsystems */
43#define SUBSYS(_x) _x ## _subsys_id, 43#define SUBSYS(_x) _x ## _subsys_id,
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 47ebf416f512..3a14615fd35c 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -132,11 +132,8 @@ struct cn_callback_id {
132}; 132};
133 133
134struct cn_callback_data { 134struct cn_callback_data {
135 void (*destruct_data) (void *); 135 struct sk_buff *skb;
136 void *ddata; 136 void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
137
138 void *callback_priv;
139 void (*callback) (struct cn_msg *);
140 137
141 void *free; 138 void *free;
142}; 139};
@@ -167,11 +164,11 @@ struct cn_dev {
167 struct cn_queue_dev *cbdev; 164 struct cn_queue_dev *cbdev;
168}; 165};
169 166
170int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *)); 167int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *, struct netlink_skb_parms *));
171void cn_del_callback(struct cb_id *); 168void cn_del_callback(struct cb_id *);
172int cn_netlink_send(struct cn_msg *, u32, gfp_t); 169int cn_netlink_send(struct cn_msg *, u32, gfp_t);
173 170
174int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *)); 171int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
175void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); 172void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
176 173
177int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work); 174int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
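Editor's note: the connector callback now receives the sender's struct netlink_skb_parms alongside the message, and cn_callback_data keeps the skb instead of a destructor/data pair. A minimal module registering a callback with the widened signature; the idx/val pair, the name, and the callback body are placeholders only.

#include <linux/connector.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>

/* Editor's sketch, not part of the patch. */
static struct cb_id demo_id = { .idx = 0x123, .val = 0x456 };
static char demo_name[] = "demo";

static void demo_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	/* the new second argument exposes the sender's netlink credentials */
	pr_info("cn msg seq %u from pid %u\n", msg->seq, nsp->pid);
}

static int __init demo_cn_init(void)
{
	return cn_add_callback(&demo_id, demo_name, demo_cn_callback);
}

static void __exit demo_cn_exit(void)
{
	cn_del_callback(&demo_id);
}

module_init(demo_cn_init);
module_exit(demo_cn_exit);
MODULE_LICENSE("GPL");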
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2adaa2529f18..2620a8c63571 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -300,6 +300,10 @@ struct inodes_stat_t {
300#define BLKTRACESTOP _IO(0x12,117) 300#define BLKTRACESTOP _IO(0x12,117)
301#define BLKTRACETEARDOWN _IO(0x12,118) 301#define BLKTRACETEARDOWN _IO(0x12,118)
302#define BLKDISCARD _IO(0x12,119) 302#define BLKDISCARD _IO(0x12,119)
303#define BLKIOMIN _IO(0x12,120)
304#define BLKIOOPT _IO(0x12,121)
305#define BLKALIGNOFF _IO(0x12,122)
306#define BLKPBSZGET _IO(0x12,123)
303 307
304#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ 308#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
305#define FIBMAP _IO(0x00,1) /* bmap access */ 309#define FIBMAP _IO(0x00,1) /* bmap access */
@@ -2446,7 +2450,7 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
2446 __simple_attr_check_format(__fmt, 0ull); \ 2450 __simple_attr_check_format(__fmt, 0ull); \
2447 return simple_attr_open(inode, file, __get, __set, __fmt); \ 2451 return simple_attr_open(inode, file, __get, __set, __fmt); \
2448} \ 2452} \
2449static struct file_operations __fops = { \ 2453static const struct file_operations __fops = { \
2450 .owner = THIS_MODULE, \ 2454 .owner = THIS_MODULE, \
2451 .open = __fops ## _open, \ 2455 .open = __fops ## _open, \
2452 .release = simple_attr_release, \ 2456 .release = simple_attr_release, \
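Editor's note: fs.h gains four ioctls (BLKIOMIN, BLKIOOPT, BLKALIGNOFF, BLKPBSZGET) exposing a block device's I/O topology to user space. A user-space sketch querying them follows; it assumes unsigned int results for the sizes and int for the alignment offset, and /dev/sda is only an example path.

/* Editor's sketch, not part of the patch.  Build: cc -o blktopo blktopo.c */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/sda";	/* example path */
	unsigned int io_min = 0, io_opt = 0, pbsz = 0;
	int align = 0;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror(dev);
		return 1;
	}
	if (ioctl(fd, BLKIOMIN, &io_min) || ioctl(fd, BLKIOOPT, &io_opt) ||
	    ioctl(fd, BLKALIGNOFF, &align) || ioctl(fd, BLKPBSZGET, &pbsz)) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	printf("%s: io_min=%u io_opt=%u align_off=%d phys_block=%u\n",
	       dev, io_min, io_opt, align, pbsz);
	close(fd);
	return 0;
}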
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index cd3d2abaf30a..0b4f97d24d7f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -241,7 +241,7 @@ extern void ftrace_enable_daemon(void);
241# define ftrace_set_filter(buf, len, reset) do { } while (0) 241# define ftrace_set_filter(buf, len, reset) do { } while (0)
242# define ftrace_disable_daemon() do { } while (0) 242# define ftrace_disable_daemon() do { } while (0)
243# define ftrace_enable_daemon() do { } while (0) 243# define ftrace_enable_daemon() do { } while (0)
244static inline void ftrace_release(void *start, unsigned long size) { } 244static inline void ftrace_release_mod(struct module *mod) {}
245static inline int register_ftrace_command(struct ftrace_func_command *cmd) 245static inline int register_ftrace_command(struct ftrace_func_command *cmd)
246{ 246{
247 return -EINVAL; 247 return -EINVAL;
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 297df45ffd0a..7beaa21b3880 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -98,7 +98,7 @@ struct hd_struct {
98 int make_it_fail; 98 int make_it_fail;
99#endif 99#endif
100 unsigned long stamp; 100 unsigned long stamp;
101 int in_flight[2]; 101 int in_flight;
102#ifdef CONFIG_SMP 102#ifdef CONFIG_SMP
103 struct disk_stats *dkstats; 103 struct disk_stats *dkstats;
104#else 104#else
@@ -322,23 +322,18 @@ static inline void free_part_stats(struct hd_struct *part)
322#define part_stat_sub(cpu, gendiskp, field, subnd) \ 322#define part_stat_sub(cpu, gendiskp, field, subnd) \
323 part_stat_add(cpu, gendiskp, field, -subnd) 323 part_stat_add(cpu, gendiskp, field, -subnd)
324 324
325static inline void part_inc_in_flight(struct hd_struct *part, int rw) 325static inline void part_inc_in_flight(struct hd_struct *part)
326{ 326{
327 part->in_flight[rw]++; 327 part->in_flight++;
328 if (part->partno) 328 if (part->partno)
329 part_to_disk(part)->part0.in_flight[rw]++; 329 part_to_disk(part)->part0.in_flight++;
330} 330}
331 331
332static inline void part_dec_in_flight(struct hd_struct *part, int rw) 332static inline void part_dec_in_flight(struct hd_struct *part)
333{ 333{
334 part->in_flight[rw]--; 334 part->in_flight--;
335 if (part->partno) 335 if (part->partno)
336 part_to_disk(part)->part0.in_flight[rw]--; 336 part_to_disk(part)->part0.in_flight--;
337}
338
339static inline int part_in_flight(struct hd_struct *part)
340{
341 return part->in_flight[0] + part->in_flight[1];
342} 337}
343 338
344/* block/blk-core.c */ 339/* block/blk-core.c */
@@ -551,8 +546,6 @@ extern ssize_t part_size_show(struct device *dev,
551 struct device_attribute *attr, char *buf); 546 struct device_attribute *attr, char *buf);
552extern ssize_t part_stat_show(struct device *dev, 547extern ssize_t part_stat_show(struct device *dev,
553 struct device_attribute *attr, char *buf); 548 struct device_attribute *attr, char *buf);
554extern ssize_t part_inflight_show(struct device *dev,
555 struct device_attribute *attr, char *buf);
556#ifdef CONFIG_FAIL_MAKE_REQUEST 549#ifdef CONFIG_FAIL_MAKE_REQUEST
557extern ssize_t part_fail_show(struct device *dev, 550extern ssize_t part_fail_show(struct device *dev,
558 struct device_attribute *attr, char *buf); 551 struct device_attribute *attr, char *buf);
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 0d45b4e8d367..08bc776d05e2 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -145,14 +145,14 @@ static inline int ip_mroute_opt(int opt)
145#endif 145#endif
146 146
147#ifdef CONFIG_IP_MROUTE 147#ifdef CONFIG_IP_MROUTE
148extern int ip_mroute_setsockopt(struct sock *, int, char __user *, int); 148extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
149extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); 149extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
150extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); 150extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
151extern int ip_mr_init(void); 151extern int ip_mr_init(void);
152#else 152#else
153static inline 153static inline
154int ip_mroute_setsockopt(struct sock *sock, 154int ip_mroute_setsockopt(struct sock *sock,
155 int optname, char __user *optval, int optlen) 155 int optname, char __user *optval, unsigned int optlen)
156{ 156{
157 return -ENOPROTOOPT; 157 return -ENOPROTOOPT;
158} 158}
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 43dc97e32183..b191865a6ca3 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -134,7 +134,7 @@ static inline int ip6_mroute_opt(int opt)
134struct sock; 134struct sock;
135 135
136#ifdef CONFIG_IPV6_MROUTE 136#ifdef CONFIG_IPV6_MROUTE
137extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, int); 137extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
138extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); 138extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
139extern int ip6_mr_input(struct sk_buff *skb); 139extern int ip6_mr_input(struct sk_buff *skb);
140extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); 140extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
@@ -143,7 +143,7 @@ extern void ip6_mr_cleanup(void);
143#else 143#else
144static inline 144static inline
145int ip6_mroute_setsockopt(struct sock *sock, 145int ip6_mroute_setsockopt(struct sock *sock,
146 int optname, char __user *optval, int optlen) 146 int optname, char __user *optval, unsigned int optlen)
147{ 147{
148 return -ENOPROTOOPT; 148 return -ENOPROTOOPT;
149} 149}
diff --git a/include/linux/net.h b/include/linux/net.h
index 9040a10584f7..529a0931711d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -178,11 +178,11 @@ struct proto_ops {
178 int (*listen) (struct socket *sock, int len); 178 int (*listen) (struct socket *sock, int len);
179 int (*shutdown) (struct socket *sock, int flags); 179 int (*shutdown) (struct socket *sock, int flags);
180 int (*setsockopt)(struct socket *sock, int level, 180 int (*setsockopt)(struct socket *sock, int level,
181 int optname, char __user *optval, int optlen); 181 int optname, char __user *optval, unsigned int optlen);
182 int (*getsockopt)(struct socket *sock, int level, 182 int (*getsockopt)(struct socket *sock, int level,
183 int optname, char __user *optval, int __user *optlen); 183 int optname, char __user *optval, int __user *optlen);
184 int (*compat_setsockopt)(struct socket *sock, int level, 184 int (*compat_setsockopt)(struct socket *sock, int level,
185 int optname, char __user *optval, int optlen); 185 int optname, char __user *optval, unsigned int optlen);
186 int (*compat_getsockopt)(struct socket *sock, int level, 186 int (*compat_getsockopt)(struct socket *sock, int level,
187 int optname, char __user *optval, int __user *optlen); 187 int optname, char __user *optval, int __user *optlen);
188 int (*sendmsg) (struct kiocb *iocb, struct socket *sock, 188 int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
@@ -256,7 +256,7 @@ extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
256extern int kernel_getsockopt(struct socket *sock, int level, int optname, 256extern int kernel_getsockopt(struct socket *sock, int level, int optname,
257 char *optval, int *optlen); 257 char *optval, int *optlen);
258extern int kernel_setsockopt(struct socket *sock, int level, int optname, 258extern int kernel_setsockopt(struct socket *sock, int level, int optname,
259 char *optval, int optlen); 259 char *optval, unsigned int optlen);
260extern int kernel_sendpage(struct socket *sock, struct page *page, int offset, 260extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
261 size_t size, int flags); 261 size_t size, int flags);
262extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); 262extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
@@ -313,7 +313,7 @@ SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \
313SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \ 313SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \
314SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \ 314SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \
315SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \ 315SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \
316 char __user *optval, int optlen), (sock, level, optname, optval, optlen)) \ 316 char __user *optval, unsigned int optlen), (sock, level, optname, optval, optlen)) \
317SOCKCALL_WRAP(name, getsockopt, (struct socket *sock, int level, int optname, \ 317SOCKCALL_WRAP(name, getsockopt, (struct socket *sock, int level, int optname, \
318 char __user *optval, int __user *optlen), (sock, level, optname, optval, optlen)) \ 318 char __user *optval, int __user *optlen), (sock, level, optname, optval, optlen)) \
319SOCKCALL_WRAP(name, sendmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len), \ 319SOCKCALL_WRAP(name, sendmsg, (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t len), \
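Editor's note: the net.h hunk above is part of a tree-wide switch of setsockopt()'s optlen parameter from int to unsigned int. A small sketch of why the unsigned type simplifies validation in a handler; the handler name and option numbers are invented for illustration.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Toy option handler in the style of a proto_ops->setsockopt() method. */
static int demo_setsockopt(int optname, const void *optval, unsigned int optlen)
{
	int val;

	/* With unsigned int there is no "negative length" case to reject;
	 * a short buffer is the only size error left to check. */
	if (optlen < sizeof(val))
		return -EINVAL;

	memcpy(&val, optval, sizeof(val));
	printf("option %d set to %d (optlen=%u)\n", optname, val, optlen);
	return 0;
}

int main(void)
{
	int v = 42;

	demo_setsockopt(1, &v, sizeof(v));                           /* ok */
	printf("short buffer -> %d\n", demo_setsockopt(1, &v, 2));   /* -EINVAL */
	return 0;
}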
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 48cfe51bfddc..6132b5e6d9d3 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -221,12 +221,12 @@ __ret;})
221 221
222/* Call setsockopt() */ 222/* Call setsockopt() */
223int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, 223int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
224 int len); 224 unsigned int len);
225int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, 225int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
226 int *len); 226 int *len);
227 227
228int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, 228int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
229 char __user *opt, int len); 229 char __user *opt, unsigned int len);
230int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, 230int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
231 char __user *opt, int *len); 231 char __user *opt, int *len);
232 232
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3a9d36d1e92a..2e6d95f97419 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -442,6 +442,7 @@ enum perf_callchain_context {
442#include <linux/hrtimer.h> 442#include <linux/hrtimer.h>
443#include <linux/fs.h> 443#include <linux/fs.h>
444#include <linux/pid_namespace.h> 444#include <linux/pid_namespace.h>
445#include <linux/workqueue.h>
445#include <asm/atomic.h> 446#include <asm/atomic.h>
446 447
447#define PERF_MAX_STACK_DEPTH 255 448#define PERF_MAX_STACK_DEPTH 255
@@ -513,6 +514,10 @@ struct file;
513 514
514struct perf_mmap_data { 515struct perf_mmap_data {
515 struct rcu_head rcu_head; 516 struct rcu_head rcu_head;
517#ifdef CONFIG_PERF_USE_VMALLOC
518 struct work_struct work;
519#endif
520 int data_order;
516 int nr_pages; /* nr of data pages */ 521 int nr_pages; /* nr of data pages */
517 int writable; /* are we writable */ 522 int writable; /* are we writable */
518 int nr_locked; /* nr pages mlocked */ 523 int nr_locked; /* nr pages mlocked */
diff --git a/include/linux/poll.h b/include/linux/poll.h
index fa287f25138d..6673743946f7 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -6,10 +6,10 @@
6#ifdef __KERNEL__ 6#ifdef __KERNEL__
7 7
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9#include <linux/ktime.h>
9#include <linux/wait.h> 10#include <linux/wait.h>
10#include <linux/string.h> 11#include <linux/string.h>
11#include <linux/fs.h> 12#include <linux/fs.h>
12#include <linux/sched.h>
13#include <asm/uaccess.h> 13#include <asm/uaccess.h>
14 14
15/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating 15/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 731af71cddc9..fcb9884df618 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -114,8 +114,7 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
114int __must_check res_counter_charge_locked(struct res_counter *counter, 114int __must_check res_counter_charge_locked(struct res_counter *counter,
115 unsigned long val); 115 unsigned long val);
116int __must_check res_counter_charge(struct res_counter *counter, 116int __must_check res_counter_charge(struct res_counter *counter,
117 unsigned long val, struct res_counter **limit_fail_at, 117 unsigned long val, struct res_counter **limit_fail_at);
118 struct res_counter **soft_limit_at);
119 118
120/* 119/*
121 * uncharge - tell that some portion of the resource is released 120 * uncharge - tell that some portion of the resource is released
@@ -128,8 +127,7 @@ int __must_check res_counter_charge(struct res_counter *counter,
128 */ 127 */
129 128
130void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); 129void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
131void res_counter_uncharge(struct res_counter *counter, unsigned long val, 130void res_counter_uncharge(struct res_counter *counter, unsigned long val);
132 bool *was_soft_limit_excess);
133 131
134static inline bool res_counter_limit_check_locked(struct res_counter *cnt) 132static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
135{ 133{
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 3b461dffe244..3273a0c5043b 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -16,7 +16,7 @@ struct __kernel_sockaddr_storage {
16 /* _SS_MAXSIZE value minus size of ss_family */ 16 /* _SS_MAXSIZE value minus size of ss_family */
17} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ 17} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
18 18
19#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) 19#ifdef __KERNEL__
20 20
21#include <asm/socket.h> /* arch-dependent defines */ 21#include <asm/socket.h> /* arch-dependent defines */
22#include <linux/sockios.h> /* the SIOCxxx I/O controls */ 22#include <linux/sockios.h> /* the SIOCxxx I/O controls */
@@ -101,21 +101,6 @@ struct cmsghdr {
101 ((char *)(cmsg) - (char *)(mhdr)->msg_control))) 101 ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
102 102
103/* 103/*
104 * This mess will go away with glibc
105 */
106
107#ifdef __KERNEL__
108#define __KINLINE static inline
109#elif defined(__GNUC__)
110#define __KINLINE static __inline__
111#elif defined(__cplusplus)
112#define __KINLINE static inline
113#else
114#define __KINLINE static
115#endif
116
117
118/*
119 * Get the next cmsg header 104 * Get the next cmsg header
120 * 105 *
121 * PLEASE, do not touch this function. If you think, that it is 106 * PLEASE, do not touch this function. If you think, that it is
@@ -128,7 +113,7 @@ struct cmsghdr {
128 * ancillary object DATA. --ANK (980731) 113 * ancillary object DATA. --ANK (980731)
129 */ 114 */
130 115
131__KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, 116static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
132 struct cmsghdr *__cmsg) 117 struct cmsghdr *__cmsg)
133{ 118{
134 struct cmsghdr * __ptr; 119 struct cmsghdr * __ptr;
@@ -140,7 +125,7 @@ __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
140 return __ptr; 125 return __ptr;
141} 126}
142 127
143__KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) 128static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
144{ 129{
145 return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg); 130 return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
146} 131}
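Editor's note: the socket.h hunk above drops the __KINLINE glibc-compat macro now that the header section is kernel-only; userspace walks ancillary data through its libc's CMSG_* macros instead. A small userspace illustration of the same traversal pattern that cmsg_nxthdr() implements, using only the standard macros.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	/* Build a msghdr with one control element and walk it the way
	 * cmsg_nxthdr() does in the kernel header above. */
	char control[CMSG_SPACE(sizeof(int))];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(control, 0, sizeof(control));
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		printf("cmsg level=%d type=%d len=%zu\n",
		       cmsg->cmsg_level, cmsg->cmsg_type, (size_t)cmsg->cmsg_len);

	return 0;
}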
diff --git a/include/net/compat.h b/include/net/compat.h
index 5bbf8bf9efea..7c3002832d05 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -40,8 +40,8 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
40 40
41extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); 41extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
42 42
43extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int, 43extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
44 int (*)(struct sock *, int, int, char __user *, int)); 44 int (*)(struct sock *, int, int, char __user *, unsigned int));
45extern int compat_mc_getsockopt(struct sock *, int, int, char __user *, 45extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
46 int __user *, int (*)(struct sock *, int, int, char __user *, 46 int __user *, int (*)(struct sock *, int, int, char __user *,
47 int __user *)); 47 int __user *));
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 03cffd9f64e3..696d6e4ce68a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -48,13 +48,13 @@ struct inet_connection_sock_af_ops {
48 u16 net_header_len; 48 u16 net_header_len;
49 u16 sockaddr_len; 49 u16 sockaddr_len;
50 int (*setsockopt)(struct sock *sk, int level, int optname, 50 int (*setsockopt)(struct sock *sk, int level, int optname,
51 char __user *optval, int optlen); 51 char __user *optval, unsigned int optlen);
52 int (*getsockopt)(struct sock *sk, int level, int optname, 52 int (*getsockopt)(struct sock *sk, int level, int optname,
53 char __user *optval, int __user *optlen); 53 char __user *optval, int __user *optlen);
54#ifdef CONFIG_COMPAT 54#ifdef CONFIG_COMPAT
55 int (*compat_setsockopt)(struct sock *sk, 55 int (*compat_setsockopt)(struct sock *sk,
56 int level, int optname, 56 int level, int optname,
57 char __user *optval, int optlen); 57 char __user *optval, unsigned int optlen);
58 int (*compat_getsockopt)(struct sock *sk, 58 int (*compat_getsockopt)(struct sock *sk,
59 int level, int optname, 59 int level, int optname,
60 char __user *optval, int __user *optlen); 60 char __user *optval, int __user *optlen);
@@ -332,5 +332,5 @@ extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
332extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, 332extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
333 char __user *optval, int __user *optlen); 333 char __user *optval, int __user *optlen);
334extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, 334extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
335 char __user *optval, int optlen); 335 char __user *optval, unsigned int optlen);
336#endif /* _INET_CONNECTION_SOCK_H */ 336#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 5b26a0bd178e..2f47e5482b55 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -381,10 +381,10 @@ extern int ip_options_rcv_srr(struct sk_buff *skb);
381extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb); 381extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
382extern int ip_cmsg_send(struct net *net, 382extern int ip_cmsg_send(struct net *net,
383 struct msghdr *msg, struct ipcm_cookie *ipc); 383 struct msghdr *msg, struct ipcm_cookie *ipc);
384extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen); 384extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
385extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); 385extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
386extern int compat_ip_setsockopt(struct sock *sk, int level, 386extern int compat_ip_setsockopt(struct sock *sk, int level,
387 int optname, char __user *optval, int optlen); 387 int optname, char __user *optval, unsigned int optlen);
388extern int compat_ip_getsockopt(struct sock *sk, int level, 388extern int compat_ip_getsockopt(struct sock *sk, int level,
389 int optname, char __user *optval, int __user *optlen); 389 int optname, char __user *optval, int __user *optlen);
390extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); 390extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index ad9a51130254..8c31d8a0c1fe 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -550,7 +550,7 @@ extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
550extern int ipv6_setsockopt(struct sock *sk, int level, 550extern int ipv6_setsockopt(struct sock *sk, int level,
551 int optname, 551 int optname,
552 char __user *optval, 552 char __user *optval,
553 int optlen); 553 unsigned int optlen);
554extern int ipv6_getsockopt(struct sock *sk, int level, 554extern int ipv6_getsockopt(struct sock *sk, int level,
555 int optname, 555 int optname,
556 char __user *optval, 556 char __user *optval,
@@ -559,7 +559,7 @@ extern int compat_ipv6_setsockopt(struct sock *sk,
559 int level, 559 int level,
560 int optname, 560 int optname,
561 char __user *optval, 561 char __user *optval,
562 int optlen); 562 unsigned int optlen);
563extern int compat_ipv6_getsockopt(struct sock *sk, 563extern int compat_ipv6_getsockopt(struct sock *sk,
564 int level, 564 int level,
565 int optname, 565 int optname,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 42d00ced5eb8..6e5f0e0c7967 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -544,7 +544,7 @@ struct sctp_af {
544 int level, 544 int level,
545 int optname, 545 int optname,
546 char __user *optval, 546 char __user *optval,
547 int optlen); 547 unsigned int optlen);
548 int (*getsockopt) (struct sock *sk, 548 int (*getsockopt) (struct sock *sk,
549 int level, 549 int level,
550 int optname, 550 int optname,
@@ -554,7 +554,7 @@ struct sctp_af {
554 int level, 554 int level,
555 int optname, 555 int optname,
556 char __user *optval, 556 char __user *optval,
557 int optlen); 557 unsigned int optlen);
558 int (*compat_getsockopt) (struct sock *sk, 558 int (*compat_getsockopt) (struct sock *sk,
559 int level, 559 int level,
560 int optname, 560 int optname,
diff --git a/include/net/sock.h b/include/net/sock.h
index 950409dcec3d..1621935aad5b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -624,7 +624,7 @@ struct proto {
624 void (*shutdown)(struct sock *sk, int how); 624 void (*shutdown)(struct sock *sk, int how);
625 int (*setsockopt)(struct sock *sk, int level, 625 int (*setsockopt)(struct sock *sk, int level,
626 int optname, char __user *optval, 626 int optname, char __user *optval,
627 int optlen); 627 unsigned int optlen);
628 int (*getsockopt)(struct sock *sk, int level, 628 int (*getsockopt)(struct sock *sk, int level,
629 int optname, char __user *optval, 629 int optname, char __user *optval,
630 int __user *option); 630 int __user *option);
@@ -632,7 +632,7 @@ struct proto {
632 int (*compat_setsockopt)(struct sock *sk, 632 int (*compat_setsockopt)(struct sock *sk,
633 int level, 633 int level,
634 int optname, char __user *optval, 634 int optname, char __user *optval,
635 int optlen); 635 unsigned int optlen);
636 int (*compat_getsockopt)(struct sock *sk, 636 int (*compat_getsockopt)(struct sock *sk,
637 int level, 637 int level,
638 int optname, char __user *optval, 638 int optname, char __user *optval,
@@ -951,7 +951,7 @@ extern void sock_rfree(struct sk_buff *skb);
951 951
952extern int sock_setsockopt(struct socket *sock, int level, 952extern int sock_setsockopt(struct socket *sock, int level,
953 int op, char __user *optval, 953 int op, char __user *optval,
954 int optlen); 954 unsigned int optlen);
955 955
956extern int sock_getsockopt(struct socket *sock, int level, 956extern int sock_getsockopt(struct socket *sock, int level,
957 int op, char __user *optval, 957 int op, char __user *optval,
@@ -993,7 +993,7 @@ extern int sock_no_shutdown(struct socket *, int);
993extern int sock_no_getsockopt(struct socket *, int , int, 993extern int sock_no_getsockopt(struct socket *, int , int,
994 char __user *, int __user *); 994 char __user *, int __user *);
995extern int sock_no_setsockopt(struct socket *, int, int, 995extern int sock_no_setsockopt(struct socket *, int, int,
996 char __user *, int); 996 char __user *, unsigned int);
997extern int sock_no_sendmsg(struct kiocb *, struct socket *, 997extern int sock_no_sendmsg(struct kiocb *, struct socket *,
998 struct msghdr *, size_t); 998 struct msghdr *, size_t);
999extern int sock_no_recvmsg(struct kiocb *, struct socket *, 999extern int sock_no_recvmsg(struct kiocb *, struct socket *,
@@ -1015,11 +1015,11 @@ extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
1015extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock, 1015extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1016 struct msghdr *msg, size_t size, int flags); 1016 struct msghdr *msg, size_t size, int flags);
1017extern int sock_common_setsockopt(struct socket *sock, int level, int optname, 1017extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
1018 char __user *optval, int optlen); 1018 char __user *optval, unsigned int optlen);
1019extern int compat_sock_common_getsockopt(struct socket *sock, int level, 1019extern int compat_sock_common_getsockopt(struct socket *sock, int level,
1020 int optname, char __user *optval, int __user *optlen); 1020 int optname, char __user *optval, int __user *optlen);
1021extern int compat_sock_common_setsockopt(struct socket *sock, int level, 1021extern int compat_sock_common_setsockopt(struct socket *sock, int level,
1022 int optname, char __user *optval, int optlen); 1022 int optname, char __user *optval, unsigned int optlen);
1023 1023
1024extern void sk_common_release(struct sock *sk); 1024extern void sk_common_release(struct sock *sk);
1025 1025
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 56b76027b85e..03a49c703377 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -394,13 +394,13 @@ extern int tcp_getsockopt(struct sock *sk, int level,
394 int __user *optlen); 394 int __user *optlen);
395extern int tcp_setsockopt(struct sock *sk, int level, 395extern int tcp_setsockopt(struct sock *sk, int level,
396 int optname, char __user *optval, 396 int optname, char __user *optval,
397 int optlen); 397 unsigned int optlen);
398extern int compat_tcp_getsockopt(struct sock *sk, 398extern int compat_tcp_getsockopt(struct sock *sk,
399 int level, int optname, 399 int level, int optname,
400 char __user *optval, int __user *optlen); 400 char __user *optval, int __user *optlen);
401extern int compat_tcp_setsockopt(struct sock *sk, 401extern int compat_tcp_setsockopt(struct sock *sk,
402 int level, int optname, 402 int level, int optname,
403 char __user *optval, int optlen); 403 char __user *optval, unsigned int optlen);
404extern void tcp_set_keepalive(struct sock *sk, int val); 404extern void tcp_set_keepalive(struct sock *sk, int val);
405extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, 405extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk,
406 struct msghdr *msg, 406 struct msghdr *msg,
diff --git a/include/net/udp.h b/include/net/udp.h
index 5fb029f817a3..f98abd2ce709 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -144,7 +144,7 @@ extern unsigned int udp_poll(struct file *file, struct socket *sock,
144extern int udp_lib_getsockopt(struct sock *sk, int level, int optname, 144extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
145 char __user *optval, int __user *optlen); 145 char __user *optval, int __user *optlen);
146extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, 146extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
147 char __user *optval, int optlen, 147 char __user *optval, unsigned int optlen,
148 int (*push_pending_frames)(struct sock *)); 148 int (*push_pending_frames)(struct sock *));
149 149
150extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, 150extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index d86af94691c2..00405b5f624a 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -488,6 +488,39 @@ TRACE_EVENT(block_remap,
488 (unsigned long long)__entry->old_sector) 488 (unsigned long long)__entry->old_sector)
489); 489);
490 490
491TRACE_EVENT(block_rq_remap,
492
493 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
494 sector_t from),
495
496 TP_ARGS(q, rq, dev, from),
497
498 TP_STRUCT__entry(
499 __field( dev_t, dev )
500 __field( sector_t, sector )
501 __field( unsigned int, nr_sector )
502 __field( dev_t, old_dev )
503 __field( sector_t, old_sector )
504 __array( char, rwbs, 6 )
505 ),
506
507 TP_fast_assign(
508 __entry->dev = disk_devt(rq->rq_disk);
509 __entry->sector = blk_rq_pos(rq);
510 __entry->nr_sector = blk_rq_sectors(rq);
511 __entry->old_dev = dev;
512 __entry->old_sector = from;
513 blk_fill_rwbs_rq(__entry->rwbs, rq);
514 ),
515
516 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
517 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
518 (unsigned long long)__entry->sector,
519 __entry->nr_sector,
520 MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
521 (unsigned long long)__entry->old_sector)
522);
523
491#endif /* _TRACE_BLOCK_H */ 524#endif /* _TRACE_BLOCK_H */
492 525
493/* This part must be outside protection */ 526/* This part must be outside protection */
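Editor's note: the new block_rq_remap event above prints "<maj>,<min> <rwbs> <sector> + <nr> <- (<maj>,<min>) <sector>". A tiny userspace sketch of how that TP_printk line is assembled from the recorded fields; the device and sector values are invented samples.

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>   /* major(), minor(), makedev() */

int main(void)
{
	/* Invented sample values mirroring the TP_STRUCT__entry fields. */
	dev_t dev = makedev(253, 0);       /* e.g. dm-0 */
	dev_t old_dev = makedev(8, 16);    /* e.g. sdb  */
	unsigned long long sector = 2048, old_sector = 4096;
	unsigned int nr_sector = 8;
	const char *rwbs = "W";

	printf("%u,%u %s %llu + %u <- (%u,%u) %llu\n",
	       major(dev), minor(dev), rwbs, sector, nr_sector,
	       major(old_dev), minor(old_dev), old_sector);
	return 0;
}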
diff --git a/init/Kconfig b/init/Kconfig
index c7bac39d6c61..09c5c6431f42 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -921,6 +921,11 @@ config HAVE_PERF_EVENTS
921 help 921 help
922 See tools/perf/design.txt for details. 922 See tools/perf/design.txt for details.
923 923
924config PERF_USE_VMALLOC
925 bool
926 help
927 See tools/perf/design.txt for details
928
924menu "Kernel Performance Events And Counters" 929menu "Kernel Performance Events And Counters"
925 930
926config PERF_EVENTS 931config PERF_EVENTS
@@ -976,6 +981,19 @@ config PERF_COUNTERS
976 981
977 Say N if unsure. 982 Say N if unsure.
978 983
984config DEBUG_PERF_USE_VMALLOC
985 default n
986 bool "Debug: use vmalloc to back perf mmap() buffers"
987 depends on PERF_EVENTS && DEBUG_KERNEL
988 select PERF_USE_VMALLOC
989 help
990 Use vmalloc memory to back perf mmap() buffers.
991
992 Mostly useful for debugging the vmalloc code on platforms
993 that don't require it.
994
995 Say N if unsure.
996
979endmenu 997endmenu
980 998
981config VM_EVENT_COUNTERS 999config VM_EVENT_COUNTERS
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7ccba4bc5e3b..ca83b73fba19 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -703,7 +703,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
703static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); 703static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
704static int cgroup_populate_dir(struct cgroup *cgrp); 704static int cgroup_populate_dir(struct cgroup *cgrp);
705static const struct inode_operations cgroup_dir_inode_operations; 705static const struct inode_operations cgroup_dir_inode_operations;
706static struct file_operations proc_cgroupstats_operations; 706static const struct file_operations proc_cgroupstats_operations;
707 707
708static struct backing_dev_info cgroup_backing_dev_info = { 708static struct backing_dev_info cgroup_backing_dev_info = {
709 .name = "cgroup", 709 .name = "cgroup",
@@ -1863,7 +1863,7 @@ static int cgroup_seqfile_release(struct inode *inode, struct file *file)
1863 return single_release(inode, file); 1863 return single_release(inode, file);
1864} 1864}
1865 1865
1866static struct file_operations cgroup_seqfile_operations = { 1866static const struct file_operations cgroup_seqfile_operations = {
1867 .read = seq_read, 1867 .read = seq_read,
1868 .write = cgroup_file_write, 1868 .write = cgroup_file_write,
1869 .llseek = seq_lseek, 1869 .llseek = seq_lseek,
@@ -1922,7 +1922,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
1922 return simple_rename(old_dir, old_dentry, new_dir, new_dentry); 1922 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1923} 1923}
1924 1924
1925static struct file_operations cgroup_file_operations = { 1925static const struct file_operations cgroup_file_operations = {
1926 .read = cgroup_file_read, 1926 .read = cgroup_file_read,
1927 .write = cgroup_file_write, 1927 .write = cgroup_file_write,
1928 .llseek = generic_file_llseek, 1928 .llseek = generic_file_llseek,
@@ -3369,7 +3369,7 @@ static int cgroup_open(struct inode *inode, struct file *file)
3369 return single_open(file, proc_cgroup_show, pid); 3369 return single_open(file, proc_cgroup_show, pid);
3370} 3370}
3371 3371
3372struct file_operations proc_cgroup_operations = { 3372const struct file_operations proc_cgroup_operations = {
3373 .open = cgroup_open, 3373 .open = cgroup_open,
3374 .read = seq_read, 3374 .read = seq_read,
3375 .llseek = seq_lseek, 3375 .llseek = seq_lseek,
@@ -3398,7 +3398,7 @@ static int cgroupstats_open(struct inode *inode, struct file *file)
3398 return single_open(file, proc_cgroupstats_show, NULL); 3398 return single_open(file, proc_cgroupstats_show, NULL);
3399} 3399}
3400 3400
3401static struct file_operations proc_cgroupstats_operations = { 3401static const struct file_operations proc_cgroupstats_operations = {
3402 .open = cgroupstats_open, 3402 .open = cgroupstats_open,
3403 .read = seq_read, 3403 .read = seq_read,
3404 .llseek = seq_lseek, 3404 .llseek = seq_lseek,
@@ -3708,8 +3708,10 @@ static void check_for_release(struct cgroup *cgrp)
3708void __css_put(struct cgroup_subsys_state *css) 3708void __css_put(struct cgroup_subsys_state *css)
3709{ 3709{
3710 struct cgroup *cgrp = css->cgroup; 3710 struct cgroup *cgrp = css->cgroup;
3711 int val;
3711 rcu_read_lock(); 3712 rcu_read_lock();
3712 if (atomic_dec_return(&css->refcnt) == 1) { 3713 val = atomic_dec_return(&css->refcnt);
3714 if (val == 1) {
3713 if (notify_on_release(cgrp)) { 3715 if (notify_on_release(cgrp)) {
3714 set_bit(CGRP_RELEASABLE, &cgrp->flags); 3716 set_bit(CGRP_RELEASABLE, &cgrp->flags);
3715 check_for_release(cgrp); 3717 check_for_release(cgrp);
@@ -3717,6 +3719,7 @@ void __css_put(struct cgroup_subsys_state *css)
3717 cgroup_wakeup_rmdir_waiter(cgrp); 3719 cgroup_wakeup_rmdir_waiter(cgrp);
3718 } 3720 }
3719 rcu_read_unlock(); 3721 rcu_read_unlock();
3722 WARN_ON_ONCE(val < 1);
3720} 3723}
3721 3724
3722/* 3725/*
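Editor's note: the __css_put() change above captures atomic_dec_return()'s result so it can both drive the release logic and be sanity-checked after the RCU section (WARN_ON_ONCE(val < 1)). A compact illustration of that "read once, check later" pattern with C11 atomics; the starting count is arbitrary.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int refcnt = 2;
	int val;

	/* Capture the post-decrement value once... */
	val = atomic_fetch_sub(&refcnt, 1) - 1;

	if (val == 1)
		printf("last external reference dropped, notify releaser\n");

	/* ...and sanity-check it afterwards, like WARN_ON_ONCE(val < 1). */
	assert(val >= 1);
	printf("refcount now %d\n", val);
	return 0;
}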
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6d7020490f94..3e1c36e7998f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
726 /* "Retrigger" the interrupt to get things going */ 726 /* "Retrigger" the interrupt to get things going */
727 retrigger_next_event(NULL); 727 retrigger_next_event(NULL);
728 local_irq_restore(flags); 728 local_irq_restore(flags);
729 printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
730 smp_processor_id());
731 return 1; 729 return 1;
732} 730}
733 731
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index cfadc1291d0b..5240d75f4c60 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1333,7 +1333,7 @@ static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
1333 return seq_open(filp, &kprobes_seq_ops); 1333 return seq_open(filp, &kprobes_seq_ops);
1334} 1334}
1335 1335
1336static struct file_operations debugfs_kprobes_operations = { 1336static const struct file_operations debugfs_kprobes_operations = {
1337 .open = kprobes_open, 1337 .open = kprobes_open,
1338 .read = seq_read, 1338 .read = seq_read,
1339 .llseek = seq_lseek, 1339 .llseek = seq_lseek,
@@ -1515,7 +1515,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
1515 return count; 1515 return count;
1516} 1516}
1517 1517
1518static struct file_operations fops_kp = { 1518static const struct file_operations fops_kp = {
1519 .read = read_enabled_file_bool, 1519 .read = read_enabled_file_bool,
1520 .write = write_enabled_file_bool, 1520 .write = write_enabled_file_bool,
1521}; 1521};
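Editor's note: several hunks in this patch (cgroup, kprobes, rcutree_trace, sched, timer_list, timer_stats) simply add const to file_operations tables so the method pointers land in read-only data. A minimal model of the pattern outside the kernel, with made-up operation names.

#include <stdio.h>

struct file_operations_model {
	int  (*open)(const char *name);
	long (*read)(char *buf, long len);
};

static int demo_open(const char *name)
{
	printf("open %s\n", name);
	return 0;
}

static long demo_read(char *buf, long len)
{
	(void)buf;
	return len;
}

/* const: the table lives in .rodata and cannot be patched at run time. */
static const struct file_operations_model demo_fops = {
	.open = demo_open,
	.read = demo_read,
};

int main(void)
{
	char buf[16];

	demo_fops.open("demo");
	printf("read -> %ld\n", demo_fops.read(buf, sizeof(buf)));
	/* demo_fops.read = NULL;  would not compile: read-only member */
	return 0;
}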
diff --git a/kernel/module.c b/kernel/module.c
index fe748a86d452..8b7d8805819d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1992,12 +1992,14 @@ static inline unsigned long layout_symtab(struct module *mod,
1992 Elf_Shdr *sechdrs, 1992 Elf_Shdr *sechdrs,
1993 unsigned int symindex, 1993 unsigned int symindex,
1994 unsigned int strindex, 1994 unsigned int strindex,
1995 const Elf_Hdr *hdr, 1995 const Elf_Ehdr *hdr,
1996 const char *secstrings, 1996 const char *secstrings,
1997 unsigned long *pstroffs, 1997 unsigned long *pstroffs,
1998 unsigned long *strmap) 1998 unsigned long *strmap)
1999{ 1999{
2000 return 0;
2000} 2001}
2002
2001static inline void add_kallsyms(struct module *mod, 2003static inline void add_kallsyms(struct module *mod,
2002 Elf_Shdr *sechdrs, 2004 Elf_Shdr *sechdrs,
2003 unsigned int shnum, 2005 unsigned int shnum,
@@ -2081,9 +2083,8 @@ static noinline struct module *load_module(void __user *umod,
2081 struct module *mod; 2083 struct module *mod;
2082 long err = 0; 2084 long err = 0;
2083 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 2085 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
2084#ifdef CONFIG_KALLSYMS
2085 unsigned long symoffs, stroffs, *strmap; 2086 unsigned long symoffs, stroffs, *strmap;
2086#endif 2087
2087 mm_segment_t old_fs; 2088 mm_segment_t old_fs;
2088 2089
2089 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", 2090 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0f86feb6db0c..9d0b5c665883 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -20,6 +20,7 @@
20#include <linux/percpu.h> 20#include <linux/percpu.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/vmstat.h> 22#include <linux/vmstat.h>
23#include <linux/vmalloc.h>
23#include <linux/hardirq.h> 24#include <linux/hardirq.h>
24#include <linux/rculist.h> 25#include <linux/rculist.h>
25#include <linux/uaccess.h> 26#include <linux/uaccess.h>
@@ -1030,14 +1031,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
1030 update_context_time(ctx); 1031 update_context_time(ctx);
1031 1032
1032 perf_disable(); 1033 perf_disable();
1033 if (ctx->nr_active) { 1034 if (ctx->nr_active)
1034 list_for_each_entry(event, &ctx->group_list, group_entry) { 1035 list_for_each_entry(event, &ctx->group_list, group_entry)
1035 if (event != event->group_leader) 1036 group_sched_out(event, cpuctx, ctx);
1036 event_sched_out(event, cpuctx, ctx); 1037
1037 else
1038 group_sched_out(event, cpuctx, ctx);
1039 }
1040 }
1041 perf_enable(); 1038 perf_enable();
1042 out: 1039 out:
1043 spin_unlock(&ctx->lock); 1040 spin_unlock(&ctx->lock);
@@ -1258,12 +1255,8 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1258 if (event->cpu != -1 && event->cpu != cpu) 1255 if (event->cpu != -1 && event->cpu != cpu)
1259 continue; 1256 continue;
1260 1257
1261 if (event != event->group_leader) 1258 if (group_can_go_on(event, cpuctx, 1))
1262 event_sched_in(event, cpuctx, ctx, cpu); 1259 group_sched_in(event, cpuctx, ctx, cpu);
1263 else {
1264 if (group_can_go_on(event, cpuctx, 1))
1265 group_sched_in(event, cpuctx, ctx, cpu);
1266 }
1267 1260
1268 /* 1261 /*
1269 * If this pinned group hasn't been scheduled, 1262 * If this pinned group hasn't been scheduled,
@@ -1291,15 +1284,9 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1291 if (event->cpu != -1 && event->cpu != cpu) 1284 if (event->cpu != -1 && event->cpu != cpu)
1292 continue; 1285 continue;
1293 1286
1294 if (event != event->group_leader) { 1287 if (group_can_go_on(event, cpuctx, can_add_hw))
1295 if (event_sched_in(event, cpuctx, ctx, cpu)) 1288 if (group_sched_in(event, cpuctx, ctx, cpu))
1296 can_add_hw = 0; 1289 can_add_hw = 0;
1297 } else {
1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1300 can_add_hw = 0;
1301 }
1302 }
1303 } 1290 }
1304 perf_enable(); 1291 perf_enable();
1305 out: 1292 out:
@@ -2105,49 +2092,31 @@ unlock:
2105 rcu_read_unlock(); 2092 rcu_read_unlock();
2106} 2093}
2107 2094
2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2095static unsigned long perf_data_size(struct perf_mmap_data *data)
2109{ 2096{
2110 struct perf_event *event = vma->vm_file->private_data; 2097 return data->nr_pages << (PAGE_SHIFT + data->data_order);
2111 struct perf_mmap_data *data; 2098}
2112 int ret = VM_FAULT_SIGBUS;
2113
2114 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2115 if (vmf->pgoff == 0)
2116 ret = 0;
2117 return ret;
2118 }
2119
2120 rcu_read_lock();
2121 data = rcu_dereference(event->data);
2122 if (!data)
2123 goto unlock;
2124
2125 if (vmf->pgoff == 0) {
2126 vmf->page = virt_to_page(data->user_page);
2127 } else {
2128 int nr = vmf->pgoff - 1;
2129
2130 if ((unsigned)nr > data->nr_pages)
2131 goto unlock;
2132 2099
2133 if (vmf->flags & FAULT_FLAG_WRITE) 2100#ifndef CONFIG_PERF_USE_VMALLOC
2134 goto unlock;
2135 2101
2136 vmf->page = virt_to_page(data->data_pages[nr]); 2102/*
2137 } 2103 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2104 */
2138 2105
2139 get_page(vmf->page); 2106static struct page *
2140 vmf->page->mapping = vma->vm_file->f_mapping; 2107perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2141 vmf->page->index = vmf->pgoff; 2108{
2109 if (pgoff > data->nr_pages)
2110 return NULL;
2142 2111
2143 ret = 0; 2112 if (pgoff == 0)
2144unlock: 2113 return virt_to_page(data->user_page);
2145 rcu_read_unlock();
2146 2114
2147 return ret; 2115 return virt_to_page(data->data_pages[pgoff - 1]);
2148} 2116}
2149 2117
2150static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages) 2118static struct perf_mmap_data *
2119perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2151{ 2120{
2152 struct perf_mmap_data *data; 2121 struct perf_mmap_data *data;
2153 unsigned long size; 2122 unsigned long size;
@@ -2172,19 +2141,10 @@ static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2172 goto fail_data_pages; 2141 goto fail_data_pages;
2173 } 2142 }
2174 2143
2144 data->data_order = 0;
2175 data->nr_pages = nr_pages; 2145 data->nr_pages = nr_pages;
2176 atomic_set(&data->lock, -1);
2177
2178 if (event->attr.watermark) {
2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2180 event->attr.wakeup_watermark);
2181 }
2182 if (!data->watermark)
2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2184 2146
2185 rcu_assign_pointer(event->data, data); 2147 return data;
2186
2187 return 0;
2188 2148
2189fail_data_pages: 2149fail_data_pages:
2190 for (i--; i >= 0; i--) 2150 for (i--; i >= 0; i--)
@@ -2196,7 +2156,7 @@ fail_user_page:
2196 kfree(data); 2156 kfree(data);
2197 2157
2198fail: 2158fail:
2199 return -ENOMEM; 2159 return NULL;
2200} 2160}
2201 2161
2202static void perf_mmap_free_page(unsigned long addr) 2162static void perf_mmap_free_page(unsigned long addr)
@@ -2207,28 +2167,169 @@ static void perf_mmap_free_page(unsigned long addr)
2207 __free_page(page); 2167 __free_page(page);
2208} 2168}
2209 2169
2210static void __perf_mmap_data_free(struct rcu_head *rcu_head) 2170static void perf_mmap_data_free(struct perf_mmap_data *data)
2211{ 2171{
2212 struct perf_mmap_data *data;
2213 int i; 2172 int i;
2214 2173
2215 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2216
2217 perf_mmap_free_page((unsigned long)data->user_page); 2174 perf_mmap_free_page((unsigned long)data->user_page);
2218 for (i = 0; i < data->nr_pages; i++) 2175 for (i = 0; i < data->nr_pages; i++)
2219 perf_mmap_free_page((unsigned long)data->data_pages[i]); 2176 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2177}
2178
2179#else
2180
2181/*
2182 * Back perf_mmap() with vmalloc memory.
2183 *
2184 * Required for architectures that have d-cache aliasing issues.
2185 */
2186
2187static struct page *
2188perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2189{
2190 if (pgoff > (1UL << data->data_order))
2191 return NULL;
2192
2193 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2194}
2195
2196static void perf_mmap_unmark_page(void *addr)
2197{
2198 struct page *page = vmalloc_to_page(addr);
2199
2200 page->mapping = NULL;
2201}
2202
2203static void perf_mmap_data_free_work(struct work_struct *work)
2204{
2205 struct perf_mmap_data *data;
2206 void *base;
2207 int i, nr;
2208
2209 data = container_of(work, struct perf_mmap_data, work);
2210 nr = 1 << data->data_order;
2211
2212 base = data->user_page;
2213 for (i = 0; i < nr + 1; i++)
2214 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2215
2216 vfree(base);
2217}
2220 2218
2219static void perf_mmap_data_free(struct perf_mmap_data *data)
2220{
2221 schedule_work(&data->work);
2222}
2223
2224static struct perf_mmap_data *
2225perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2226{
2227 struct perf_mmap_data *data;
2228 unsigned long size;
2229 void *all_buf;
2230
2231 WARN_ON(atomic_read(&event->mmap_count));
2232
2233 size = sizeof(struct perf_mmap_data);
2234 size += sizeof(void *);
2235
2236 data = kzalloc(size, GFP_KERNEL);
2237 if (!data)
2238 goto fail;
2239
2240 INIT_WORK(&data->work, perf_mmap_data_free_work);
2241
2242 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2243 if (!all_buf)
2244 goto fail_all_buf;
2245
2246 data->user_page = all_buf;
2247 data->data_pages[0] = all_buf + PAGE_SIZE;
2248 data->data_order = ilog2(nr_pages);
2249 data->nr_pages = 1;
2250
2251 return data;
2252
2253fail_all_buf:
2254 kfree(data);
2255
2256fail:
2257 return NULL;
2258}
2259
2260#endif
2261
2262static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2263{
2264 struct perf_event *event = vma->vm_file->private_data;
2265 struct perf_mmap_data *data;
2266 int ret = VM_FAULT_SIGBUS;
2267
2268 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2269 if (vmf->pgoff == 0)
2270 ret = 0;
2271 return ret;
2272 }
2273
2274 rcu_read_lock();
2275 data = rcu_dereference(event->data);
2276 if (!data)
2277 goto unlock;
2278
2279 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2280 goto unlock;
2281
2282 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2283 if (!vmf->page)
2284 goto unlock;
2285
2286 get_page(vmf->page);
2287 vmf->page->mapping = vma->vm_file->f_mapping;
2288 vmf->page->index = vmf->pgoff;
2289
2290 ret = 0;
2291unlock:
2292 rcu_read_unlock();
2293
2294 return ret;
2295}
2296
2297static void
2298perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2299{
2300 long max_size = perf_data_size(data);
2301
2302 atomic_set(&data->lock, -1);
2303
2304 if (event->attr.watermark) {
2305 data->watermark = min_t(long, max_size,
2306 event->attr.wakeup_watermark);
2307 }
2308
2309 if (!data->watermark)
2310 data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
2311
2312
2313 rcu_assign_pointer(event->data, data);
2314}
2315
2316static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2317{
2318 struct perf_mmap_data *data;
2319
2320 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2321 perf_mmap_data_free(data);
2221 kfree(data); 2322 kfree(data);
2222} 2323}
2223 2324
2224static void perf_mmap_data_free(struct perf_event *event) 2325static void perf_mmap_data_release(struct perf_event *event)
2225{ 2326{
2226 struct perf_mmap_data *data = event->data; 2327 struct perf_mmap_data *data = event->data;
2227 2328
2228 WARN_ON(atomic_read(&event->mmap_count)); 2329 WARN_ON(atomic_read(&event->mmap_count));
2229 2330
2230 rcu_assign_pointer(event->data, NULL); 2331 rcu_assign_pointer(event->data, NULL);
2231 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2332 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2232} 2333}
2233 2334
2234static void perf_mmap_open(struct vm_area_struct *vma) 2335static void perf_mmap_open(struct vm_area_struct *vma)
@@ -2244,11 +2345,12 @@ static void perf_mmap_close(struct vm_area_struct *vma)
2244 2345
2245 WARN_ON_ONCE(event->ctx->parent_ctx); 2346 WARN_ON_ONCE(event->ctx->parent_ctx);
2246 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { 2347 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2348 unsigned long size = perf_data_size(event->data);
2247 struct user_struct *user = current_user(); 2349 struct user_struct *user = current_user();
2248 2350
2249 atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm); 2351 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2250 vma->vm_mm->locked_vm -= event->data->nr_locked; 2352 vma->vm_mm->locked_vm -= event->data->nr_locked;
2251 perf_mmap_data_free(event); 2353 perf_mmap_data_release(event);
2252 mutex_unlock(&event->mmap_mutex); 2354 mutex_unlock(&event->mmap_mutex);
2253 } 2355 }
2254} 2356}
@@ -2266,6 +2368,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2266 unsigned long user_locked, user_lock_limit; 2368 unsigned long user_locked, user_lock_limit;
2267 struct user_struct *user = current_user(); 2369 struct user_struct *user = current_user();
2268 unsigned long locked, lock_limit; 2370 unsigned long locked, lock_limit;
2371 struct perf_mmap_data *data;
2269 unsigned long vma_size; 2372 unsigned long vma_size;
2270 unsigned long nr_pages; 2373 unsigned long nr_pages;
2271 long user_extra, extra; 2374 long user_extra, extra;
@@ -2328,10 +2431,15 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2328 } 2431 }
2329 2432
2330 WARN_ON(event->data); 2433 WARN_ON(event->data);
2331 ret = perf_mmap_data_alloc(event, nr_pages); 2434
2332 if (ret) 2435 data = perf_mmap_data_alloc(event, nr_pages);
2436 ret = -ENOMEM;
2437 if (!data)
2333 goto unlock; 2438 goto unlock;
2334 2439
2440 ret = 0;
2441 perf_mmap_data_init(event, data);
2442
2335 atomic_set(&event->mmap_count, 1); 2443 atomic_set(&event->mmap_count, 1);
2336 atomic_long_add(user_extra, &user->locked_vm); 2444 atomic_long_add(user_extra, &user->locked_vm);
2337 vma->vm_mm->locked_vm += extra; 2445 vma->vm_mm->locked_vm += extra;
@@ -2519,7 +2627,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2519 if (!data->writable) 2627 if (!data->writable)
2520 return true; 2628 return true;
2521 2629
2522 mask = (data->nr_pages << PAGE_SHIFT) - 1; 2630 mask = perf_data_size(data) - 1;
2523 2631
2524 offset = (offset - tail) & mask; 2632 offset = (offset - tail) & mask;
2525 head = (head - tail) & mask; 2633 head = (head - tail) & mask;
@@ -2624,7 +2732,7 @@ void perf_output_copy(struct perf_output_handle *handle,
2624 const void *buf, unsigned int len) 2732 const void *buf, unsigned int len)
2625{ 2733{
2626 unsigned int pages_mask; 2734 unsigned int pages_mask;
2627 unsigned int offset; 2735 unsigned long offset;
2628 unsigned int size; 2736 unsigned int size;
2629 void **pages; 2737 void **pages;
2630 2738
@@ -2633,12 +2741,14 @@ void perf_output_copy(struct perf_output_handle *handle,
2633 pages = handle->data->data_pages; 2741 pages = handle->data->data_pages;
2634 2742
2635 do { 2743 do {
2636 unsigned int page_offset; 2744 unsigned long page_offset;
2745 unsigned long page_size;
2637 int nr; 2746 int nr;
2638 2747
2639 nr = (offset >> PAGE_SHIFT) & pages_mask; 2748 nr = (offset >> PAGE_SHIFT) & pages_mask;
2640 page_offset = offset & (PAGE_SIZE - 1); 2749 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2641 size = min_t(unsigned int, PAGE_SIZE - page_offset, len); 2750 page_offset = offset & (page_size - 1);
2751 size = min_t(unsigned int, page_size - page_offset, len);
2642 2752
2643 memcpy(pages[nr] + page_offset, buf, size); 2753 memcpy(pages[nr] + page_offset, buf, size);
2644 2754
@@ -4781,9 +4891,7 @@ int perf_event_init_task(struct task_struct *child)
4781 * We dont have to disable NMIs - we are only looking at 4891 * We dont have to disable NMIs - we are only looking at
4782 * the list, not manipulating it: 4892 * the list, not manipulating it:
4783 */ 4893 */
4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { 4894 list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
4785 if (event != event->group_leader)
4786 continue;
4787 4895
4788 if (!event->attr.inherit) { 4896 if (!event->attr.inherit) {
4789 inherited_all = 0; 4897 inherited_all = 0;
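Editor's note: in the vmalloc-backed perf mmap variant added above, one contiguous allocation is described as a single chunk of order ilog2(nr_pages), and perf_data_size() recovers the real buffer size from nr_pages and data_order. A quick userspace check of that arithmetic, assuming PAGE_SHIFT is 12 (4 KiB pages); the struct here is a cut-down model, not the kernel's perf_mmap_data.

#include <stdio.h>

#define PAGE_SHIFT 12UL   /* assumption: 4 KiB pages */

struct perf_mmap_data_model {
	int data_order;       /* log2 of pages per chunk */
	int nr_pages;         /* number of chunks */
};

static unsigned long perf_data_size(const struct perf_mmap_data_model *d)
{
	return (unsigned long)d->nr_pages << (PAGE_SHIFT + d->data_order);
}

static int ilog2_u(unsigned int v)
{
	int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	/* Page-backed case: 8 data pages of order 0. */
	struct perf_mmap_data_model pages = { .data_order = 0, .nr_pages = 8 };
	/* vmalloc-backed case: one chunk of order ilog2(8) covering the same span. */
	struct perf_mmap_data_model vmalloc_like = { .data_order = ilog2_u(8), .nr_pages = 1 };

	printf("page-backed size    : %lu bytes\n", perf_data_size(&pages));
	printf("vmalloc-backed size : %lu bytes\n", perf_data_size(&vmalloc_like));
	return 0;
}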
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index c89f5e9fd173..179e6ad80dc0 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -93,7 +93,7 @@ static int rcudata_open(struct inode *inode, struct file *file)
93 return single_open(file, show_rcudata, NULL); 93 return single_open(file, show_rcudata, NULL);
94} 94}
95 95
96static struct file_operations rcudata_fops = { 96static const struct file_operations rcudata_fops = {
97 .owner = THIS_MODULE, 97 .owner = THIS_MODULE,
98 .open = rcudata_open, 98 .open = rcudata_open,
99 .read = seq_read, 99 .read = seq_read,
@@ -145,7 +145,7 @@ static int rcudata_csv_open(struct inode *inode, struct file *file)
145 return single_open(file, show_rcudata_csv, NULL); 145 return single_open(file, show_rcudata_csv, NULL);
146} 146}
147 147
148static struct file_operations rcudata_csv_fops = { 148static const struct file_operations rcudata_csv_fops = {
149 .owner = THIS_MODULE, 149 .owner = THIS_MODULE,
150 .open = rcudata_csv_open, 150 .open = rcudata_csv_open,
151 .read = seq_read, 151 .read = seq_read,
@@ -196,7 +196,7 @@ static int rcuhier_open(struct inode *inode, struct file *file)
196 return single_open(file, show_rcuhier, NULL); 196 return single_open(file, show_rcuhier, NULL);
197} 197}
198 198
199static struct file_operations rcuhier_fops = { 199static const struct file_operations rcuhier_fops = {
200 .owner = THIS_MODULE, 200 .owner = THIS_MODULE,
201 .open = rcuhier_open, 201 .open = rcuhier_open,
202 .read = seq_read, 202 .read = seq_read,
@@ -222,7 +222,7 @@ static int rcugp_open(struct inode *inode, struct file *file)
222 return single_open(file, show_rcugp, NULL); 222 return single_open(file, show_rcugp, NULL);
223} 223}
224 224
225static struct file_operations rcugp_fops = { 225static const struct file_operations rcugp_fops = {
226 .owner = THIS_MODULE, 226 .owner = THIS_MODULE,
227 .open = rcugp_open, 227 .open = rcugp_open,
228 .read = seq_read, 228 .read = seq_read,
@@ -276,7 +276,7 @@ static int rcu_pending_open(struct inode *inode, struct file *file)
276 return single_open(file, show_rcu_pending, NULL); 276 return single_open(file, show_rcu_pending, NULL);
277} 277}
278 278
279static struct file_operations rcu_pending_fops = { 279static const struct file_operations rcu_pending_fops = {
280 .owner = THIS_MODULE, 280 .owner = THIS_MODULE,
281 .open = rcu_pending_open, 281 .open = rcu_pending_open,
282 .read = seq_read, 282 .read = seq_read,
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 88faec23e833..bcdabf37c40b 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -37,27 +37,17 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
37} 37}
38 38
39int res_counter_charge(struct res_counter *counter, unsigned long val, 39int res_counter_charge(struct res_counter *counter, unsigned long val,
40 struct res_counter **limit_fail_at, 40 struct res_counter **limit_fail_at)
41 struct res_counter **soft_limit_fail_at)
42{ 41{
43 int ret; 42 int ret;
44 unsigned long flags; 43 unsigned long flags;
45 struct res_counter *c, *u; 44 struct res_counter *c, *u;
46 45
47 *limit_fail_at = NULL; 46 *limit_fail_at = NULL;
48 if (soft_limit_fail_at)
49 *soft_limit_fail_at = NULL;
50 local_irq_save(flags); 47 local_irq_save(flags);
51 for (c = counter; c != NULL; c = c->parent) { 48 for (c = counter; c != NULL; c = c->parent) {
52 spin_lock(&c->lock); 49 spin_lock(&c->lock);
53 ret = res_counter_charge_locked(c, val); 50 ret = res_counter_charge_locked(c, val);
54 /*
55 * With soft limits, we return the highest ancestor
56 * that exceeds its soft limit
57 */
58 if (soft_limit_fail_at &&
59 !res_counter_soft_limit_check_locked(c))
60 *soft_limit_fail_at = c;
61 spin_unlock(&c->lock); 51 spin_unlock(&c->lock);
62 if (ret < 0) { 52 if (ret < 0) {
63 *limit_fail_at = c; 53 *limit_fail_at = c;
@@ -85,8 +75,7 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
85 counter->usage -= val; 75 counter->usage -= val;
86} 76}
87 77
88void res_counter_uncharge(struct res_counter *counter, unsigned long val, 78void res_counter_uncharge(struct res_counter *counter, unsigned long val)
89 bool *was_soft_limit_excess)
90{ 79{
91 unsigned long flags; 80 unsigned long flags;
92 struct res_counter *c; 81 struct res_counter *c;
@@ -94,9 +83,6 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val,
94 local_irq_save(flags); 83 local_irq_save(flags);
95 for (c = counter; c != NULL; c = c->parent) { 84 for (c = counter; c != NULL; c = c->parent) {
96 spin_lock(&c->lock); 85 spin_lock(&c->lock);
97 if (was_soft_limit_excess)
98 *was_soft_limit_excess =
99 !res_counter_soft_limit_check_locked(c);
100 res_counter_uncharge_locked(c, val); 86 res_counter_uncharge_locked(c, val);
101 spin_unlock(&c->lock); 87 spin_unlock(&c->lock);
102 } 88 }
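Editor's note: with the soft-limit plumbing removed above, res_counter_charge() is back to a plain walk up the parent chain that stops at the first counter over its limit and unwinds the charges taken so far. A simplified userspace model of that walk, with locking and irq handling omitted.

#include <errno.h>
#include <stdio.h>

struct res_counter {
	unsigned long usage, limit;
	struct res_counter *parent;
};

static int charge_locked(struct res_counter *c, unsigned long val)
{
	if (c->usage + val > c->limit)
		return -ENOMEM;
	c->usage += val;
	return 0;
}

static int res_counter_charge(struct res_counter *counter, unsigned long val,
			      struct res_counter **limit_fail_at)
{
	struct res_counter *c, *u;

	*limit_fail_at = NULL;
	for (c = counter; c != NULL; c = c->parent) {
		if (charge_locked(c, val) < 0) {
			*limit_fail_at = c;
			/* Unwind the charges taken so far. */
			for (u = counter; u != c; u = u->parent)
				u->usage -= val;
			return -ENOMEM;
		}
	}
	return 0;
}

int main(void)
{
	struct res_counter root  = { .limit = 100 };
	struct res_counter child = { .limit = 1000, .parent = &root };
	struct res_counter *fail;

	printf("charge 60 -> %d\n", res_counter_charge(&child, 60, &fail));
	printf("charge 60 -> %d (fails at %s)\n",
	       res_counter_charge(&child, 60, &fail),
	       fail == &root ? "root" : "child");
	printf("child usage=%lu root usage=%lu\n", child.usage, root.usage);
	return 0;
}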
diff --git a/kernel/sched.c b/kernel/sched.c
index ee61f454a98b..1535f3884b88 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -780,7 +780,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
780 return single_open(filp, sched_feat_show, NULL); 780 return single_open(filp, sched_feat_show, NULL);
781} 781}
782 782
783static struct file_operations sched_feat_fops = { 783static const struct file_operations sched_feat_fops = {
784 .open = sched_feat_open, 784 .open = sched_feat_open,
785 .write = sched_feat_write, 785 .write = sched_feat_write,
786 .read = seq_read, 786 .read = seq_read,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e0f59a21c061..89aed5933ed4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -231,6 +231,13 @@ void tick_nohz_stop_sched_tick(int inidle)
231 if (!inidle && !ts->inidle) 231 if (!inidle && !ts->inidle)
232 goto end; 232 goto end;
233 233
234 /*
235 * Set ts->inidle unconditionally. Even if the system did not
 236 * switch to NOHZ mode the cpu frequency governors rely on the

237 * update of the idle time accounting in tick_nohz_start_idle().
238 */
239 ts->inidle = 1;
240
234 now = tick_nohz_start_idle(ts); 241 now = tick_nohz_start_idle(ts);
235 242
236 /* 243 /*
@@ -248,8 +255,6 @@ void tick_nohz_stop_sched_tick(int inidle)
248 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 255 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
249 goto end; 256 goto end;
250 257
251 ts->inidle = 1;
252
253 if (need_resched()) 258 if (need_resched())
254 goto end; 259 goto end;
255 260
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index fddd69d16e03..1b5b7aa2fdfd 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -275,7 +275,7 @@ static int timer_list_open(struct inode *inode, struct file *filp)
275 return single_open(filp, timer_list_show, NULL); 275 return single_open(filp, timer_list_show, NULL);
276} 276}
277 277
278static struct file_operations timer_list_fops = { 278static const struct file_operations timer_list_fops = {
279 .open = timer_list_open, 279 .open = timer_list_open,
280 .read = seq_read, 280 .read = seq_read,
281 .llseek = seq_lseek, 281 .llseek = seq_lseek,
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 4cde8b9c716f..ee5681f8d7ec 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -395,7 +395,7 @@ static int tstats_open(struct inode *inode, struct file *filp)
395 return single_open(filp, tstats_show, NULL); 395 return single_open(filp, tstats_show, NULL);
396} 396}
397 397
398static struct file_operations tstats_fops = { 398static const struct file_operations tstats_fops = {
399 .open = tstats_open, 399 .open = tstats_open,
400 .read = seq_read, 400 .read = seq_read,
401 .write = tstats_write, 401 .write = tstats_write,
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c8..d9d6206e0b14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
856} 856}
857 857
858/** 858/**
859 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
860 * @q: queue the io is for
861 * @rq: the source request
862 * @dev: target device
863 * @from: source sector
864 *
865 * Description:
866 * Device mapper remaps requests to other devices.
867 * Add a trace for that action.
868 *
869 **/
870static void blk_add_trace_rq_remap(struct request_queue *q,
871 struct request *rq, dev_t dev,
872 sector_t from)
873{
874 struct blk_trace *bt = q->blk_trace;
875 struct blk_io_trace_remap r;
876
877 if (likely(!bt))
878 return;
879
880 r.device_from = cpu_to_be32(dev);
881 r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
882 r.sector_from = cpu_to_be64(from);
883
884 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
885 rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
886 sizeof(r), &r);
887}
888
889/**
859 * blk_add_driver_data - Add binary message with driver-specific data 890 * blk_add_driver_data - Add binary message with driver-specific data
860 * @q: queue the io is for 891 * @q: queue the io is for
861 * @rq: io request 892 * @rq: io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
922 WARN_ON(ret); 953 WARN_ON(ret);
923 ret = register_trace_block_remap(blk_add_trace_remap); 954 ret = register_trace_block_remap(blk_add_trace_remap);
924 WARN_ON(ret); 955 WARN_ON(ret);
956 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
957 WARN_ON(ret);
925} 958}
926 959
927static void blk_unregister_tracepoints(void) 960static void blk_unregister_tracepoints(void)
928{ 961{
962 unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
929 unregister_trace_block_remap(blk_add_trace_remap); 963 unregister_trace_block_remap(blk_add_trace_remap);
930 unregister_trace_block_split(blk_add_trace_split); 964 unregister_trace_block_split(blk_add_trace_split);
931 unregister_trace_block_unplug_io(blk_add_trace_unplug_io); 965 unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
1657 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); 1691 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1658} 1692}
1659 1693
1694void blk_trace_remove_sysfs(struct device *dev)
1695{
1696 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1697}
1698
1660#endif /* CONFIG_BLK_DEV_IO_TRACE */ 1699#endif /* CONFIG_BLK_DEV_IO_TRACE */
1661 1700
1662#ifdef CONFIG_EVENT_TRACING 1701#ifdef CONFIG_EVENT_TRACING
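
Editor's note: the kerneldoc above describes the blktrace consumer; the producer is whichever request-based remapper fires the block_rq_remap tracepoint after redirecting a clone. A hedged sketch of such a call site follows — per the handler above, the dev_t argument ends up in device_from and the clone's disk in device_to; the names "clone", "orig_rq" and "orig_dev" are illustrative only, the real device-mapper call site differs.

/* Hedged sketch (not from this patch) of the producer side of the new
 * tracepoint.  "clone" is the request already pointed at the underlying
 * device, "orig_rq"/"orig_dev" describe where the I/O originally came from. */
#include <linux/blkdev.h>
#include <trace/events/block.h>

static void example_remap_done(struct request *clone, struct request *orig_rq,
                               dev_t orig_dev)
{
        trace_block_rq_remap(clone->q, clone, orig_dev, blk_rq_pos(orig_rq));
}
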
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46592feab5a6..37ba67e33265 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -225,7 +225,11 @@ static void ftrace_update_pid_func(void)
225 if (ftrace_trace_function == ftrace_stub) 225 if (ftrace_trace_function == ftrace_stub)
226 return; 226 return;
227 227
228#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
228 func = ftrace_trace_function; 229 func = ftrace_trace_function;
230#else
231 func = __ftrace_trace_function;
232#endif
229 233
230 if (ftrace_pid_trace) { 234 if (ftrace_pid_trace) {
231 set_ftrace_pid_function(func); 235 set_ftrace_pid_function(func);
@@ -1074,14 +1078,9 @@ static void ftrace_replace_code(int enable)
1074 failed = __ftrace_replace_code(rec, enable); 1078 failed = __ftrace_replace_code(rec, enable);
1075 if (failed) { 1079 if (failed) {
1076 rec->flags |= FTRACE_FL_FAILED; 1080 rec->flags |= FTRACE_FL_FAILED;
1077 if ((system_state == SYSTEM_BOOTING) || 1081 ftrace_bug(failed, rec->ip);
1078 !core_kernel_text(rec->ip)) { 1082 /* Stop processing */
1079 ftrace_free_rec(rec); 1083 return;
1080 } else {
1081 ftrace_bug(failed, rec->ip);
1082 /* Stop processing */
1083 return;
1084 }
1085 } 1084 }
1086 } while_for_each_ftrace_rec(); 1085 } while_for_each_ftrace_rec();
1087} 1086}
@@ -2658,19 +2657,17 @@ static int ftrace_convert_nops(struct module *mod,
2658} 2657}
2659 2658
2660#ifdef CONFIG_MODULES 2659#ifdef CONFIG_MODULES
2661void ftrace_release(void *start, void *end) 2660void ftrace_release_mod(struct module *mod)
2662{ 2661{
2663 struct dyn_ftrace *rec; 2662 struct dyn_ftrace *rec;
2664 struct ftrace_page *pg; 2663 struct ftrace_page *pg;
2665 unsigned long s = (unsigned long)start;
2666 unsigned long e = (unsigned long)end;
2667 2664
2668 if (ftrace_disabled || !start || start == end) 2665 if (ftrace_disabled)
2669 return; 2666 return;
2670 2667
2671 mutex_lock(&ftrace_lock); 2668 mutex_lock(&ftrace_lock);
2672 do_for_each_ftrace_rec(pg, rec) { 2669 do_for_each_ftrace_rec(pg, rec) {
2673 if ((rec->ip >= s) && (rec->ip < e)) { 2670 if (within_module_core(rec->ip, mod)) {
2674 /* 2671 /*
2675 * rec->ip is changed in ftrace_free_rec() 2672 * rec->ip is changed in ftrace_free_rec()
2676 * It should not be between s and e if record was freed. 2673 * It should not be between s and e if record was freed.
@@ -2702,9 +2699,7 @@ static int ftrace_module_notify(struct notifier_block *self,
2702 mod->num_ftrace_callsites); 2699 mod->num_ftrace_callsites);
2703 break; 2700 break;
2704 case MODULE_STATE_GOING: 2701 case MODULE_STATE_GOING:
2705 ftrace_release(mod->ftrace_callsites, 2702 ftrace_release_mod(mod);
2706 mod->ftrace_callsites +
2707 mod->num_ftrace_callsites);
2708 break; 2703 break;
2709 } 2704 }
2710 2705
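
Editor's note: ftrace_release_mod() now asks the module's core mapping directly instead of taking an address range from the caller. As a rough sketch (field names assumed from the 2.6.32-era struct module; see <linux/module.h> for the real helper), within_module_core() boils down to a bounds check:

/* Rough sketch, not from the patch: what the within_module_core() test
 * amounts to.  module_core/core_size are assumed from the struct module of
 * this era. */
#include <linux/module.h>

static inline int example_within_module_core(unsigned long addr,
                                             struct module *mod)
{
        return (unsigned long)mod->module_core <= addr &&
               addr < (unsigned long)mod->module_core + mod->core_size;
}
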
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
501 return 1; 501 return 1;
502 } 502 }
503 503
504 if (!register_tracer(&kmem_tracer)) { 504 if (register_tracer(&kmem_tracer) != 0) {
505 pr_warning("Warning: could not register the kmem tracer\n"); 505 pr_warning("Warning: could not register the kmem tracer\n");
506 return 1; 506 return 1;
507 } 507 }
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a9..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
34 struct trace_array *tr = branch_tracer; 34 struct trace_array *tr = branch_tracer;
35 struct ring_buffer_event *event; 35 struct ring_buffer_event *event;
36 struct trace_branch *entry; 36 struct trace_branch *entry;
37 struct ring_buffer *buffer;
37 unsigned long flags; 38 unsigned long flags;
38 int cpu, pc; 39 int cpu, pc;
39 const char *p; 40 const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
54 goto out; 55 goto out;
55 56
56 pc = preempt_count(); 57 pc = preempt_count();
57 event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, 58 buffer = tr->buffer;
59 event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
58 sizeof(*entry), flags, pc); 60 sizeof(*entry), flags, pc);
59 if (!event) 61 if (!event)
60 goto out; 62 goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
74 entry->line = f->line; 76 entry->line = f->line;
75 entry->correct = val == expect; 77 entry->correct = val == expect;
76 78
77 if (!filter_check_discard(call, entry, tr->buffer, event)) 79 if (!filter_check_discard(call, entry, buffer, event))
78 ring_buffer_unlock_commit(tr->buffer, event); 80 ring_buffer_unlock_commit(buffer, event);
79 81
80 out: 82 out:
81 atomic_dec(&tr->data[cpu]->disabled); 83 atomic_dec(&tr->data[cpu]->disabled);
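
Editor's note: the point of caching tr->buffer is that reserve, filter and commit must all act on the same struct ring_buffer. A hedged sketch of that pattern, using the signatures visible in the hunk; it assumes the tracer-internal kernel/trace/trace.h context, and "my_tr", "my_call" and the fill-in step are placeholders.

/* Hedged sketch (not from the patch): snapshot the ring buffer pointer once
 * and use that same pointer for every step, so an event cannot be reserved
 * on one buffer and committed on another. */
static void example_trace_branch(struct trace_array *my_tr,
                                 struct ftrace_event_call *my_call,
                                 unsigned long flags, int pc)
{
        struct ring_buffer *buffer = my_tr->buffer;     /* snapshot once */
        struct ring_buffer_event *event;
        struct trace_branch *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        /* ... fill in *entry from the probed branch ... */

        if (!filter_check_discard(my_call, entry, buffer, event))
                ring_buffer_unlock_commit(buffer, event);
}
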
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..8d5c171cc998 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -31,7 +31,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
31 if (atomic_inc_return(&event->profile_count)) 31 if (atomic_inc_return(&event->profile_count))
32 return 0; 32 return 0;
33 33
34 if (!total_profile_count++) { 34 if (!total_profile_count) {
35 buf = (char *)alloc_percpu(profile_buf_t); 35 buf = (char *)alloc_percpu(profile_buf_t);
36 if (!buf) 36 if (!buf)
37 goto fail_buf; 37 goto fail_buf;
@@ -46,14 +46,19 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
46 } 46 }
47 47
48 ret = event->profile_enable(); 48 ret = event->profile_enable();
49 if (!ret) 49 if (!ret) {
50 total_profile_count++;
50 return 0; 51 return 0;
52 }
51 53
52 kfree(trace_profile_buf_nmi);
53fail_buf_nmi: 54fail_buf_nmi:
54 kfree(trace_profile_buf); 55 if (!total_profile_count) {
56 free_percpu(trace_profile_buf_nmi);
57 free_percpu(trace_profile_buf);
58 trace_profile_buf_nmi = NULL;
59 trace_profile_buf = NULL;
60 }
55fail_buf: 61fail_buf:
56 total_profile_count--;
57 atomic_dec(&event->profile_count); 62 atomic_dec(&event->profile_count);
58 63
59 return ret; 64 return ret;
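
Editor's note: the corrected error path counts a user only after profile_enable() succeeds, pairs alloc_percpu() with free_percpu(), and tears the shared buffers down only when the failed caller would have been the first user. A generic sketch of that first-user pattern; every name here is a hypothetical stand-in.

/* Generic sketch (not from the patch) of "allocate on first user, free only
 * if nobody else made it". */
#include <linux/percpu.h>
#include <linux/errno.h>

static char *example_buf;
static int example_users;

static int example_enable_hw(void)
{
        return 0;                       /* stand-in for the real enable step */
}

static int example_enable(void)
{
        int ret;

        if (!example_users) {
                example_buf = (char *)alloc_percpu(unsigned long);
                if (!example_buf)
                        return -ENOMEM;
        }

        ret = example_enable_hw();
        if (!ret) {
                example_users++;        /* count the user only on success */
                return 0;
        }

        if (!example_users) {           /* we would have been the first user */
                free_percpu(example_buf);       /* matches alloc_percpu() */
                example_buf = NULL;
        }
        return ret;
}
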
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 23b63859130e..69543a905cd5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
165 struct ftrace_event_call *call = &event_hw_branch; 165 struct ftrace_event_call *call = &event_hw_branch;
166 struct trace_array *tr = hw_branch_trace; 166 struct trace_array *tr = hw_branch_trace;
167 struct ring_buffer_event *event; 167 struct ring_buffer_event *event;
168 struct ring_buffer *buf;
168 struct hw_branch_entry *entry; 169 struct hw_branch_entry *entry;
169 unsigned long irq1; 170 unsigned long irq1;
170 int cpu; 171 int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
180 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) 181 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
181 goto out; 182 goto out;
182 183
183 event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, 184 buf = tr->buffer;
185 event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
184 sizeof(*entry), 0, 0); 186 sizeof(*entry), 0, 0);
185 if (!event) 187 if (!event)
186 goto out; 188 goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
189 entry->ent.type = TRACE_HW_BRANCHES; 191 entry->ent.type = TRACE_HW_BRANCHES;
190 entry->from = from; 192 entry->from = from;
191 entry->to = to; 193 entry->to = to;
192 if (!filter_check_discard(call, entry, tr->buffer, event)) 194 if (!filter_check_discard(call, entry, buf, event))
193 trace_buffer_unlock_commit(tr, event, 0, 0); 195 trace_buffer_unlock_commit(buf, event, 0, 0);
194 196
195 out: 197 out:
196 atomic_dec(&tr->data[cpu]->disabled); 198 atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1e..ed17565826b0 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -486,16 +486,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
486 hardirq ? 'h' : softirq ? 's' : '.')) 486 hardirq ? 'h' : softirq ? 's' : '.'))
487 return 0; 487 return 0;
488 488
489 if (entry->lock_depth < 0) 489 if (entry->preempt_count)
490 ret = trace_seq_putc(s, '.'); 490 ret = trace_seq_printf(s, "%x", entry->preempt_count);
491 else 491 else
492 ret = trace_seq_printf(s, "%d", entry->lock_depth); 492 ret = trace_seq_putc(s, '.');
493
493 if (!ret) 494 if (!ret)
494 return 0; 495 return 0;
495 496
496 if (entry->preempt_count) 497 if (entry->lock_depth < 0)
497 return trace_seq_printf(s, "%x", entry->preempt_count); 498 return trace_seq_putc(s, '.');
498 return trace_seq_putc(s, '.'); 499
500 return trace_seq_printf(s, "%d", entry->lock_depth);
499} 501}
500 502
501static int 503static int
@@ -883,7 +885,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
883 trace_assign_type(field, iter->ent); 885 trace_assign_type(field, iter->ent);
884 886
885 if (!S) 887 if (!S)
886 task_state_char(field->prev_state); 888 S = task_state_char(field->prev_state);
887 T = task_state_char(field->next_state); 889 T = task_state_char(field->next_state);
888 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", 890 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
889 field->prev_pid, 891 field->prev_pid,
@@ -918,7 +920,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
918 trace_assign_type(field, iter->ent); 920 trace_assign_type(field, iter->ent);
919 921
920 if (!S) 922 if (!S)
921 task_state_char(field->prev_state); 923 S = task_state_char(field->prev_state);
922 T = task_state_char(field->next_state); 924 T = task_state_char(field->next_state);
923 925
924 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 926 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9fbce6c9d2e1..527e17eae575 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -166,7 +166,7 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", 167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
168 SYSCALL_FIELD(int, nr), 168 SYSCALL_FIELD(int, nr),
169 SYSCALL_FIELD(unsigned long, ret)); 169 SYSCALL_FIELD(long, ret));
170 if (!ret) 170 if (!ret)
171 return 0; 171 return 0;
172 172
@@ -212,7 +212,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
212 if (ret) 212 if (ret)
213 return ret; 213 return ret;
214 214
215 ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0, 215 ret = trace_define_field(call, SYSCALL_FIELD(long, ret), 0,
216 FILTER_OTHER); 216 FILTER_OTHER);
217 217
218 return ret; 218 return ret;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index b91839e9e892..33bed5e67a21 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1771,7 +1771,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1771 * advance both strings to next white space 1771 * advance both strings to next white space
1772 */ 1772 */
1773 if (*fmt == '*') { 1773 if (*fmt == '*') {
1774 while (!isspace(*fmt) && *fmt) 1774 while (!isspace(*fmt) && *fmt != '%' && *fmt)
1775 fmt++; 1775 fmt++;
1776 while (!isspace(*str) && *str) 1776 while (!isspace(*str) && *str)
1777 str++; 1777 str++;
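
Editor's note: the extra `*fmt != '%'` test matters when a suppressed conversion is immediately followed by another directive, e.g. "%*s%d": without it the skip loop also swallows the "%d". A standalone sketch of the corrected skip loop (plain C, outside the kernel; example_skip_suppressed is a made-up name):

/* Standalone sketch, not kernel code: the corrected skip loop for a "%*"
 * (assign-suppressed) directive must stop at the next '%' so a following
 * conversion is still processed. */
#include <ctype.h>

static const char *example_skip_suppressed(const char *fmt)
{
        while (!isspace((unsigned char)*fmt) && *fmt != '%' && *fmt)
                fmt++;          /* advance over "*s" but stop at the next '%' */
        return fmt;
}
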
diff --git a/mm/Kconfig b/mm/Kconfig
index edd300aca173..57963c6063d1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -224,7 +224,9 @@ config KSM
224 the many instances by a single resident page with that content, so 224 the many instances by a single resident page with that content, so
225 saving memory until one or another app needs to modify the content. 225 saving memory until one or another app needs to modify the content.
226 Recommended for use with KVM, or with other duplicative applications. 226 Recommended for use with KVM, or with other duplicative applications.
227 See Documentation/vm/ksm.txt for more information. 227 See Documentation/vm/ksm.txt for more information: KSM is inactive
228 until a program has madvised that an area is MADV_MERGEABLE, and
229 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
228 230
229config DEFAULT_MMAP_MIN_ADDR 231config DEFAULT_MMAP_MIN_ADDR
230 int "Low address space to protect from user allocation" 232 int "Low address space to protect from user allocation"
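
Editor's note: the new help text names the two switches a deployment needs — the application must madvise its ranges MADV_MERGEABLE, and root must write 1 to /sys/kernel/mm/ksm/run. A small user-space sketch of the application side follows; MADV_MERGEABLE is 12 on Linux, and older libc headers may not define it yet, so the sketch defines it if missing.

/* User-space sketch (not from the patch): mark an anonymous region as
 * mergeable so KSM may deduplicate it once root has enabled the daemon. */
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

#ifndef MADV_MERGEABLE
#define MADV_MERGEABLE 12
#endif

int main(void)
{
        size_t len = 16 * 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        memset(p, 0, len);                      /* identical pages: good KSM fodder */
        if (madvise(p, len, MADV_MERGEABLE))    /* opt this range in to KSM */
                perror("madvise(MADV_MERGEABLE)");
        return 0;
}
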
diff --git a/mm/ksm.c b/mm/ksm.c
index f7edac356f46..bef1af4f77e3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -184,11 +184,6 @@ static DEFINE_SPINLOCK(ksm_mmlist_lock);
184 sizeof(struct __struct), __alignof__(struct __struct),\ 184 sizeof(struct __struct), __alignof__(struct __struct),\
185 (__flags), NULL) 185 (__flags), NULL)
186 186
187static void __init ksm_init_max_kernel_pages(void)
188{
189 ksm_max_kernel_pages = nr_free_buffer_pages() / 4;
190}
191
192static int __init ksm_slab_init(void) 187static int __init ksm_slab_init(void)
193{ 188{
194 rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); 189 rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1673,7 +1668,7 @@ static int __init ksm_init(void)
1673 struct task_struct *ksm_thread; 1668 struct task_struct *ksm_thread;
1674 int err; 1669 int err;
1675 1670
1676 ksm_init_max_kernel_pages(); 1671 ksm_max_kernel_pages = totalram_pages / 4;
1677 1672
1678 err = ksm_slab_init(); 1673 err = ksm_slab_init();
1679 if (err) 1674 if (err)
@@ -1697,6 +1692,9 @@ static int __init ksm_init(void)
1697 kthread_stop(ksm_thread); 1692 kthread_stop(ksm_thread);
1698 goto out_free2; 1693 goto out_free2;
1699 } 1694 }
1695#else
1696 ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
1697
1700#endif /* CONFIG_SYSFS */ 1698#endif /* CONFIG_SYSFS */
1701 1699
1702 return 0; 1700 return 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e2b98a6875c0..f99f5991d6bb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -313,7 +313,8 @@ soft_limit_tree_from_page(struct page *page)
313static void 313static void
314__mem_cgroup_insert_exceeded(struct mem_cgroup *mem, 314__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
315 struct mem_cgroup_per_zone *mz, 315 struct mem_cgroup_per_zone *mz,
316 struct mem_cgroup_tree_per_zone *mctz) 316 struct mem_cgroup_tree_per_zone *mctz,
317 unsigned long long new_usage_in_excess)
317{ 318{
318 struct rb_node **p = &mctz->rb_root.rb_node; 319 struct rb_node **p = &mctz->rb_root.rb_node;
319 struct rb_node *parent = NULL; 320 struct rb_node *parent = NULL;
@@ -322,7 +323,9 @@ __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
322 if (mz->on_tree) 323 if (mz->on_tree)
323 return; 324 return;
324 325
325 mz->usage_in_excess = res_counter_soft_limit_excess(&mem->res); 326 mz->usage_in_excess = new_usage_in_excess;
327 if (!mz->usage_in_excess)
328 return;
326 while (*p) { 329 while (*p) {
327 parent = *p; 330 parent = *p;
328 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, 331 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
@@ -353,16 +356,6 @@ __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
353} 356}
354 357
355static void 358static void
356mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
357 struct mem_cgroup_per_zone *mz,
358 struct mem_cgroup_tree_per_zone *mctz)
359{
360 spin_lock(&mctz->lock);
361 __mem_cgroup_insert_exceeded(mem, mz, mctz);
362 spin_unlock(&mctz->lock);
363}
364
365static void
366mem_cgroup_remove_exceeded(struct mem_cgroup *mem, 359mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
367 struct mem_cgroup_per_zone *mz, 360 struct mem_cgroup_per_zone *mz,
368 struct mem_cgroup_tree_per_zone *mctz) 361 struct mem_cgroup_tree_per_zone *mctz)
@@ -392,34 +385,36 @@ static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
392 385
393static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) 386static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
394{ 387{
395 unsigned long long prev_usage_in_excess, new_usage_in_excess; 388 unsigned long long excess;
396 bool updated_tree = false;
397 struct mem_cgroup_per_zone *mz; 389 struct mem_cgroup_per_zone *mz;
398 struct mem_cgroup_tree_per_zone *mctz; 390 struct mem_cgroup_tree_per_zone *mctz;
399 391 int nid = page_to_nid(page);
400 mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page)); 392 int zid = page_zonenum(page);
401 mctz = soft_limit_tree_from_page(page); 393 mctz = soft_limit_tree_from_page(page);
402 394
403 /* 395 /*
404 * We do updates in lazy mode, mem's are removed 396 * Necessary to update all ancestors when hierarchy is used,
405 * lazily from the per-zone, per-node rb tree 397 * because their event counter is not touched.
406 */ 398 */
407 prev_usage_in_excess = mz->usage_in_excess; 399 for (; mem; mem = parent_mem_cgroup(mem)) {
408 400 mz = mem_cgroup_zoneinfo(mem, nid, zid);
409 new_usage_in_excess = res_counter_soft_limit_excess(&mem->res); 401 excess = res_counter_soft_limit_excess(&mem->res);
410 if (prev_usage_in_excess) { 402 /*
411 mem_cgroup_remove_exceeded(mem, mz, mctz); 403 * We have to update the tree if mz is on RB-tree or
412 updated_tree = true; 404 * mem is over its softlimit.
413 } 405 */
414 if (!new_usage_in_excess) 406 if (excess || mz->on_tree) {
415 goto done; 407 spin_lock(&mctz->lock);
416 mem_cgroup_insert_exceeded(mem, mz, mctz); 408 /* if on-tree, remove it */
417 409 if (mz->on_tree)
418done: 410 __mem_cgroup_remove_exceeded(mem, mz, mctz);
419 if (updated_tree) { 411 /*
420 spin_lock(&mctz->lock); 412 * Insert again. mz->usage_in_excess will be updated.
421 mz->usage_in_excess = new_usage_in_excess; 413 * If excess is 0, no tree ops.
422 spin_unlock(&mctz->lock); 414 */
415 __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
416 spin_unlock(&mctz->lock);
417 }
423 } 418 }
424} 419}
425 420
@@ -447,9 +442,10 @@ static struct mem_cgroup_per_zone *
447__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 442__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
448{ 443{
449 struct rb_node *rightmost = NULL; 444 struct rb_node *rightmost = NULL;
450 struct mem_cgroup_per_zone *mz = NULL; 445 struct mem_cgroup_per_zone *mz;
451 446
452retry: 447retry:
448 mz = NULL;
453 rightmost = rb_last(&mctz->rb_root); 449 rightmost = rb_last(&mctz->rb_root);
454 if (!rightmost) 450 if (!rightmost)
455 goto done; /* Nothing to reclaim from */ 451 goto done; /* Nothing to reclaim from */
@@ -1270,9 +1266,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1270 gfp_t gfp_mask, struct mem_cgroup **memcg, 1266 gfp_t gfp_mask, struct mem_cgroup **memcg,
1271 bool oom, struct page *page) 1267 bool oom, struct page *page)
1272{ 1268{
1273 struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit; 1269 struct mem_cgroup *mem, *mem_over_limit;
1274 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; 1270 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1275 struct res_counter *fail_res, *soft_fail_res = NULL; 1271 struct res_counter *fail_res;
1276 1272
1277 if (unlikely(test_thread_flag(TIF_MEMDIE))) { 1273 if (unlikely(test_thread_flag(TIF_MEMDIE))) {
1278 /* Don't account this! */ 1274 /* Don't account this! */
@@ -1304,17 +1300,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1304 1300
1305 if (mem_cgroup_is_root(mem)) 1301 if (mem_cgroup_is_root(mem))
1306 goto done; 1302 goto done;
1307 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res, 1303 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
1308 &soft_fail_res);
1309 if (likely(!ret)) { 1304 if (likely(!ret)) {
1310 if (!do_swap_account) 1305 if (!do_swap_account)
1311 break; 1306 break;
1312 ret = res_counter_charge(&mem->memsw, PAGE_SIZE, 1307 ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
1313 &fail_res, NULL); 1308 &fail_res);
1314 if (likely(!ret)) 1309 if (likely(!ret))
1315 break; 1310 break;
1316 /* mem+swap counter fails */ 1311 /* mem+swap counter fails */
1317 res_counter_uncharge(&mem->res, PAGE_SIZE, NULL); 1312 res_counter_uncharge(&mem->res, PAGE_SIZE);
1318 flags |= MEM_CGROUP_RECLAIM_NOSWAP; 1313 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1319 mem_over_limit = mem_cgroup_from_res_counter(fail_res, 1314 mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1320 memsw); 1315 memsw);
@@ -1353,16 +1348,11 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
1353 } 1348 }
1354 } 1349 }
1355 /* 1350 /*
1356 * Insert just the ancestor, we should trickle down to the correct 1351 * Insert ancestor (and ancestor's ancestors) into the softlimit RB-tree
1357 * cgroup for reclaim, since the other nodes will be below their 1352 * if they exceed the softlimit.
1358 * soft limit
1359 */ 1353 */
1360 if (soft_fail_res) { 1354 if (mem_cgroup_soft_limit_check(mem))
1361 mem_over_soft_limit = 1355 mem_cgroup_update_tree(mem, page);
1362 mem_cgroup_from_res_counter(soft_fail_res, res);
1363 if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
1364 mem_cgroup_update_tree(mem_over_soft_limit, page);
1365 }
1366done: 1356done:
1367 return 0; 1357 return 0;
1368nomem: 1358nomem:
@@ -1437,10 +1427,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1437 if (unlikely(PageCgroupUsed(pc))) { 1427 if (unlikely(PageCgroupUsed(pc))) {
1438 unlock_page_cgroup(pc); 1428 unlock_page_cgroup(pc);
1439 if (!mem_cgroup_is_root(mem)) { 1429 if (!mem_cgroup_is_root(mem)) {
1440 res_counter_uncharge(&mem->res, PAGE_SIZE, NULL); 1430 res_counter_uncharge(&mem->res, PAGE_SIZE);
1441 if (do_swap_account) 1431 if (do_swap_account)
1442 res_counter_uncharge(&mem->memsw, PAGE_SIZE, 1432 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1443 NULL);
1444 } 1433 }
1445 css_put(&mem->css); 1434 css_put(&mem->css);
1446 return; 1435 return;
@@ -1519,7 +1508,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
1519 goto out; 1508 goto out;
1520 1509
1521 if (!mem_cgroup_is_root(from)) 1510 if (!mem_cgroup_is_root(from))
1522 res_counter_uncharge(&from->res, PAGE_SIZE, NULL); 1511 res_counter_uncharge(&from->res, PAGE_SIZE);
1523 mem_cgroup_charge_statistics(from, pc, false); 1512 mem_cgroup_charge_statistics(from, pc, false);
1524 1513
1525 page = pc->page; 1514 page = pc->page;
@@ -1539,7 +1528,7 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
1539 } 1528 }
1540 1529
1541 if (do_swap_account && !mem_cgroup_is_root(from)) 1530 if (do_swap_account && !mem_cgroup_is_root(from))
1542 res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL); 1531 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1543 css_put(&from->css); 1532 css_put(&from->css);
1544 1533
1545 css_get(&to->css); 1534 css_get(&to->css);
@@ -1610,9 +1599,9 @@ uncharge:
1610 css_put(&parent->css); 1599 css_put(&parent->css);
1611 /* uncharge if move fails */ 1600 /* uncharge if move fails */
1612 if (!mem_cgroup_is_root(parent)) { 1601 if (!mem_cgroup_is_root(parent)) {
1613 res_counter_uncharge(&parent->res, PAGE_SIZE, NULL); 1602 res_counter_uncharge(&parent->res, PAGE_SIZE);
1614 if (do_swap_account) 1603 if (do_swap_account)
1615 res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL); 1604 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1616 } 1605 }
1617 return ret; 1606 return ret;
1618} 1607}
@@ -1803,8 +1792,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1803 * calling css_tryget 1792 * calling css_tryget
1804 */ 1793 */
1805 if (!mem_cgroup_is_root(memcg)) 1794 if (!mem_cgroup_is_root(memcg))
1806 res_counter_uncharge(&memcg->memsw, PAGE_SIZE, 1795 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1807 NULL);
1808 mem_cgroup_swap_statistics(memcg, false); 1796 mem_cgroup_swap_statistics(memcg, false);
1809 mem_cgroup_put(memcg); 1797 mem_cgroup_put(memcg);
1810 } 1798 }
@@ -1831,9 +1819,9 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1831 if (!mem) 1819 if (!mem)
1832 return; 1820 return;
1833 if (!mem_cgroup_is_root(mem)) { 1821 if (!mem_cgroup_is_root(mem)) {
1834 res_counter_uncharge(&mem->res, PAGE_SIZE, NULL); 1822 res_counter_uncharge(&mem->res, PAGE_SIZE);
1835 if (do_swap_account) 1823 if (do_swap_account)
1836 res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL); 1824 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1837 } 1825 }
1838 css_put(&mem->css); 1826 css_put(&mem->css);
1839} 1827}
@@ -1848,7 +1836,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1848 struct page_cgroup *pc; 1836 struct page_cgroup *pc;
1849 struct mem_cgroup *mem = NULL; 1837 struct mem_cgroup *mem = NULL;
1850 struct mem_cgroup_per_zone *mz; 1838 struct mem_cgroup_per_zone *mz;
1851 bool soft_limit_excess = false;
1852 1839
1853 if (mem_cgroup_disabled()) 1840 if (mem_cgroup_disabled())
1854 return NULL; 1841 return NULL;
@@ -1888,10 +1875,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1888 } 1875 }
1889 1876
1890 if (!mem_cgroup_is_root(mem)) { 1877 if (!mem_cgroup_is_root(mem)) {
1891 res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess); 1878 res_counter_uncharge(&mem->res, PAGE_SIZE);
1892 if (do_swap_account && 1879 if (do_swap_account &&
1893 (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)) 1880 (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1894 res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL); 1881 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1895 } 1882 }
1896 if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 1883 if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1897 mem_cgroup_swap_statistics(mem, true); 1884 mem_cgroup_swap_statistics(mem, true);
@@ -1908,7 +1895,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1908 mz = page_cgroup_zoneinfo(pc); 1895 mz = page_cgroup_zoneinfo(pc);
1909 unlock_page_cgroup(pc); 1896 unlock_page_cgroup(pc);
1910 1897
1911 if (soft_limit_excess && mem_cgroup_soft_limit_check(mem)) 1898 if (mem_cgroup_soft_limit_check(mem))
1912 mem_cgroup_update_tree(mem, page); 1899 mem_cgroup_update_tree(mem, page);
1913 /* at swapout, this memcg will be accessed to record to swap */ 1900 /* at swapout, this memcg will be accessed to record to swap */
1914 if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 1901 if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
@@ -1986,7 +1973,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
1986 * This memcg can be obsolete one. We avoid calling css_tryget 1973 * This memcg can be obsolete one. We avoid calling css_tryget
1987 */ 1974 */
1988 if (!mem_cgroup_is_root(memcg)) 1975 if (!mem_cgroup_is_root(memcg))
1989 res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL); 1976 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1990 mem_cgroup_swap_statistics(memcg, false); 1977 mem_cgroup_swap_statistics(memcg, false);
1991 mem_cgroup_put(memcg); 1978 mem_cgroup_put(memcg);
1992 } 1979 }
@@ -2233,6 +2220,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2233 unsigned long reclaimed; 2220 unsigned long reclaimed;
2234 int loop = 0; 2221 int loop = 0;
2235 struct mem_cgroup_tree_per_zone *mctz; 2222 struct mem_cgroup_tree_per_zone *mctz;
2223 unsigned long long excess;
2236 2224
2237 if (order > 0) 2225 if (order > 0)
2238 return 0; 2226 return 0;
@@ -2284,9 +2272,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2284 break; 2272 break;
2285 } while (1); 2273 } while (1);
2286 } 2274 }
2287 mz->usage_in_excess =
2288 res_counter_soft_limit_excess(&mz->mem->res);
2289 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz); 2275 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2276 excess = res_counter_soft_limit_excess(&mz->mem->res);
2290 /* 2277 /*
2291 * One school of thought says that we should not add 2278 * One school of thought says that we should not add
2292 * back the node to the tree if reclaim returns 0. 2279 * back the node to the tree if reclaim returns 0.
@@ -2295,8 +2282,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2295 * memory to reclaim from. Consider this as a longer 2282 * memory to reclaim from. Consider this as a longer
2296 * term TODO. 2283 * term TODO.
2297 */ 2284 */
2298 if (mz->usage_in_excess) 2285 /* If excess == 0, no tree ops */
2299 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz); 2286 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2300 spin_unlock(&mctz->lock); 2287 spin_unlock(&mctz->lock);
2301 css_put(&mz->mem->css); 2288 css_put(&mz->mem->css);
2302 loop++; 2289 loop++;
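
Editor's note: throughout these hunks the RB-tree key is the cgroup's soft-limit excess, recomputed under mctz->lock and passed down so that a zero excess skips the insert; the update path also walks every ancestor via parent_mem_cgroup() because ancestors' event counters are not bumped. As a minimal illustration of the key quantity — simplified stand-in types, mirroring what res_counter_soft_limit_excess() is expected to return:

/* Minimal sketch (not from the patch) of the quantity the softlimit RB-tree
 * is keyed on: how far usage is above the soft limit, clamped at zero. */
struct example_counter {
        unsigned long long usage;
        unsigned long long soft_limit;
};

static unsigned long long example_soft_limit_excess(struct example_counter *c)
{
        if (c->usage <= c->soft_limit)
                return 0;               /* not over the soft limit: no tree insert */
        return c->usage - c->soft_limit;
}
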
diff --git a/mm/rmap.c b/mm/rmap.c
index 28aafe2b5306..dd43373a483f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -242,8 +242,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
242} 242}
243 243
244/* 244/*
245 * At what user virtual address is page expected in vma? checking that the 245 * At what user virtual address is page expected in vma?
246 * page matches the vma: currently only used on anon pages, by unuse_vma; 246 * checking that the page matches the vma.
247 */ 247 */
248unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 248unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
249{ 249{
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4de7f02f820b..a1bc6b9af9a2 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1974 goto bad_swap; 1974 goto bad_swap;
1975 } 1975 }
1976 1976
1977 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 1977 if (p->bdev) {
1978 p->flags |= SWP_SOLIDSTATE; 1978 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
1979 p->cluster_next = 1 + (random32() % p->highest_bit); 1979 p->flags |= SWP_SOLIDSTATE;
1980 p->cluster_next = 1 + (random32() % p->highest_bit);
1981 }
1982 if (discard_swap(p) == 0)
1983 p->flags |= SWP_DISCARDABLE;
1980 } 1984 }
1981 if (discard_swap(p) == 0)
1982 p->flags |= SWP_DISCARDABLE;
1983 1985
1984 mutex_lock(&swapon_mutex); 1986 mutex_lock(&swapon_mutex);
1985 spin_lock(&swap_lock); 1987 spin_lock(&swap_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 69511e663234..5e7aed0802bf 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -25,10 +25,10 @@
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/pfn.h> 26#include <linux/pfn.h>
27#include <linux/kmemleak.h> 27#include <linux/kmemleak.h>
28#include <linux/highmem.h>
29#include <asm/atomic.h> 28#include <asm/atomic.h>
30#include <asm/uaccess.h> 29#include <asm/uaccess.h>
31#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
31#include <asm/shmparam.h>
32 32
33 33
34/*** Page table manipulation functions ***/ 34/*** Page table manipulation functions ***/
@@ -1156,12 +1156,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1156} 1156}
1157 1157
1158static struct vm_struct *__get_vm_area_node(unsigned long size, 1158static struct vm_struct *__get_vm_area_node(unsigned long size,
1159 unsigned long flags, unsigned long start, unsigned long end, 1159 unsigned long align, unsigned long flags, unsigned long start,
1160 int node, gfp_t gfp_mask, void *caller) 1160 unsigned long end, int node, gfp_t gfp_mask, void *caller)
1161{ 1161{
1162 static struct vmap_area *va; 1162 static struct vmap_area *va;
1163 struct vm_struct *area; 1163 struct vm_struct *area;
1164 unsigned long align = 1;
1165 1164
1166 BUG_ON(in_interrupt()); 1165 BUG_ON(in_interrupt());
1167 if (flags & VM_IOREMAP) { 1166 if (flags & VM_IOREMAP) {
@@ -1201,7 +1200,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
1201struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, 1200struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1202 unsigned long start, unsigned long end) 1201 unsigned long start, unsigned long end)
1203{ 1202{
1204 return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, 1203 return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
1205 __builtin_return_address(0)); 1204 __builtin_return_address(0));
1206} 1205}
1207EXPORT_SYMBOL_GPL(__get_vm_area); 1206EXPORT_SYMBOL_GPL(__get_vm_area);
@@ -1210,7 +1209,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1210 unsigned long start, unsigned long end, 1209 unsigned long start, unsigned long end,
1211 void *caller) 1210 void *caller)
1212{ 1211{
1213 return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, 1212 return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
1214 caller); 1213 caller);
1215} 1214}
1216 1215
@@ -1225,22 +1224,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1225 */ 1224 */
1226struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 1225struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1227{ 1226{
1228 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, 1227 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1229 -1, GFP_KERNEL, __builtin_return_address(0)); 1228 -1, GFP_KERNEL, __builtin_return_address(0));
1230} 1229}
1231 1230
1232struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 1231struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1233 void *caller) 1232 void *caller)
1234{ 1233{
1235 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, 1234 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1236 -1, GFP_KERNEL, caller); 1235 -1, GFP_KERNEL, caller);
1237} 1236}
1238 1237
1239struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, 1238struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
1240 int node, gfp_t gfp_mask) 1239 int node, gfp_t gfp_mask)
1241{ 1240{
1242 return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node, 1241 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1243 gfp_mask, __builtin_return_address(0)); 1242 node, gfp_mask, __builtin_return_address(0));
1244} 1243}
1245 1244
1246static struct vm_struct *find_vm_area(const void *addr) 1245static struct vm_struct *find_vm_area(const void *addr)
@@ -1403,7 +1402,8 @@ void *vmap(struct page **pages, unsigned int count,
1403} 1402}
1404EXPORT_SYMBOL(vmap); 1403EXPORT_SYMBOL(vmap);
1405 1404
1406static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 1405static void *__vmalloc_node(unsigned long size, unsigned long align,
1406 gfp_t gfp_mask, pgprot_t prot,
1407 int node, void *caller); 1407 int node, void *caller);
1408static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 1408static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1409 pgprot_t prot, int node, void *caller) 1409 pgprot_t prot, int node, void *caller)
@@ -1417,7 +1417,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1417 area->nr_pages = nr_pages; 1417 area->nr_pages = nr_pages;
1418 /* Please note that the recursion is strictly bounded. */ 1418 /* Please note that the recursion is strictly bounded. */
1419 if (array_size > PAGE_SIZE) { 1419 if (array_size > PAGE_SIZE) {
1420 pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, 1420 pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
1421 PAGE_KERNEL, node, caller); 1421 PAGE_KERNEL, node, caller);
1422 area->flags |= VM_VPAGES; 1422 area->flags |= VM_VPAGES;
1423 } else { 1423 } else {
@@ -1476,6 +1476,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
1476/** 1476/**
1477 * __vmalloc_node - allocate virtually contiguous memory 1477 * __vmalloc_node - allocate virtually contiguous memory
1478 * @size: allocation size 1478 * @size: allocation size
1479 * @align: desired alignment
1479 * @gfp_mask: flags for the page level allocator 1480 * @gfp_mask: flags for the page level allocator
1480 * @prot: protection mask for the allocated pages 1481 * @prot: protection mask for the allocated pages
1481 * @node: node to use for allocation or -1 1482 * @node: node to use for allocation or -1
@@ -1485,8 +1486,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
1485 * allocator with @gfp_mask flags. Map them into contiguous 1486 * allocator with @gfp_mask flags. Map them into contiguous
1486 * kernel virtual space, using a pagetable protection of @prot. 1487 * kernel virtual space, using a pagetable protection of @prot.
1487 */ 1488 */
1488static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, 1489static void *__vmalloc_node(unsigned long size, unsigned long align,
1489 int node, void *caller) 1490 gfp_t gfp_mask, pgprot_t prot,
1491 int node, void *caller)
1490{ 1492{
1491 struct vm_struct *area; 1493 struct vm_struct *area;
1492 void *addr; 1494 void *addr;
@@ -1496,8 +1498,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1496 if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1498 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1497 return NULL; 1499 return NULL;
1498 1500
1499 area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, 1501 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
1500 node, gfp_mask, caller); 1502 VMALLOC_END, node, gfp_mask, caller);
1501 1503
1502 if (!area) 1504 if (!area)
1503 return NULL; 1505 return NULL;
@@ -1516,7 +1518,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1516 1518
1517void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1519void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1518{ 1520{
1519 return __vmalloc_node(size, gfp_mask, prot, -1, 1521 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
1520 __builtin_return_address(0)); 1522 __builtin_return_address(0));
1521} 1523}
1522EXPORT_SYMBOL(__vmalloc); 1524EXPORT_SYMBOL(__vmalloc);
@@ -1532,7 +1534,7 @@ EXPORT_SYMBOL(__vmalloc);
1532 */ 1534 */
1533void *vmalloc(unsigned long size) 1535void *vmalloc(unsigned long size)
1534{ 1536{
1535 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 1537 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1536 -1, __builtin_return_address(0)); 1538 -1, __builtin_return_address(0));
1537} 1539}
1538EXPORT_SYMBOL(vmalloc); 1540EXPORT_SYMBOL(vmalloc);
@@ -1549,7 +1551,8 @@ void *vmalloc_user(unsigned long size)
1549 struct vm_struct *area; 1551 struct vm_struct *area;
1550 void *ret; 1552 void *ret;
1551 1553
1552 ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 1554 ret = __vmalloc_node(size, SHMLBA,
1555 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1553 PAGE_KERNEL, -1, __builtin_return_address(0)); 1556 PAGE_KERNEL, -1, __builtin_return_address(0));
1554 if (ret) { 1557 if (ret) {
1555 area = find_vm_area(ret); 1558 area = find_vm_area(ret);
@@ -1572,7 +1575,7 @@ EXPORT_SYMBOL(vmalloc_user);
1572 */ 1575 */
1573void *vmalloc_node(unsigned long size, int node) 1576void *vmalloc_node(unsigned long size, int node)
1574{ 1577{
1575 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 1578 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1576 node, __builtin_return_address(0)); 1579 node, __builtin_return_address(0));
1577} 1580}
1578EXPORT_SYMBOL(vmalloc_node); 1581EXPORT_SYMBOL(vmalloc_node);
@@ -1595,7 +1598,7 @@ EXPORT_SYMBOL(vmalloc_node);
1595 1598
1596void *vmalloc_exec(unsigned long size) 1599void *vmalloc_exec(unsigned long size)
1597{ 1600{
1598 return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, 1601 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1599 -1, __builtin_return_address(0)); 1602 -1, __builtin_return_address(0));
1600} 1603}
1601 1604
@@ -1616,7 +1619,7 @@ void *vmalloc_exec(unsigned long size)
1616 */ 1619 */
1617void *vmalloc_32(unsigned long size) 1620void *vmalloc_32(unsigned long size)
1618{ 1621{
1619 return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL, 1622 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
1620 -1, __builtin_return_address(0)); 1623 -1, __builtin_return_address(0));
1621} 1624}
1622EXPORT_SYMBOL(vmalloc_32); 1625EXPORT_SYMBOL(vmalloc_32);
@@ -1633,7 +1636,7 @@ void *vmalloc_32_user(unsigned long size)
1633 struct vm_struct *area; 1636 struct vm_struct *area;
1634 void *ret; 1637 void *ret;
1635 1638
1636 ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 1639 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1637 -1, __builtin_return_address(0)); 1640 -1, __builtin_return_address(0));
1638 if (ret) { 1641 if (ret) {
1639 area = find_vm_area(ret); 1642 area = find_vm_area(ret);
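
Editor's note: the new align parameter exists so callers like vmalloc_user() can request SHMLBA alignment, which keeps the kernel and user mappings of the same pages from aliasing in virtually indexed caches. A hedged sketch of the kind of consumer this serves — a driver that allocates a user-mappable buffer and hands it out through remap_vmalloc_range() in its mmap handler; example_buf and example_mmap are hypothetical.

/* Hedged sketch (not from the patch): vmalloc_user() allocates a zeroed,
 * user-mappable area (now SHMLBA-aligned), and remap_vmalloc_range() maps
 * the whole area into the caller's vma starting at page 0. */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static void *example_buf;

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
        if (!example_buf) {
                example_buf = vmalloc_user(vma->vm_end - vma->vm_start);
                if (!example_buf)
                        return -ENOMEM;
        }
        return remap_vmalloc_range(vma, example_buf, 0);
}
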
diff --git a/net/atm/common.c b/net/atm/common.c
index 8c4d843eb17f..950bd16d2383 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -679,7 +679,7 @@ static int check_qos(const struct atm_qos *qos)
679} 679}
680 680
681int vcc_setsockopt(struct socket *sock, int level, int optname, 681int vcc_setsockopt(struct socket *sock, int level, int optname,
682 char __user *optval, int optlen) 682 char __user *optval, unsigned int optlen)
683{ 683{
684 struct atm_vcc *vcc; 684 struct atm_vcc *vcc;
685 unsigned long value; 685 unsigned long value;
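
Editor's note: the optlen type change repeated through the rest of this patch touches every setsockopt implementation; since the length can no longer go negative, handlers only have to guard the lower bound before copying the option in. A hedged minimal handler under the new prototype — EXAMPLE_OPT and example_setsockopt are hypothetical, not part of any real protocol.

/* Hedged sketch (not from the patch) of a setsockopt handler under the new
 * prototype: optlen is unsigned int, so only the "too short" case needs
 * checking before copy_from_user(). */
#include <linux/net.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

#define EXAMPLE_OPT 1

static int example_setsockopt(struct socket *sock, int level, int optname,
                              char __user *optval, unsigned int optlen)
{
        int val;

        if (optname != EXAMPLE_OPT)
                return -ENOPROTOOPT;
        if (optlen < sizeof(int))       /* cannot be negative any more */
                return -EINVAL;
        if (copy_from_user(&val, optval, sizeof(int)))
                return -EFAULT;
        /* ... apply val to sock->sk ... */
        return 0;
}
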
diff --git a/net/atm/common.h b/net/atm/common.h
index 92e2981f479f..f48a76b6cdf4 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -21,7 +21,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
21int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 21int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
22int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 22int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
23int vcc_setsockopt(struct socket *sock, int level, int optname, 23int vcc_setsockopt(struct socket *sock, int level, int optname,
24 char __user *optval, int optlen); 24 char __user *optval, unsigned int optlen);
25int vcc_getsockopt(struct socket *sock, int level, int optname, 25int vcc_getsockopt(struct socket *sock, int level, int optname,
26 char __user *optval, int __user *optlen); 26 char __user *optval, int __user *optlen);
27 27
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index e1d22d9430dd..d4c024504f99 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -59,7 +59,7 @@ static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr,
59} 59}
60 60
61static int pvc_setsockopt(struct socket *sock, int level, int optname, 61static int pvc_setsockopt(struct socket *sock, int level, int optname,
62 char __user *optval, int optlen) 62 char __user *optval, unsigned int optlen)
63{ 63{
64 struct sock *sk = sock->sk; 64 struct sock *sk = sock->sk;
65 int error; 65 int error;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 7b831b526d0b..f90d143c4b25 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -446,7 +446,7 @@ int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
446 446
447 447
448static int svc_setsockopt(struct socket *sock, int level, int optname, 448static int svc_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, int optlen) 449 char __user *optval, unsigned int optlen)
450{ 450{
451 struct sock *sk = sock->sk; 451 struct sock *sk = sock->sk;
452 struct atm_vcc *vcc = ATM_SD(sock); 452 struct atm_vcc *vcc = ATM_SD(sock);
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 4102de1022ee..f45460730371 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -534,7 +534,7 @@ ax25_cb *ax25_create_cb(void)
534 */ 534 */
535 535
536static int ax25_setsockopt(struct socket *sock, int level, int optname, 536static int ax25_setsockopt(struct socket *sock, int level, int optname,
537 char __user *optval, int optlen) 537 char __user *optval, unsigned int optlen)
538{ 538{
539 struct sock *sk = sock->sk; 539 struct sock *sk = sock->sk;
540 ax25_cb *ax25; 540 ax25_cb *ax25;
@@ -901,7 +901,6 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
901 901
902 sock_init_data(NULL, sk); 902 sock_init_data(NULL, sk);
903 903
904 sk->sk_destruct = ax25_free_sock;
905 sk->sk_type = osk->sk_type; 904 sk->sk_type = osk->sk_type;
906 sk->sk_priority = osk->sk_priority; 905 sk->sk_priority = osk->sk_priority;
907 sk->sk_protocol = osk->sk_protocol; 906 sk->sk_protocol = osk->sk_protocol;
@@ -939,6 +938,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
939 } 938 }
940 939
941 sk->sk_protinfo = ax25; 940 sk->sk_protinfo = ax25;
941 sk->sk_destruct = ax25_free_sock;
942 ax25->sk = sk; 942 ax25->sk = sk;
943 943
944 return sk; 944 return sk;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 4f9621f759a0..75302a986067 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -466,7 +466,7 @@ drop:
466 goto done; 466 goto done;
467} 467}
468 468
469static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int len) 469static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
470{ 470{
471 struct hci_ufilter uf = { .opcode = 0 }; 471 struct hci_ufilter uf = { .opcode = 0 };
472 struct sock *sk = sock->sk; 472 struct sock *sk = sock->sk;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index b03012564647..555d9da1869b 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1698,7 +1698,7 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
1698 return bt_sock_recvmsg(iocb, sock, msg, len, flags); 1698 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1699} 1699}
1700 1700
1701static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen) 1701static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1702{ 1702{
1703 struct sock *sk = sock->sk; 1703 struct sock *sk = sock->sk;
1704 struct l2cap_options opts; 1704 struct l2cap_options opts;
@@ -1755,7 +1755,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1755 return err; 1755 return err;
1756} 1756}
1757 1757
1758static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1758static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1759{ 1759{
1760 struct sock *sk = sock->sk; 1760 struct sock *sk = sock->sk;
1761 struct bt_security sec; 1761 struct bt_security sec;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 0b85e8116859..8a20aaf1f231 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -730,7 +730,7 @@ out:
730 return copied ? : err; 730 return copied ? : err;
731} 731}
732 732
733static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen) 733static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
734{ 734{
735 struct sock *sk = sock->sk; 735 struct sock *sk = sock->sk;
736 int err = 0; 736 int err = 0;
@@ -766,7 +766,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
766 return err; 766 return err;
767} 767}
768 768
769static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 769static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
770{ 770{
771 struct sock *sk = sock->sk; 771 struct sock *sk = sock->sk;
772 struct bt_security sec; 772 struct bt_security sec;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 13c27f17192c..77f4153bdb5e 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -644,7 +644,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
644 return err; 644 return err;
645} 645}
646 646
647static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 647static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
648{ 648{
649 struct sock *sk = sock->sk; 649 struct sock *sk = sock->sk;
650 int err = 0; 650 int err = 0;
diff --git a/net/can/raw.c b/net/can/raw.c
index db3152df7d2b..b5e897922d32 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -411,7 +411,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
411} 411}
412 412
413static int raw_setsockopt(struct socket *sock, int level, int optname, 413static int raw_setsockopt(struct socket *sock, int level, int optname,
414 char __user *optval, int optlen) 414 char __user *optval, unsigned int optlen)
415{ 415{
416 struct sock *sk = sock->sk; 416 struct sock *sk = sock->sk;
417 struct raw_sock *ro = raw_sk(sk); 417 struct raw_sock *ro = raw_sk(sk);
diff --git a/net/compat.c b/net/compat.c
index 12728b17a226..a407c3addbae 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -331,7 +331,7 @@ struct compat_sock_fprog {
331}; 331};
332 332
333static int do_set_attach_filter(struct socket *sock, int level, int optname, 333static int do_set_attach_filter(struct socket *sock, int level, int optname,
334 char __user *optval, int optlen) 334 char __user *optval, unsigned int optlen)
335{ 335{
336 struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; 336 struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
337 struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); 337 struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -351,7 +351,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
351} 351}
352 352
353static int do_set_sock_timeout(struct socket *sock, int level, 353static int do_set_sock_timeout(struct socket *sock, int level,
354 int optname, char __user *optval, int optlen) 354 int optname, char __user *optval, unsigned int optlen)
355{ 355{
356 struct compat_timeval __user *up = (struct compat_timeval __user *) optval; 356 struct compat_timeval __user *up = (struct compat_timeval __user *) optval;
357 struct timeval ktime; 357 struct timeval ktime;
@@ -373,7 +373,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
373} 373}
374 374
375static int compat_sock_setsockopt(struct socket *sock, int level, int optname, 375static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
376 char __user *optval, int optlen) 376 char __user *optval, unsigned int optlen)
377{ 377{
378 if (optname == SO_ATTACH_FILTER) 378 if (optname == SO_ATTACH_FILTER)
379 return do_set_attach_filter(sock, level, optname, 379 return do_set_attach_filter(sock, level, optname,
@@ -385,7 +385,7 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
385} 385}
386 386
387asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, 387asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
388 char __user *optval, int optlen) 388 char __user *optval, unsigned int optlen)
389{ 389{
390 int err; 390 int err;
391 struct socket *sock; 391 struct socket *sock;
@@ -558,8 +558,8 @@ struct compat_group_filter {
558 558
559 559
560int compat_mc_setsockopt(struct sock *sock, int level, int optname, 560int compat_mc_setsockopt(struct sock *sock, int level, int optname,
561 char __user *optval, int optlen, 561 char __user *optval, unsigned int optlen,
562 int (*setsockopt)(struct sock *,int,int,char __user *,int)) 562 int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int))
563{ 563{
564 char __user *koptval = optval; 564 char __user *koptval = optval;
565 int koptlen = optlen; 565 int koptlen = optlen;
diff --git a/net/core/dev.c b/net/core/dev.c
index 560c8c9c03ab..b8f74cfb1bfd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2288,6 +2288,9 @@ int netif_receive_skb(struct sk_buff *skb)
2288 int ret = NET_RX_DROP; 2288 int ret = NET_RX_DROP;
2289 __be16 type; 2289 __be16 type;
2290 2290
2291 if (!skb->tstamp.tv64)
2292 net_timestamp(skb);
2293
2291 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb)) 2294 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2292 return NET_RX_SUCCESS; 2295 return NET_RX_SUCCESS;
2293 2296
@@ -2295,9 +2298,6 @@ int netif_receive_skb(struct sk_buff *skb)
2295 if (netpoll_receive_skb(skb)) 2298 if (netpoll_receive_skb(skb))
2296 return NET_RX_DROP; 2299 return NET_RX_DROP;
2297 2300
2298 if (!skb->tstamp.tv64)
2299 net_timestamp(skb);
2300
2301 if (!skb->iif) 2301 if (!skb->iif)
2302 skb->iif = skb->dev->ifindex; 2302 skb->iif = skb->dev->ifindex;
2303 2303
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 821d30918cfc..427ded841224 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -366,13 +366,13 @@ static ssize_t wireless_show(struct device *d, char *buf,
366 const struct iw_statistics *iw; 366 const struct iw_statistics *iw;
367 ssize_t ret = -EINVAL; 367 ssize_t ret = -EINVAL;
368 368
369 read_lock(&dev_base_lock); 369 rtnl_lock();
370 if (dev_isalive(dev)) { 370 if (dev_isalive(dev)) {
371 iw = get_wireless_stats(dev); 371 iw = get_wireless_stats(dev);
372 if (iw) 372 if (iw)
373 ret = (*format)(iw, buf); 373 ret = (*format)(iw, buf);
374 } 374 }
375 read_unlock(&dev_base_lock); 375 rtnl_unlock();
376 376
377 return ret; 377 return ret;
378} 378}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d11c28ca8ca..86acdba0a97d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -964,7 +964,7 @@ static ssize_t pktgen_if_write(struct file *file,
964 if (value == 0x7FFFFFFF) 964 if (value == 0x7FFFFFFF)
965 pkt_dev->delay = ULLONG_MAX; 965 pkt_dev->delay = ULLONG_MAX;
966 else 966 else
967 pkt_dev->delay = (u64)value * NSEC_PER_USEC; 967 pkt_dev->delay = (u64)value;
968 968
969 sprintf(pg_result, "OK: delay=%llu", 969 sprintf(pg_result, "OK: delay=%llu",
970 (unsigned long long) pkt_dev->delay); 970 (unsigned long long) pkt_dev->delay);
@@ -2105,15 +2105,17 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2105static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) 2105static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2106{ 2106{
2107 ktime_t start_time, end_time; 2107 ktime_t start_time, end_time;
2108 s32 remaining; 2108 s64 remaining;
2109 struct hrtimer_sleeper t; 2109 struct hrtimer_sleeper t;
2110 2110
2111 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 2111 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2112 hrtimer_set_expires(&t.timer, spin_until); 2112 hrtimer_set_expires(&t.timer, spin_until);
2113 2113
2114 remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer)); 2114 remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
2115 if (remaining <= 0) 2115 if (remaining <= 0) {
2116 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
2116 return; 2117 return;
2118 }
2117 2119
2118 start_time = ktime_now(); 2120 start_time = ktime_now();
2119 if (remaining < 100) 2121 if (remaining < 100)
@@ -2210,7 +2212,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
2210 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 2212 if (pkt_dev->flags & F_QUEUE_MAP_CPU)
2211 pkt_dev->cur_queue_map = smp_processor_id(); 2213 pkt_dev->cur_queue_map = smp_processor_id();
2212 2214
2213 else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { 2215 else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
2214 __u16 t; 2216 __u16 t;
2215 if (pkt_dev->flags & F_QUEUE_MAP_RND) { 2217 if (pkt_dev->flags & F_QUEUE_MAP_RND) {
2216 t = random32() % 2218 t = random32() %
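
In the pktgen hunks the value written for delay is now stored as nanoseconds directly instead of being scaled up from microseconds, spin()'s remaining is widened from s32 to s64, and an already-expired timer still advances next_tx by the configured delay. The widening matters because the microsecond remainder of a long delay no longer fits in 32 bits; a toy standalone illustration (the narrowing result is implementation-defined and shown for a typical two's-complement target):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t delay_ns = 2200000000000LL;     /* ~2200 s between packets */
        int64_t remaining_us = delay_ns / 1000; /* what ktime_to_us() yields */

        int32_t narrow = (int32_t)remaining_us; /* old s32 'remaining' */
        int64_t wide = remaining_us;            /* new s64 'remaining' */

        printf("s32: %d (wraps negative, so spin() bailed out)\n", narrow);
        printf("s64: %lld\n", (long long)wide);
        return 0;
}
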
diff --git a/net/core/sock.c b/net/core/sock.c
index 524712a7b154..7626b6aacd68 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -446,7 +446,7 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
446 */ 446 */
447 447
448int sock_setsockopt(struct socket *sock, int level, int optname, 448int sock_setsockopt(struct socket *sock, int level, int optname,
449 char __user *optval, int optlen) 449 char __user *optval, unsigned int optlen)
450{ 450{
451 struct sock *sk = sock->sk; 451 struct sock *sk = sock->sk;
452 int val; 452 int val;
@@ -1228,17 +1228,22 @@ void __init sk_init(void)
1228void sock_wfree(struct sk_buff *skb) 1228void sock_wfree(struct sk_buff *skb)
1229{ 1229{
1230 struct sock *sk = skb->sk; 1230 struct sock *sk = skb->sk;
1231 int res; 1231 unsigned int len = skb->truesize;
1232 1232
1233 /* In case it might be waiting for more memory. */ 1233 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1234 res = atomic_sub_return(skb->truesize, &sk->sk_wmem_alloc); 1234 /*
1235 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) 1235 * Keep a reference on sk_wmem_alloc, this will be released
1236 * after sk_write_space() call
1237 */
1238 atomic_sub(len - 1, &sk->sk_wmem_alloc);
1236 sk->sk_write_space(sk); 1239 sk->sk_write_space(sk);
1240 len = 1;
1241 }
1237 /* 1242 /*
1238 * if sk_wmem_alloc reached 0, we are last user and should 1243 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1239 * free this sock, as sk_free() call could not do it. 1244 * could not do because of in-flight packets
1240 */ 1245 */
1241 if (res == 0) 1246 if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1242 __sk_free(sk); 1247 __sk_free(sk);
1243} 1248}
1244EXPORT_SYMBOL(sock_wfree); 1249EXPORT_SYMBOL(sock_wfree);
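
The reworked sock_wfree() no longer subtracts skb->truesize in one step and tests the result; when sk_write_space() has to run, it first drops all but one unit of sk_wmem_alloc, calls the callback while that unit still pins the sock, and only then releases the last unit with atomic_sub_and_test(), freeing the sock if the count reached zero. A userspace analogue of the pattern using C11 atomics, with invented names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_uint refs;       /* plays the role of sk_wmem_alloc */
};

static void notify(struct obj *o)
{
        /* Safe to touch 'o' here: the caller still holds one unit. */
        printf("notify, refs now %u\n", atomic_load(&o->refs));
}

static void release(struct obj *o, unsigned int units)
{
        /* Drop all but one unit, run the callback, then drop the last
         * unit; whoever brings the count to zero frees the object. */
        atomic_fetch_sub(&o->refs, units - 1);
        notify(o);
        if (atomic_fetch_sub(&o->refs, 1) == 1) {
                printf("last unit dropped, freeing\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refs, 5);       /* e.g. skb->truesize worth of units */
        release(o, 5);
        return 0;
}
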
@@ -1697,7 +1702,7 @@ int sock_no_shutdown(struct socket *sock, int how)
1697EXPORT_SYMBOL(sock_no_shutdown); 1702EXPORT_SYMBOL(sock_no_shutdown);
1698 1703
1699int sock_no_setsockopt(struct socket *sock, int level, int optname, 1704int sock_no_setsockopt(struct socket *sock, int level, int optname,
1700 char __user *optval, int optlen) 1705 char __user *optval, unsigned int optlen)
1701{ 1706{
1702 return -EOPNOTSUPP; 1707 return -EOPNOTSUPP;
1703} 1708}
@@ -2018,7 +2023,7 @@ EXPORT_SYMBOL(sock_common_recvmsg);
2018 * Set socket options on an inet socket. 2023 * Set socket options on an inet socket.
2019 */ 2024 */
2020int sock_common_setsockopt(struct socket *sock, int level, int optname, 2025int sock_common_setsockopt(struct socket *sock, int level, int optname,
2021 char __user *optval, int optlen) 2026 char __user *optval, unsigned int optlen)
2022{ 2027{
2023 struct sock *sk = sock->sk; 2028 struct sock *sk = sock->sk;
2024 2029
@@ -2028,7 +2033,7 @@ EXPORT_SYMBOL(sock_common_setsockopt);
2028 2033
2029#ifdef CONFIG_COMPAT 2034#ifdef CONFIG_COMPAT
2030int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, 2035int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2031 char __user *optval, int optlen) 2036 char __user *optval, unsigned int optlen)
2032{ 2037{
2033 struct sock *sk = sock->sk; 2038 struct sock *sk = sock->sk;
2034 2039
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index d6bc47363b1c..5ef32c2f0d6a 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -290,14 +290,14 @@ extern int dccp_disconnect(struct sock *sk, int flags);
290extern int dccp_getsockopt(struct sock *sk, int level, int optname, 290extern int dccp_getsockopt(struct sock *sk, int level, int optname,
291 char __user *optval, int __user *optlen); 291 char __user *optval, int __user *optlen);
292extern int dccp_setsockopt(struct sock *sk, int level, int optname, 292extern int dccp_setsockopt(struct sock *sk, int level, int optname,
293 char __user *optval, int optlen); 293 char __user *optval, unsigned int optlen);
294#ifdef CONFIG_COMPAT 294#ifdef CONFIG_COMPAT
295extern int compat_dccp_getsockopt(struct sock *sk, 295extern int compat_dccp_getsockopt(struct sock *sk,
296 int level, int optname, 296 int level, int optname,
297 char __user *optval, int __user *optlen); 297 char __user *optval, int __user *optlen);
298extern int compat_dccp_setsockopt(struct sock *sk, 298extern int compat_dccp_setsockopt(struct sock *sk,
299 int level, int optname, 299 int level, int optname,
300 char __user *optval, int optlen); 300 char __user *optval, unsigned int optlen);
301#endif 301#endif
302extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); 302extern int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
303extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, 303extern int dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index bc4467082a00..a156319fd0ac 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -393,7 +393,7 @@ out:
393EXPORT_SYMBOL_GPL(dccp_ioctl); 393EXPORT_SYMBOL_GPL(dccp_ioctl);
394 394
395static int dccp_setsockopt_service(struct sock *sk, const __be32 service, 395static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
396 char __user *optval, int optlen) 396 char __user *optval, unsigned int optlen)
397{ 397{
398 struct dccp_sock *dp = dccp_sk(sk); 398 struct dccp_sock *dp = dccp_sk(sk);
399 struct dccp_service_list *sl = NULL; 399 struct dccp_service_list *sl = NULL;
@@ -464,7 +464,7 @@ static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
464} 464}
465 465
466static int dccp_setsockopt_ccid(struct sock *sk, int type, 466static int dccp_setsockopt_ccid(struct sock *sk, int type,
467 char __user *optval, int optlen) 467 char __user *optval, unsigned int optlen)
468{ 468{
469 u8 *val; 469 u8 *val;
470 int rc = 0; 470 int rc = 0;
@@ -494,7 +494,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type,
494} 494}
495 495
496static int do_dccp_setsockopt(struct sock *sk, int level, int optname, 496static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
497 char __user *optval, int optlen) 497 char __user *optval, unsigned int optlen)
498{ 498{
499 struct dccp_sock *dp = dccp_sk(sk); 499 struct dccp_sock *dp = dccp_sk(sk);
500 int val, err = 0; 500 int val, err = 0;
@@ -546,7 +546,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
546} 546}
547 547
548int dccp_setsockopt(struct sock *sk, int level, int optname, 548int dccp_setsockopt(struct sock *sk, int level, int optname,
549 char __user *optval, int optlen) 549 char __user *optval, unsigned int optlen)
550{ 550{
551 if (level != SOL_DCCP) 551 if (level != SOL_DCCP)
552 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, 552 return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
@@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(dccp_setsockopt);
559 559
560#ifdef CONFIG_COMPAT 560#ifdef CONFIG_COMPAT
561int compat_dccp_setsockopt(struct sock *sk, int level, int optname, 561int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
562 char __user *optval, int optlen) 562 char __user *optval, unsigned int optlen)
563{ 563{
564 if (level != SOL_DCCP) 564 if (level != SOL_DCCP)
565 return inet_csk_compat_setsockopt(sk, level, optname, 565 return inet_csk_compat_setsockopt(sk, level, optname,
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 77d40289653c..7a58c87baf17 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -157,7 +157,7 @@ static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157static struct hlist_head dn_wild_sk; 157static struct hlist_head dn_wild_sk;
158static atomic_t decnet_memory_allocated; 158static atomic_t decnet_memory_allocated;
159 159
160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags); 160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); 161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
162 162
163static struct hlist_head *dn_find_list(struct sock *sk) 163static struct hlist_head *dn_find_list(struct sock *sk)
@@ -1325,7 +1325,7 @@ out:
1325 return err; 1325 return err;
1326} 1326}
1327 1327
1328static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1328static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1329{ 1329{
1330 struct sock *sk = sock->sk; 1330 struct sock *sk = sock->sk;
1331 int err; 1331 int err;
@@ -1337,7 +1337,7 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
1337 return err; 1337 return err;
1338} 1338}
1339 1339
1340static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags) 1340static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
1341{ 1341{
1342 struct sock *sk = sock->sk; 1342 struct sock *sk = sock->sk;
1343 struct dn_scp *scp = DN_SK(sk); 1343 struct dn_scp *scp = DN_SK(sk);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 51593a48f2dd..a413b1bf4465 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -414,7 +414,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
414} 414}
415 415
416static int dgram_setsockopt(struct sock *sk, int level, int optname, 416static int dgram_setsockopt(struct sock *sk, int level, int optname,
417 char __user *optval, int optlen) 417 char __user *optval, unsigned int optlen)
418{ 418{
419 struct dgram_sock *ro = dgram_sk(sk); 419 struct dgram_sock *ro = dgram_sk(sk);
420 int val; 420 int val;
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index 13198859982e..30e74eee07d6 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -244,7 +244,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
244} 244}
245 245
246static int raw_setsockopt(struct sock *sk, int level, int optname, 246static int raw_setsockopt(struct sock *sk, int level, int optname,
247 char __user *optval, int optlen) 247 char __user *optval, unsigned int optlen)
248{ 248{
249 return -EOPNOTSUPP; 249 return -EOPNOTSUPP;
250} 250}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 58c4b0f7c4aa..57737b8d1711 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1119,6 +1119,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1119{ 1119{
1120 struct flowi fl = { 1120 struct flowi fl = {
1121 .oif = sk->sk_bound_dev_if, 1121 .oif = sk->sk_bound_dev_if,
1122 .mark = sk->sk_mark,
1122 .nl_u = { 1123 .nl_u = {
1123 .ip4_u = { 1124 .ip4_u = {
1124 .daddr = daddr, 1125 .daddr = daddr,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e92f1fd28aa5..5df2f6a0b0f0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1077,12 +1077,16 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1077 ip_mc_up(in_dev); 1077 ip_mc_up(in_dev);
1078 /* fall through */ 1078 /* fall through */
1079 case NETDEV_CHANGEADDR: 1079 case NETDEV_CHANGEADDR:
1080 if (IN_DEV_ARP_NOTIFY(in_dev)) 1080 /* Send gratuitous ARP to notify of link change */
1081 arp_send(ARPOP_REQUEST, ETH_P_ARP, 1081 if (IN_DEV_ARP_NOTIFY(in_dev)) {
1082 in_dev->ifa_list->ifa_address, 1082 struct in_ifaddr *ifa = in_dev->ifa_list;
1083 dev, 1083
1084 in_dev->ifa_list->ifa_address, 1084 if (ifa)
1085 NULL, dev->dev_addr, NULL); 1085 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1086 ifa->ifa_address, dev,
1087 ifa->ifa_address, NULL,
1088 dev->dev_addr, NULL);
1089 }
1086 break; 1090 break;
1087 case NETDEV_DOWN: 1091 case NETDEV_DOWN:
1088 ip_mc_down(in_dev); 1092 ip_mc_down(in_dev);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 22cd19ee44e5..4351ca2cf0b8 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -714,7 +714,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
714EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); 714EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
715 715
716int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, 716int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
717 char __user *optval, int optlen) 717 char __user *optval, unsigned int optlen)
718{ 718{
719 const struct inet_connection_sock *icsk = inet_csk(sk); 719 const struct inet_connection_sock *icsk = inet_csk(sk);
720 720
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9fe5d7b81580..f9895180f481 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -335,6 +335,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
335 335
336 { 336 {
337 struct flowi fl = { .oif = sk->sk_bound_dev_if, 337 struct flowi fl = { .oif = sk->sk_bound_dev_if,
338 .mark = sk->sk_mark,
338 .nl_u = { .ip4_u = 339 .nl_u = { .ip4_u =
339 { .daddr = daddr, 340 { .daddr = daddr,
340 .saddr = inet->saddr, 341 .saddr = inet->saddr,
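
The .mark = sk->sk_mark additions here and in af_inet.c and udp.c copy the socket mark into the flow key used for the route lookup, so fwmark policy routing can act on marks an application sets with SO_MARK. A hedged userspace sketch (the value 42 is arbitrary and setting the mark requires CAP_NET_ADMIN):

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_MARK
#define SO_MARK 36              /* asm-generic value */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        unsigned int mark = 42;

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0)
                perror("setsockopt(SO_MARK)");  /* EPERM without CAP_NET_ADMIN */
        close(fd);
        return 0;
}

A matching rule on the routing side would typically look like "ip rule add fwmark 42 table 100", which is outside the scope of this patch.
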
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5a0693576e82..0c0b6e363a20 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -440,7 +440,7 @@ out:
440 */ 440 */
441 441
442static int do_ip_setsockopt(struct sock *sk, int level, 442static int do_ip_setsockopt(struct sock *sk, int level,
443 int optname, char __user *optval, int optlen) 443 int optname, char __user *optval, unsigned int optlen)
444{ 444{
445 struct inet_sock *inet = inet_sk(sk); 445 struct inet_sock *inet = inet_sk(sk);
446 int val = 0, err; 446 int val = 0, err;
@@ -950,7 +950,7 @@ e_inval:
950} 950}
951 951
952int ip_setsockopt(struct sock *sk, int level, 952int ip_setsockopt(struct sock *sk, int level,
953 int optname, char __user *optval, int optlen) 953 int optname, char __user *optval, unsigned int optlen)
954{ 954{
955 int err; 955 int err;
956 956
@@ -975,7 +975,7 @@ EXPORT_SYMBOL(ip_setsockopt);
975 975
976#ifdef CONFIG_COMPAT 976#ifdef CONFIG_COMPAT
977int compat_ip_setsockopt(struct sock *sk, int level, int optname, 977int compat_ip_setsockopt(struct sock *sk, int level, int optname,
978 char __user *optval, int optlen) 978 char __user *optval, unsigned int optlen)
979{ 979{
980 int err; 980 int err;
981 981
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c43ec2d51ce2..630a56df7b47 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -931,7 +931,7 @@ static void mrtsock_destruct(struct sock *sk)
931 * MOSPF/PIM router set up we can clean this up. 931 * MOSPF/PIM router set up we can clean this up.
932 */ 932 */
933 933
934int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen) 934int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
935{ 935{
936 int ret; 936 int ret;
937 struct vifctl vif; 937 struct vifctl vif;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ebb1e5848bc6..757c9171e7c2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -741,7 +741,7 @@ out: return ret;
741} 741}
742 742
743static int do_raw_setsockopt(struct sock *sk, int level, int optname, 743static int do_raw_setsockopt(struct sock *sk, int level, int optname,
744 char __user *optval, int optlen) 744 char __user *optval, unsigned int optlen)
745{ 745{
746 if (optname == ICMP_FILTER) { 746 if (optname == ICMP_FILTER) {
747 if (inet_sk(sk)->num != IPPROTO_ICMP) 747 if (inet_sk(sk)->num != IPPROTO_ICMP)
@@ -753,7 +753,7 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname,
753} 753}
754 754
755static int raw_setsockopt(struct sock *sk, int level, int optname, 755static int raw_setsockopt(struct sock *sk, int level, int optname,
756 char __user *optval, int optlen) 756 char __user *optval, unsigned int optlen)
757{ 757{
758 if (level != SOL_RAW) 758 if (level != SOL_RAW)
759 return ip_setsockopt(sk, level, optname, optval, optlen); 759 return ip_setsockopt(sk, level, optname, optval, optlen);
@@ -762,7 +762,7 @@ static int raw_setsockopt(struct sock *sk, int level, int optname,
762 762
763#ifdef CONFIG_COMPAT 763#ifdef CONFIG_COMPAT
764static int compat_raw_setsockopt(struct sock *sk, int level, int optname, 764static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
765 char __user *optval, int optlen) 765 char __user *optval, unsigned int optlen)
766{ 766{
767 if (level != SOL_RAW) 767 if (level != SOL_RAW)
768 return compat_ip_setsockopt(sk, level, optname, optval, optlen); 768 return compat_ip_setsockopt(sk, level, optname, optval, optlen);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 21387ebabf00..64d0af675823 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -580,7 +580,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
580 580
581 lock_sock(sk); 581 lock_sock(sk);
582 582
583 timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK); 583 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
584 while (tss.len) { 584 while (tss.len) {
585 ret = __tcp_splice_read(sk, &tss); 585 ret = __tcp_splice_read(sk, &tss);
586 if (ret < 0) 586 if (ret < 0)
@@ -2032,7 +2032,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2032 * Socket option code for TCP. 2032 * Socket option code for TCP.
2033 */ 2033 */
2034static int do_tcp_setsockopt(struct sock *sk, int level, 2034static int do_tcp_setsockopt(struct sock *sk, int level,
2035 int optname, char __user *optval, int optlen) 2035 int optname, char __user *optval, unsigned int optlen)
2036{ 2036{
2037 struct tcp_sock *tp = tcp_sk(sk); 2037 struct tcp_sock *tp = tcp_sk(sk);
2038 struct inet_connection_sock *icsk = inet_csk(sk); 2038 struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2047,7 +2047,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2047 return -EINVAL; 2047 return -EINVAL;
2048 2048
2049 val = strncpy_from_user(name, optval, 2049 val = strncpy_from_user(name, optval,
2050 min(TCP_CA_NAME_MAX-1, optlen)); 2050 min_t(long, TCP_CA_NAME_MAX-1, optlen));
2051 if (val < 0) 2051 if (val < 0)
2052 return -EFAULT; 2052 return -EFAULT;
2053 name[val] = 0; 2053 name[val] = 0;
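
With optlen now unsigned int, the kernel's type-checked min() can no longer pair it with the int expression TCP_CA_NAME_MAX-1, hence the switch to min_t(long, ...), which casts both operands to a type wide enough for either. A simplified stand-in for the two macros, using the same GCC extensions; the kernel's real definitions differ in detail:

#include <stdio.h>

#define min(a, b) ({                                            \
        __typeof__(a) _a = (a);                                 \
        __typeof__(b) _b = (b);                                 \
        (void)(&_a == &_b);     /* warns if the types differ */ \
        _a < _b ? _a : _b; })

#define min_t(type, a, b) ({                                    \
        type _ta = (type)(a);                                   \
        type _tb = (type)(b);                                   \
        _ta < _tb ? _ta : _tb; })

int main(void)
{
        unsigned int optlen = 3;        /* unsigned, as in the patch */
        int name_max = 16 - 1;          /* stands in for TCP_CA_NAME_MAX - 1 */

        /* min(name_max, optlen) would trip the distinct-type warning;
         * casting both operands to long keeps the comparison well defined. */
        printf("%ld\n", min_t(long, name_max, optlen));
        return 0;
}
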
@@ -2220,7 +2220,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2220} 2220}
2221 2221
2222int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2222int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2223 int optlen) 2223 unsigned int optlen)
2224{ 2224{
2225 struct inet_connection_sock *icsk = inet_csk(sk); 2225 struct inet_connection_sock *icsk = inet_csk(sk);
2226 2226
@@ -2232,7 +2232,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2232 2232
2233#ifdef CONFIG_COMPAT 2233#ifdef CONFIG_COMPAT
2234int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 2234int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2235 char __user *optval, int optlen) 2235 char __user *optval, unsigned int optlen)
2236{ 2236{
2237 if (level != SOL_TCP) 2237 if (level != SOL_TCP)
2238 return inet_csk_compat_setsockopt(sk, level, optname, 2238 return inet_csk_compat_setsockopt(sk, level, optname,
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5200aab0ca97..fcd278a7080e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -361,6 +361,7 @@ static inline int tcp_urg_mode(const struct tcp_sock *tp)
361#define OPTION_SACK_ADVERTISE (1 << 0) 361#define OPTION_SACK_ADVERTISE (1 << 0)
362#define OPTION_TS (1 << 1) 362#define OPTION_TS (1 << 1)
363#define OPTION_MD5 (1 << 2) 363#define OPTION_MD5 (1 << 2)
364#define OPTION_WSCALE (1 << 3)
364 365
365struct tcp_out_options { 366struct tcp_out_options {
366 u8 options; /* bit field of OPTION_* */ 367 u8 options; /* bit field of OPTION_* */
@@ -427,7 +428,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
427 TCPOLEN_SACK_PERM); 428 TCPOLEN_SACK_PERM);
428 } 429 }
429 430
430 if (unlikely(opts->ws)) { 431 if (unlikely(OPTION_WSCALE & opts->options)) {
431 *ptr++ = htonl((TCPOPT_NOP << 24) | 432 *ptr++ = htonl((TCPOPT_NOP << 24) |
432 (TCPOPT_WINDOW << 16) | 433 (TCPOPT_WINDOW << 16) |
433 (TCPOLEN_WINDOW << 8) | 434 (TCPOLEN_WINDOW << 8) |
@@ -494,8 +495,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
494 } 495 }
495 if (likely(sysctl_tcp_window_scaling)) { 496 if (likely(sysctl_tcp_window_scaling)) {
496 opts->ws = tp->rx_opt.rcv_wscale; 497 opts->ws = tp->rx_opt.rcv_wscale;
497 if (likely(opts->ws)) 498 opts->options |= OPTION_WSCALE;
498 size += TCPOLEN_WSCALE_ALIGNED; 499 size += TCPOLEN_WSCALE_ALIGNED;
499 } 500 }
500 if (likely(sysctl_tcp_sack)) { 501 if (likely(sysctl_tcp_sack)) {
501 opts->options |= OPTION_SACK_ADVERTISE; 502 opts->options |= OPTION_SACK_ADVERTISE;
@@ -537,8 +538,8 @@ static unsigned tcp_synack_options(struct sock *sk,
537 538
538 if (likely(ireq->wscale_ok)) { 539 if (likely(ireq->wscale_ok)) {
539 opts->ws = ireq->rcv_wscale; 540 opts->ws = ireq->rcv_wscale;
540 if (likely(opts->ws)) 541 opts->options |= OPTION_WSCALE;
541 size += TCPOLEN_WSCALE_ALIGNED; 542 size += TCPOLEN_WSCALE_ALIGNED;
542 } 543 }
543 if (likely(doing_ts)) { 544 if (likely(doing_ts)) {
544 opts->options |= OPTION_TS; 545 opts->options |= OPTION_TS;
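
OPTION_WSCALE records the decision to advertise window scaling as an explicit bit instead of inferring it from opts->ws being non-zero, so a computed receive scale of 0 is still sent on SYN and SYN-ACK. A toy version of the value-versus-flag distinction, with invented names:

#include <stdio.h>
#include <stdint.h>

#define OPT_SACK_ADV   (1u << 0)
#define OPT_TS         (1u << 1)
#define OPT_MD5        (1u << 2)
#define OPT_WSCALE     (1u << 3)

struct out_opts {
        uint8_t options;        /* bit field of OPT_* */
        uint8_t ws;             /* window scale value, may validly be 0 */
};

static void write_opts(const struct out_opts *o)
{
        /* Old test: if (o->ws) ... silently skipped a zero scale. */
        if (o->options & OPT_WSCALE)
                printf("emit WSCALE=%u\n", o->ws);
        else
                printf("no WSCALE option\n");
}

int main(void)
{
        struct out_opts o = { .options = OPT_WSCALE, .ws = 0 };

        write_opts(&o);         /* prints "emit WSCALE=0" */
        return 0;
}
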
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ebaaa7f973d7..6ec6a8a4a224 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -696,6 +696,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
696 696
697 if (rt == NULL) { 697 if (rt == NULL) {
698 struct flowi fl = { .oif = ipc.oif, 698 struct flowi fl = { .oif = ipc.oif,
699 .mark = sk->sk_mark,
699 .nl_u = { .ip4_u = 700 .nl_u = { .ip4_u =
700 { .daddr = faddr, 701 { .daddr = faddr,
701 .saddr = saddr, 702 .saddr = saddr,
@@ -1359,7 +1360,7 @@ void udp_destroy_sock(struct sock *sk)
1359 * Socket option code for UDP 1360 * Socket option code for UDP
1360 */ 1361 */
1361int udp_lib_setsockopt(struct sock *sk, int level, int optname, 1362int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1362 char __user *optval, int optlen, 1363 char __user *optval, unsigned int optlen,
1363 int (*push_pending_frames)(struct sock *)) 1364 int (*push_pending_frames)(struct sock *))
1364{ 1365{
1365 struct udp_sock *up = udp_sk(sk); 1366 struct udp_sock *up = udp_sk(sk);
@@ -1441,7 +1442,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1441EXPORT_SYMBOL(udp_lib_setsockopt); 1442EXPORT_SYMBOL(udp_lib_setsockopt);
1442 1443
1443int udp_setsockopt(struct sock *sk, int level, int optname, 1444int udp_setsockopt(struct sock *sk, int level, int optname,
1444 char __user *optval, int optlen) 1445 char __user *optval, unsigned int optlen)
1445{ 1446{
1446 if (level == SOL_UDP || level == SOL_UDPLITE) 1447 if (level == SOL_UDP || level == SOL_UDPLITE)
1447 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1448 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
@@ -1451,7 +1452,7 @@ int udp_setsockopt(struct sock *sk, int level, int optname,
1451 1452
1452#ifdef CONFIG_COMPAT 1453#ifdef CONFIG_COMPAT
1453int compat_udp_setsockopt(struct sock *sk, int level, int optname, 1454int compat_udp_setsockopt(struct sock *sk, int level, int optname,
1454 char __user *optval, int optlen) 1455 char __user *optval, unsigned int optlen)
1455{ 1456{
1456 if (level == SOL_UDP || level == SOL_UDPLITE) 1457 if (level == SOL_UDP || level == SOL_UDPLITE)
1457 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1458 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 9f4a6165f722..aaad650d47d9 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -11,13 +11,13 @@ extern void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
11extern int udp_v4_get_port(struct sock *sk, unsigned short snum); 11extern int udp_v4_get_port(struct sock *sk, unsigned short snum);
12 12
13extern int udp_setsockopt(struct sock *sk, int level, int optname, 13extern int udp_setsockopt(struct sock *sk, int level, int optname,
14 char __user *optval, int optlen); 14 char __user *optval, unsigned int optlen);
15extern int udp_getsockopt(struct sock *sk, int level, int optname, 15extern int udp_getsockopt(struct sock *sk, int level, int optname,
16 char __user *optval, int __user *optlen); 16 char __user *optval, int __user *optlen);
17 17
18#ifdef CONFIG_COMPAT 18#ifdef CONFIG_COMPAT
19extern int compat_udp_setsockopt(struct sock *sk, int level, int optname, 19extern int compat_udp_setsockopt(struct sock *sk, int level, int optname,
20 char __user *optval, int optlen); 20 char __user *optval, unsigned int optlen);
21extern int compat_udp_getsockopt(struct sock *sk, int level, int optname, 21extern int compat_udp_getsockopt(struct sock *sk, int level, int optname,
22 char __user *optval, int __user *optlen); 22 char __user *optval, int __user *optlen);
23#endif 23#endif
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 090675e269ee..716153941fc4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1281,7 +1281,7 @@ int ip6mr_sk_done(struct sock *sk)
1281 * MOSPF/PIM router set up we can clean this up. 1281 * MOSPF/PIM router set up we can clean this up.
1282 */ 1282 */
1283 1283
1284int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen) 1284int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1285{ 1285{
1286 int ret; 1286 int ret;
1287 struct mif6ctl vif; 1287 struct mif6ctl vif;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index f5e0682b402d..14f54eb5a7fc 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -123,7 +123,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
123} 123}
124 124
125static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, 125static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
126 char __user *optval, int optlen) 126 char __user *optval, unsigned int optlen)
127{ 127{
128 struct ipv6_pinfo *np = inet6_sk(sk); 128 struct ipv6_pinfo *np = inet6_sk(sk);
129 struct net *net = sock_net(sk); 129 struct net *net = sock_net(sk);
@@ -773,7 +773,7 @@ e_inval:
773} 773}
774 774
775int ipv6_setsockopt(struct sock *sk, int level, int optname, 775int ipv6_setsockopt(struct sock *sk, int level, int optname,
776 char __user *optval, int optlen) 776 char __user *optval, unsigned int optlen)
777{ 777{
778 int err; 778 int err;
779 779
@@ -801,7 +801,7 @@ EXPORT_SYMBOL(ipv6_setsockopt);
801 801
802#ifdef CONFIG_COMPAT 802#ifdef CONFIG_COMPAT
803int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, 803int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
804 char __user *optval, int optlen) 804 char __user *optval, unsigned int optlen)
805{ 805{
806 int err; 806 int err;
807 807
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 7d675b8d82d3..4f24570b0869 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -957,7 +957,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
957 957
958 958
959static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, 959static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
960 char __user *optval, int optlen) 960 char __user *optval, unsigned int optlen)
961{ 961{
962 struct raw6_sock *rp = raw6_sk(sk); 962 struct raw6_sock *rp = raw6_sk(sk);
963 int val; 963 int val;
@@ -1000,7 +1000,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
1000} 1000}
1001 1001
1002static int rawv6_setsockopt(struct sock *sk, int level, int optname, 1002static int rawv6_setsockopt(struct sock *sk, int level, int optname,
1003 char __user *optval, int optlen) 1003 char __user *optval, unsigned int optlen)
1004{ 1004{
1005 switch(level) { 1005 switch(level) {
1006 case SOL_RAW: 1006 case SOL_RAW:
@@ -1024,7 +1024,7 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
1024 1024
1025#ifdef CONFIG_COMPAT 1025#ifdef CONFIG_COMPAT
1026static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, 1026static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
1027 char __user *optval, int optlen) 1027 char __user *optval, unsigned int optlen)
1028{ 1028{
1029 switch (level) { 1029 switch (level) {
1030 case SOL_RAW: 1030 case SOL_RAW:
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d65e0c496cc0..dbd19a78ca73 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -274,7 +274,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
274 274
275 c = 0; 275 c = 0;
276 for (prl = t->prl; prl; prl = prl->next) { 276 for (prl = t->prl; prl; prl = prl->next) {
277 if (c > cmax) 277 if (c >= cmax)
278 break; 278 break;
279 if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) 279 if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
280 continue; 280 continue;
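
The sit.c change is a bound fix: with "c > cmax" the loop body still ran once more when c equalled cmax, copying one PRL entry beyond the number the caller sized for; ">=" stops exactly at the limit. A toy standalone illustration of the same bound:

#include <stdio.h>

#define MAX_ENTRIES 4

int main(void)
{
        int out[MAX_ENTRIES];
        int c = 0;

        for (int i = 0; i < 10; i++) {
                if (c >= MAX_ENTRIES)   /* the '>' variant writes out[4] */
                        break;
                out[c++] = i;
        }
        printf("copied %d entries\n", c);       /* 4, not 5 */
        return 0;
}
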
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b265b7047d3e..3a60f12b34ed 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1044,7 +1044,7 @@ void udpv6_destroy_sock(struct sock *sk)
1044 * Socket option code for UDP 1044 * Socket option code for UDP
1045 */ 1045 */
1046int udpv6_setsockopt(struct sock *sk, int level, int optname, 1046int udpv6_setsockopt(struct sock *sk, int level, int optname,
1047 char __user *optval, int optlen) 1047 char __user *optval, unsigned int optlen)
1048{ 1048{
1049 if (level == SOL_UDP || level == SOL_UDPLITE) 1049 if (level == SOL_UDP || level == SOL_UDPLITE)
1050 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1050 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
@@ -1054,7 +1054,7 @@ int udpv6_setsockopt(struct sock *sk, int level, int optname,
1054 1054
1055#ifdef CONFIG_COMPAT 1055#ifdef CONFIG_COMPAT
1056int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, 1056int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
1057 char __user *optval, int optlen) 1057 char __user *optval, unsigned int optlen)
1058{ 1058{
1059 if (level == SOL_UDP || level == SOL_UDPLITE) 1059 if (level == SOL_UDP || level == SOL_UDPLITE)
1060 return udp_lib_setsockopt(sk, level, optname, optval, optlen, 1060 return udp_lib_setsockopt(sk, level, optname, optval, optlen,
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 6bb303471e20..d7571046bfc4 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -16,10 +16,10 @@ extern int udp_v6_get_port(struct sock *sk, unsigned short snum);
16extern int udpv6_getsockopt(struct sock *sk, int level, int optname, 16extern int udpv6_getsockopt(struct sock *sk, int level, int optname,
17 char __user *optval, int __user *optlen); 17 char __user *optval, int __user *optlen);
18extern int udpv6_setsockopt(struct sock *sk, int level, int optname, 18extern int udpv6_setsockopt(struct sock *sk, int level, int optname,
19 char __user *optval, int optlen); 19 char __user *optval, unsigned int optlen);
20#ifdef CONFIG_COMPAT 20#ifdef CONFIG_COMPAT
21extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, 21extern int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
22 char __user *optval, int optlen); 22 char __user *optval, unsigned int optlen);
23extern int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, 23extern int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
24 char __user *optval, int __user *optlen); 24 char __user *optval, int __user *optlen);
25#endif 25#endif
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index f1118d92a191..66c7a20011f3 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1292,7 +1292,7 @@ const char *ipx_device_name(struct ipx_interface *intrfc)
1292 * socket object. */ 1292 * socket object. */
1293 1293
1294static int ipx_setsockopt(struct socket *sock, int level, int optname, 1294static int ipx_setsockopt(struct socket *sock, int level, int optname,
1295 char __user *optval, int optlen) 1295 char __user *optval, unsigned int optlen)
1296{ 1296{
1297 struct sock *sk = sock->sk; 1297 struct sock *sk = sock->sk;
1298 int opt; 1298 int opt;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 50b43c57d5d8..dd35641835f4 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1826,7 +1826,7 @@ static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
1826 * 1826 *
1827 */ 1827 */
1828static int irda_setsockopt(struct socket *sock, int level, int optname, 1828static int irda_setsockopt(struct socket *sock, int level, int optname,
1829 char __user *optval, int optlen) 1829 char __user *optval, unsigned int optlen)
1830{ 1830{
1831 struct sock *sk = sock->sk; 1831 struct sock *sk = sock->sk;
1832 struct irda_sock *self = irda_sk(sk); 1832 struct irda_sock *self = irda_sk(sk);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index d985d163dcfc..bada1b9c670b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1387,7 +1387,7 @@ static int iucv_sock_release(struct socket *sock)
1387 1387
1388/* getsockopt and setsockopt */ 1388/* getsockopt and setsockopt */
1389static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, 1389static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1390 char __user *optval, int optlen) 1390 char __user *optval, unsigned int optlen)
1391{ 1391{
1392 struct sock *sk = sock->sk; 1392 struct sock *sk = sock->sk;
1393 struct iucv_sock *iucv = iucv_sk(sk); 1393 struct iucv_sock *iucv = iucv_sk(sk);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index c45eee1c0e8d..7aa4fd170104 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -973,7 +973,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
973 * Set various connection specific parameters. 973 * Set various connection specific parameters.
974 */ 974 */
975static int llc_ui_setsockopt(struct socket *sock, int level, int optname, 975static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
976 char __user *optval, int optlen) 976 char __user *optval, unsigned int optlen)
977{ 977{
978 struct sock *sk = sock->sk; 978 struct sock *sk = sock->sk;
979 struct llc_sock *llc = llc_sk(sk); 979 struct llc_sock *llc = llc_sk(sk);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5143d203256b..fd4028296613 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -367,7 +367,10 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 367 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
368 u32 staflags; 368 u32 staflags;
369 369
370 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control))) 370 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)
371 || ieee80211_is_auth(hdr->frame_control)
372 || ieee80211_is_assoc_resp(hdr->frame_control)
373 || ieee80211_is_reassoc_resp(hdr->frame_control)))
371 return TX_CONTINUE; 374 return TX_CONTINUE;
372 375
373 staflags = get_sta_flags(sta); 376 staflags = get_sta_flags(sta);
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 8ab829f86574..f042ae521557 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -113,7 +113,7 @@ static int nf_sockopt(struct sock *sk, u_int8_t pf, int val,
113} 113}
114 114
115int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, 115int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
116 int len) 116 unsigned int len)
117{ 117{
118 return nf_sockopt(sk, pf, val, opt, &len, 0); 118 return nf_sockopt(sk, pf, val, opt, &len, 0);
119} 119}
@@ -154,7 +154,7 @@ static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val,
154} 154}
155 155
156int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, 156int compat_nf_setsockopt(struct sock *sk, u_int8_t pf,
157 int val, char __user *opt, int len) 157 int val, char __user *opt, unsigned int len)
158{ 158{
159 return compat_nf_sockopt(sk, pf, val, opt, &len, 0); 159 return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
160} 160}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index dd85320907cb..19e98007691c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1150,7 +1150,7 @@ static void netlink_update_socket_mc(struct netlink_sock *nlk,
1150} 1150}
1151 1151
1152static int netlink_setsockopt(struct socket *sock, int level, int optname, 1152static int netlink_setsockopt(struct socket *sock, int level, int optname,
1153 char __user *optval, int optlen) 1153 char __user *optval, unsigned int optlen)
1154{ 1154{
1155 struct sock *sk = sock->sk; 1155 struct sock *sk = sock->sk;
1156 struct netlink_sock *nlk = nlk_sk(sk); 1156 struct netlink_sock *nlk = nlk_sk(sk);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ce1a34b99c23..7a834952f67f 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -301,7 +301,7 @@ void nr_destroy_socket(struct sock *sk)
301 */ 301 */
302 302
303static int nr_setsockopt(struct socket *sock, int level, int optname, 303static int nr_setsockopt(struct socket *sock, int level, int optname,
304 char __user *optval, int optlen) 304 char __user *optval, unsigned int optlen)
305{ 305{
306 struct sock *sk = sock->sk; 306 struct sock *sk = sock->sk;
307 struct nr_sock *nr = nr_sk(sk); 307 struct nr_sock *nr = nr_sk(sk);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 103d5611b818..d7ecca0a0c07 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1701,7 +1701,7 @@ static void packet_flush_mclist(struct sock *sk)
1701} 1701}
1702 1702
1703static int 1703static int
1704packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1704packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1705{ 1705{
1706 struct sock *sk = sock->sk; 1706 struct sock *sk = sock->sk;
1707 struct packet_sock *po = pkt_sk(sk); 1707 struct packet_sock *po = pkt_sk(sk);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b8252d289cd7..5f32d217535b 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -742,7 +742,7 @@ static int pep_init(struct sock *sk)
742} 742}
743 743
744static int pep_setsockopt(struct sock *sk, int level, int optname, 744static int pep_setsockopt(struct sock *sk, int level, int optname,
745 char __user *optval, int optlen) 745 char __user *optval, unsigned int optlen)
746{ 746{
747 struct pep_sock *pn = pep_sk(sk); 747 struct pep_sock *pn = pep_sk(sk);
748 int val = 0, err = 0; 748 int val = 0, err = 0;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 07aa9f08d5fb..aa5b5a972bff 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -407,7 +407,6 @@ int pn_sock_get_port(struct sock *sk, unsigned short sport)
407 return -EADDRINUSE; 407 return -EADDRINUSE;
408 408
409found: 409found:
410 mutex_unlock(&port_mutex);
411 pn->sobject = pn_object(pn_addr(pn->sobject), sport); 410 pn->sobject = pn_object(pn_addr(pn->sobject), sport);
412 return 0; 411 return 0;
413} 412}
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 6b58aeff4c7a..98e05382fd3c 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -248,7 +248,7 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
248} 248}
249 249
250static int rds_setsockopt(struct socket *sock, int level, int optname, 250static int rds_setsockopt(struct socket *sock, int level, int optname,
251 char __user *optval, int optlen) 251 char __user *optval, unsigned int optlen)
252{ 252{
253 struct rds_sock *rs = rds_sk_to_rs(sock->sk); 253 struct rds_sock *rs = rds_sk_to_rs(sock->sk);
254 int ret; 254 int ret;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index dbeaf2983822..ba2efb960c60 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -27,6 +27,7 @@
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/rfkill.h> 29#include <linux/rfkill.h>
30#include <linux/sched.h>
30#include <linux/spinlock.h> 31#include <linux/spinlock.h>
31#include <linux/miscdevice.h> 32#include <linux/miscdevice.h>
32#include <linux/wait.h> 33#include <linux/wait.h>
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 1e166c9685aa..502cce76621d 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -370,7 +370,7 @@ void rose_destroy_socket(struct sock *sk)
370 */ 370 */
371 371
372static int rose_setsockopt(struct socket *sock, int level, int optname, 372static int rose_setsockopt(struct socket *sock, int level, int optname,
373 char __user *optval, int optlen) 373 char __user *optval, unsigned int optlen)
374{ 374{
375 struct sock *sk = sock->sk; 375 struct sock *sk = sock->sk;
376 struct rose_sock *rose = rose_sk(sk); 376 struct rose_sock *rose = rose_sk(sk);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index bfe493ebf27c..a86afceaa94f 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -507,7 +507,7 @@ out:
507 * set RxRPC socket options 507 * set RxRPC socket options
508 */ 508 */
509static int rxrpc_setsockopt(struct socket *sock, int level, int optname, 509static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
510 char __user *optval, int optlen) 510 char __user *optval, unsigned int optlen)
511{ 511{
512 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 512 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
513 unsigned min_sec_level; 513 unsigned min_sec_level;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 89af37a6c871..c8d05758661d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2027,7 +2027,8 @@ out:
2027 * instead a error will be indicated to the user. 2027 * instead a error will be indicated to the user.
2028 */ 2028 */
2029static int sctp_setsockopt_disable_fragments(struct sock *sk, 2029static int sctp_setsockopt_disable_fragments(struct sock *sk,
2030 char __user *optval, int optlen) 2030 char __user *optval,
2031 unsigned int optlen)
2031{ 2032{
2032 int val; 2033 int val;
2033 2034
@@ -2043,7 +2044,7 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
2043} 2044}
2044 2045
2045static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2046static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2046 int optlen) 2047 unsigned int optlen)
2047{ 2048{
2048 if (optlen > sizeof(struct sctp_event_subscribe)) 2049 if (optlen > sizeof(struct sctp_event_subscribe))
2049 return -EINVAL; 2050 return -EINVAL;
@@ -2064,7 +2065,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2064 * association is closed. 2065 * association is closed.
2065 */ 2066 */
2066static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, 2067static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2067 int optlen) 2068 unsigned int optlen)
2068{ 2069{
2069 struct sctp_sock *sp = sctp_sk(sk); 2070 struct sctp_sock *sp = sctp_sk(sk);
2070 2071
@@ -2318,7 +2319,8 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2318} 2319}
2319 2320
2320static int sctp_setsockopt_peer_addr_params(struct sock *sk, 2321static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2321 char __user *optval, int optlen) 2322 char __user *optval,
2323 unsigned int optlen)
2322{ 2324{
2323 struct sctp_paddrparams params; 2325 struct sctp_paddrparams params;
2324 struct sctp_transport *trans = NULL; 2326 struct sctp_transport *trans = NULL;
@@ -2430,7 +2432,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2430 */ 2432 */
2431 2433
2432static int sctp_setsockopt_delayed_ack(struct sock *sk, 2434static int sctp_setsockopt_delayed_ack(struct sock *sk,
2433 char __user *optval, int optlen) 2435 char __user *optval, unsigned int optlen)
2434{ 2436{
2435 struct sctp_sack_info params; 2437 struct sctp_sack_info params;
2436 struct sctp_transport *trans = NULL; 2438 struct sctp_transport *trans = NULL;
@@ -2546,7 +2548,7 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
2546 * by the change). With TCP-style sockets, this option is inherited by 2548 * by the change). With TCP-style sockets, this option is inherited by
2547 * sockets derived from a listener socket. 2549 * sockets derived from a listener socket.
2548 */ 2550 */
2549static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int optlen) 2551static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2550{ 2552{
2551 struct sctp_initmsg sinit; 2553 struct sctp_initmsg sinit;
2552 struct sctp_sock *sp = sctp_sk(sk); 2554 struct sctp_sock *sp = sctp_sk(sk);
@@ -2583,7 +2585,8 @@ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int opt
2583 * to this call if the caller is using the UDP model. 2585 * to this call if the caller is using the UDP model.
2584 */ 2586 */
2585static int sctp_setsockopt_default_send_param(struct sock *sk, 2587static int sctp_setsockopt_default_send_param(struct sock *sk,
2586 char __user *optval, int optlen) 2588 char __user *optval,
2589 unsigned int optlen)
2587{ 2590{
2588 struct sctp_sndrcvinfo info; 2591 struct sctp_sndrcvinfo info;
2589 struct sctp_association *asoc; 2592 struct sctp_association *asoc;
@@ -2622,7 +2625,7 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
2622 * association peer's addresses. 2625 * association peer's addresses.
2623 */ 2626 */
2624static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, 2627static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2625 int optlen) 2628 unsigned int optlen)
2626{ 2629{
2627 struct sctp_prim prim; 2630 struct sctp_prim prim;
2628 struct sctp_transport *trans; 2631 struct sctp_transport *trans;
@@ -2651,7 +2654,7 @@ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2651 * integer boolean flag. 2654 * integer boolean flag.
2652 */ 2655 */
2653static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, 2656static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2654 int optlen) 2657 unsigned int optlen)
2655{ 2658{
2656 int val; 2659 int val;
2657 2660
@@ -2676,7 +2679,8 @@ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2676 * be changed. 2679 * be changed.
2677 * 2680 *
2678 */ 2681 */
2679static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int optlen) { 2682static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
2683{
2680 struct sctp_rtoinfo rtoinfo; 2684 struct sctp_rtoinfo rtoinfo;
2681 struct sctp_association *asoc; 2685 struct sctp_association *asoc;
2682 2686
@@ -2728,7 +2732,7 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int opt
2728 * See [SCTP] for more information. 2732 * See [SCTP] for more information.
2729 * 2733 *
2730 */ 2734 */
2731static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int optlen) 2735static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
2732{ 2736{
2733 2737
2734 struct sctp_assocparams assocparams; 2738 struct sctp_assocparams assocparams;
@@ -2800,7 +2804,7 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int o
2800 * addresses and a user will receive both PF_INET6 and PF_INET type 2804 * addresses and a user will receive both PF_INET6 and PF_INET type
2801 * addresses on the socket. 2805 * addresses on the socket.
2802 */ 2806 */
2803static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int optlen) 2807static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
2804{ 2808{
2805 int val; 2809 int val;
2806 struct sctp_sock *sp = sctp_sk(sk); 2810 struct sctp_sock *sp = sctp_sk(sk);
@@ -2844,7 +2848,7 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int op
2844 * changed (effecting future associations only). 2848 * changed (effecting future associations only).
2845 * assoc_value: This parameter specifies the maximum size in bytes. 2849 * assoc_value: This parameter specifies the maximum size in bytes.
2846 */ 2850 */
2847static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optlen) 2851static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
2848{ 2852{
2849 struct sctp_assoc_value params; 2853 struct sctp_assoc_value params;
2850 struct sctp_association *asoc; 2854 struct sctp_association *asoc;
@@ -2899,7 +2903,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
2899 * set primary request: 2903 * set primary request:
2900 */ 2904 */
2901static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, 2905static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
2902 int optlen) 2906 unsigned int optlen)
2903{ 2907{
2904 struct sctp_sock *sp; 2908 struct sctp_sock *sp;
2905 struct sctp_endpoint *ep; 2909 struct sctp_endpoint *ep;
@@ -2950,7 +2954,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
2950} 2954}
2951 2955
2952static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, 2956static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
2953 int optlen) 2957 unsigned int optlen)
2954{ 2958{
2955 struct sctp_setadaptation adaptation; 2959 struct sctp_setadaptation adaptation;
2956 2960
@@ -2979,7 +2983,7 @@ static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval
2979 * saved with outbound messages. 2983 * saved with outbound messages.
2980 */ 2984 */
2981static int sctp_setsockopt_context(struct sock *sk, char __user *optval, 2985static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
2982 int optlen) 2986 unsigned int optlen)
2983{ 2987{
2984 struct sctp_assoc_value params; 2988 struct sctp_assoc_value params;
2985 struct sctp_sock *sp; 2989 struct sctp_sock *sp;
@@ -3030,7 +3034,7 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3030 */ 3034 */
3031static int sctp_setsockopt_fragment_interleave(struct sock *sk, 3035static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3032 char __user *optval, 3036 char __user *optval,
3033 int optlen) 3037 unsigned int optlen)
3034{ 3038{
3035 int val; 3039 int val;
3036 3040
@@ -3063,7 +3067,7 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3063 */ 3067 */
3064static int sctp_setsockopt_partial_delivery_point(struct sock *sk, 3068static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3065 char __user *optval, 3069 char __user *optval,
3066 int optlen) 3070 unsigned int optlen)
3067{ 3071{
3068 u32 val; 3072 u32 val;
3069 3073
@@ -3096,7 +3100,7 @@ static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3096 */ 3100 */
3097static int sctp_setsockopt_maxburst(struct sock *sk, 3101static int sctp_setsockopt_maxburst(struct sock *sk,
3098 char __user *optval, 3102 char __user *optval,
3099 int optlen) 3103 unsigned int optlen)
3100{ 3104{
3101 struct sctp_assoc_value params; 3105 struct sctp_assoc_value params;
3102 struct sctp_sock *sp; 3106 struct sctp_sock *sp;
@@ -3140,8 +3144,8 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
3140 * will only effect future associations on the socket. 3144 * will only effect future associations on the socket.
3141 */ 3145 */
3142static int sctp_setsockopt_auth_chunk(struct sock *sk, 3146static int sctp_setsockopt_auth_chunk(struct sock *sk,
3143 char __user *optval, 3147 char __user *optval,
3144 int optlen) 3148 unsigned int optlen)
3145{ 3149{
3146 struct sctp_authchunk val; 3150 struct sctp_authchunk val;
3147 3151
@@ -3172,8 +3176,8 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
3172 * endpoint requires the peer to use. 3176 * endpoint requires the peer to use.
3173 */ 3177 */
3174static int sctp_setsockopt_hmac_ident(struct sock *sk, 3178static int sctp_setsockopt_hmac_ident(struct sock *sk,
3175 char __user *optval, 3179 char __user *optval,
3176 int optlen) 3180 unsigned int optlen)
3177{ 3181{
3178 struct sctp_hmacalgo *hmacs; 3182 struct sctp_hmacalgo *hmacs;
3179 u32 idents; 3183 u32 idents;
@@ -3215,7 +3219,7 @@ out:
3215 */ 3219 */
3216static int sctp_setsockopt_auth_key(struct sock *sk, 3220static int sctp_setsockopt_auth_key(struct sock *sk,
3217 char __user *optval, 3221 char __user *optval,
3218 int optlen) 3222 unsigned int optlen)
3219{ 3223{
3220 struct sctp_authkey *authkey; 3224 struct sctp_authkey *authkey;
3221 struct sctp_association *asoc; 3225 struct sctp_association *asoc;
@@ -3260,8 +3264,8 @@ out:
3260 * the association shared key. 3264 * the association shared key.
3261 */ 3265 */
3262static int sctp_setsockopt_active_key(struct sock *sk, 3266static int sctp_setsockopt_active_key(struct sock *sk,
3263 char __user *optval, 3267 char __user *optval,
3264 int optlen) 3268 unsigned int optlen)
3265{ 3269{
3266 struct sctp_authkeyid val; 3270 struct sctp_authkeyid val;
3267 struct sctp_association *asoc; 3271 struct sctp_association *asoc;
@@ -3288,8 +3292,8 @@ static int sctp_setsockopt_active_key(struct sock *sk,
3288 * This set option will delete a shared secret key from use. 3292 * This set option will delete a shared secret key from use.
3289 */ 3293 */
3290static int sctp_setsockopt_del_key(struct sock *sk, 3294static int sctp_setsockopt_del_key(struct sock *sk,
3291 char __user *optval, 3295 char __user *optval,
3292 int optlen) 3296 unsigned int optlen)
3293{ 3297{
3294 struct sctp_authkeyid val; 3298 struct sctp_authkeyid val;
3295 struct sctp_association *asoc; 3299 struct sctp_association *asoc;
@@ -3332,7 +3336,7 @@ static int sctp_setsockopt_del_key(struct sock *sk,
3332 * optlen - the size of the buffer. 3336 * optlen - the size of the buffer.
3333 */ 3337 */
3334SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, 3338SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3335 char __user *optval, int optlen) 3339 char __user *optval, unsigned int optlen)
3336{ 3340{
3337 int retval = 0; 3341 int retval = 0;
3338 3342
diff --git a/net/socket.c b/net/socket.c
index 41e8847508aa..75655365b5fd 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2391,7 +2391,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
2391} 2391}
2392 2392
2393int kernel_setsockopt(struct socket *sock, int level, int optname, 2393int kernel_setsockopt(struct socket *sock, int level, int optname,
2394 char *optval, int optlen) 2394 char *optval, unsigned int optlen)
2395{ 2395{
2396 mm_segment_t oldfs = get_fs(); 2396 mm_segment_t oldfs = get_fs();
2397 int err; 2397 int err;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e8254e809b79..e6d9abf7440e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1658,7 +1658,7 @@ restart:
1658 */ 1658 */
1659 1659
1660static int setsockopt(struct socket *sock, 1660static int setsockopt(struct socket *sock,
1661 int lvl, int opt, char __user *ov, int ol) 1661 int lvl, int opt, char __user *ov, unsigned int ol)
1662{ 1662{
1663 struct sock *sk = sock->sk; 1663 struct sock *sk = sock->sk;
1664 struct tipc_port *tport = tipc_sk_port(sk); 1664 struct tipc_port *tport = tipc_sk_port(sk);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5e6c072c64d3..7fa9c7ad3d3b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -409,7 +409,7 @@ static void x25_destroy_socket(struct sock *sk)
409 */ 409 */
410 410
411static int x25_setsockopt(struct socket *sock, int level, int optname, 411static int x25_setsockopt(struct socket *sock, int level, int optname,
412 char __user *optval, int optlen) 412 char __user *optval, unsigned int optlen)
413{ 413{
414 int opt; 414 int opt;
415 struct sock *sk = sock->sk; 415 struct sock *sk = sock->sk;
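The hunks above (sctp, kernel_setsockopt, tipc, x25) all convert the setsockopt length argument from int to unsigned int, so a handler only needs a lower-bound size check rather than a separate negative-length check. A minimal sketch of a handler written against the new prototype; my_setsockopt and the integer option are hypothetical and not taken from the patch:

    #include <linux/net.h>
    #include <linux/uaccess.h>

    /* hypothetical protocol hook using the new unsigned length */
    static int my_setsockopt(struct socket *sock, int level, int optname,
                             char __user *optval, unsigned int optlen)
    {
            int val;

            /* unsigned optlen: a single minimum-size check is enough */
            if (optlen < sizeof(val))
                    return -EINVAL;
            if (copy_from_user(&val, optval, sizeof(val)))
                    return -EFAULT;
            /* ... apply val to sock->sk state under lock_sock() ... */
            return 0;
    }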
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c
index 9cf80a11e8b6..26fab33ffa8c 100644
--- a/samples/tracepoints/tracepoint-sample.c
+++ b/samples/tracepoints/tracepoint-sample.c
@@ -28,7 +28,7 @@ static int my_open(struct inode *inode, struct file *file)
28 return -EPERM; 28 return -EPERM;
29} 29}
30 30
31static struct file_operations mark_ops = { 31static const struct file_operations mark_ops = {
32 .open = my_open, 32 .open = my_open,
33}; 33};
34 34
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 8e9777b76405..0c72c9c38956 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -43,7 +43,7 @@ static ssize_t ima_show_htable_violations(struct file *filp,
43 return ima_show_htable_value(buf, count, ppos, &ima_htable.violations); 43 return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
44} 44}
45 45
46static struct file_operations ima_htable_violations_ops = { 46static const struct file_operations ima_htable_violations_ops = {
47 .read = ima_show_htable_violations 47 .read = ima_show_htable_violations
48}; 48};
49 49
@@ -55,7 +55,7 @@ static ssize_t ima_show_measurements_count(struct file *filp,
55 55
56} 56}
57 57
58static struct file_operations ima_measurements_count_ops = { 58static const struct file_operations ima_measurements_count_ops = {
59 .read = ima_show_measurements_count 59 .read = ima_show_measurements_count
60}; 60};
61 61
@@ -158,7 +158,7 @@ static int ima_measurements_open(struct inode *inode, struct file *file)
158 return seq_open(file, &ima_measurments_seqops); 158 return seq_open(file, &ima_measurments_seqops);
159} 159}
160 160
161static struct file_operations ima_measurements_ops = { 161static const struct file_operations ima_measurements_ops = {
162 .open = ima_measurements_open, 162 .open = ima_measurements_open,
163 .read = seq_read, 163 .read = seq_read,
164 .llseek = seq_lseek, 164 .llseek = seq_lseek,
@@ -233,7 +233,7 @@ static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
233 return seq_open(file, &ima_ascii_measurements_seqops); 233 return seq_open(file, &ima_ascii_measurements_seqops);
234} 234}
235 235
236static struct file_operations ima_ascii_measurements_ops = { 236static const struct file_operations ima_ascii_measurements_ops = {
237 .open = ima_ascii_measurements_open, 237 .open = ima_ascii_measurements_open,
238 .read = seq_read, 238 .read = seq_read,
239 .llseek = seq_lseek, 239 .llseek = seq_lseek,
@@ -313,7 +313,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
313 return 0; 313 return 0;
314} 314}
315 315
316static struct file_operations ima_measure_policy_ops = { 316static const struct file_operations ima_measure_policy_ops = {
317 .open = ima_open_policy, 317 .open = ima_open_policy,
318 .write = ima_write_policy, 318 .write = ima_write_policy,
319 .release = ima_release_policy 319 .release = ima_release_policy
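The ima_fs.c hunks above, like the tracepoint-sample.c hunk before them, only add the const qualifier to file_operations tables so the compiler can place them in read-only data. A minimal sketch of the same idiom with hypothetical names (my_read, my_fops):

    #include <linux/fs.h>

    static ssize_t my_read(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
    {
            return 0;       /* always EOF in this sketch */
    }

    /* const: the ops table is never written after initialization */
    static const struct file_operations my_fops = {
            .read = my_read,
    };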
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index f0ebc971c686..1dd66ddffcaf 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -897,6 +897,15 @@ static int tas_create(struct i2c_adapter *adapter,
897 client = i2c_new_device(adapter, &info); 897 client = i2c_new_device(adapter, &info);
898 if (!client) 898 if (!client)
899 return -ENODEV; 899 return -ENODEV;
900 /*
901 * We know the driver is already loaded, so the device should be
902 * already bound. If not it means binding failed, and then there
903 * is no point in keeping the device instantiated.
904 */
905 if (!client->driver) {
906 i2c_unregister_device(client);
907 return -ENODEV;
908 }
900 909
901 /* 910 /*
902 * Let i2c-core delete that device on driver removal. 911 * Let i2c-core delete that device on driver removal.
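This tas.c hunk, and the keywest.c hunk further down, add the same check after i2c_new_device(): since the driver is already registered at this point, a client with no bound driver means probing failed, so the client is unregistered again instead of being kept around. A self-contained sketch of the pattern; probe_bound_client is a hypothetical helper name:

    #include <linux/i2c.h>

    static struct i2c_client *probe_bound_client(struct i2c_adapter *adap,
                                                 struct i2c_board_info *info)
    {
            struct i2c_client *client = i2c_new_device(adap, info);

            if (!client)
                    return NULL;
            /* driver already registered: an unbound client means probe failed */
            if (!client->driver) {
                    i2c_unregister_device(client);
                    return NULL;
            }
            return client;
    }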
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index 6e7d09ae0e82..7d722a025d0d 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -29,6 +29,8 @@ extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];
29 29
30extern int use_internal_drums; 30extern int use_internal_drums;
31 31
32static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
33 struct snd_midi_channel *chan);
32/* 34/*
33 * The next table looks magical, but it certainly is not. Its values have 35 * The next table looks magical, but it certainly is not. Its values have
34 * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception 36 * been calculated as table[i]=8*log(i/64)/log(2) with an obvious exception
@@ -242,16 +244,20 @@ void snd_opl3_timer_func(unsigned long data)
242 int again = 0; 244 int again = 0;
243 int i; 245 int i;
244 246
245 spin_lock_irqsave(&opl3->sys_timer_lock, flags); 247 spin_lock_irqsave(&opl3->voice_lock, flags);
246 for (i = 0; i < opl3->max_voices; i++) { 248 for (i = 0; i < opl3->max_voices; i++) {
247 struct snd_opl3_voice *vp = &opl3->voices[i]; 249 struct snd_opl3_voice *vp = &opl3->voices[i];
248 if (vp->state > 0 && vp->note_off_check) { 250 if (vp->state > 0 && vp->note_off_check) {
249 if (vp->note_off == jiffies) 251 if (vp->note_off == jiffies)
250 snd_opl3_note_off(opl3, vp->note, 0, vp->chan); 252 snd_opl3_note_off_unsafe(opl3, vp->note, 0,
253 vp->chan);
251 else 254 else
252 again++; 255 again++;
253 } 256 }
254 } 257 }
258 spin_unlock_irqrestore(&opl3->voice_lock, flags);
259
260 spin_lock_irqsave(&opl3->sys_timer_lock, flags);
255 if (again) { 261 if (again) {
256 opl3->tlist.expires = jiffies + 1; /* invoke again */ 262 opl3->tlist.expires = jiffies + 1; /* invoke again */
257 add_timer(&opl3->tlist); 263 add_timer(&opl3->tlist);
@@ -658,15 +664,14 @@ static void snd_opl3_kill_voice(struct snd_opl3 *opl3, int voice)
658/* 664/*
659 * Release a note in response to a midi note off. 665 * Release a note in response to a midi note off.
660 */ 666 */
661void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan) 667static void snd_opl3_note_off_unsafe(void *p, int note, int vel,
668 struct snd_midi_channel *chan)
662{ 669{
663 struct snd_opl3 *opl3; 670 struct snd_opl3 *opl3;
664 671
665 int voice; 672 int voice;
666 struct snd_opl3_voice *vp; 673 struct snd_opl3_voice *vp;
667 674
668 unsigned long flags;
669
670 opl3 = p; 675 opl3 = p;
671 676
672#ifdef DEBUG_MIDI 677#ifdef DEBUG_MIDI
@@ -674,12 +679,9 @@ void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan
674 chan->number, chan->midi_program, note); 679 chan->number, chan->midi_program, note);
675#endif 680#endif
676 681
677 spin_lock_irqsave(&opl3->voice_lock, flags);
678
679 if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) { 682 if (opl3->synth_mode == SNDRV_OPL3_MODE_SEQ) {
680 if (chan->drum_channel && use_internal_drums) { 683 if (chan->drum_channel && use_internal_drums) {
681 snd_opl3_drum_switch(opl3, note, vel, 0, chan); 684 snd_opl3_drum_switch(opl3, note, vel, 0, chan);
682 spin_unlock_irqrestore(&opl3->voice_lock, flags);
683 return; 685 return;
684 } 686 }
685 /* this loop will hopefully kill all extra voices, because 687 /* this loop will hopefully kill all extra voices, because
@@ -697,6 +699,16 @@ void snd_opl3_note_off(void *p, int note, int vel, struct snd_midi_channel *chan
697 snd_opl3_kill_voice(opl3, voice); 699 snd_opl3_kill_voice(opl3, voice);
698 } 700 }
699 } 701 }
702}
703
704void snd_opl3_note_off(void *p, int note, int vel,
705 struct snd_midi_channel *chan)
706{
707 struct snd_opl3 *opl3 = p;
708 unsigned long flags;
709
710 spin_lock_irqsave(&opl3->voice_lock, flags);
711 snd_opl3_note_off_unsafe(p, note, vel, chan);
700 spin_unlock_irqrestore(&opl3->voice_lock, flags); 712 spin_unlock_irqrestore(&opl3->voice_lock, flags);
701} 713}
702 714
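The opl3_midi.c change above splits snd_opl3_note_off() into an unlocked _unsafe worker plus a thin locking wrapper, so the timer callback can hold voice_lock across its voice scan and call the worker directly, then take sys_timer_lock only for re-arming the timer. A self-contained sketch of that lock-split shape, with hypothetical names (state_lock, update_state):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(state_lock);
    static int state;

    /* worker: caller must already hold state_lock */
    static void update_state_unsafe(int v)
    {
            state = v;
    }

    /* public entry point: takes the lock, then delegates to the worker */
    static void update_state(int v)
    {
            unsigned long flags;

            spin_lock_irqsave(&state_lock, flags);
            update_state_unsafe(v);
            spin_unlock_irqrestore(&state_lock, flags);
    }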
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index c52691c2fc46..9a88cdfd952a 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -915,7 +915,7 @@ static int __devinit hal2_probe(struct platform_device *pdev)
915 return 0; 915 return 0;
916} 916}
917 917
918static int __exit hal2_remove(struct platform_device *pdev) 918static int __devexit hal2_remove(struct platform_device *pdev)
919{ 919{
920 struct snd_card *card = platform_get_drvdata(pdev); 920 struct snd_card *card = platform_get_drvdata(pdev);
921 921
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index e497525bc11b..8691f4cf6191 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -973,7 +973,7 @@ static int __devinit snd_sgio2audio_probe(struct platform_device *pdev)
973 return 0; 973 return 0;
974} 974}
975 975
976static int __exit snd_sgio2audio_remove(struct platform_device *pdev) 976static int __devexit snd_sgio2audio_remove(struct platform_device *pdev)
977{ 977{
978 struct snd_card *card = platform_get_drvdata(pdev); 978 struct snd_card *card = platform_get_drvdata(pdev);
979 979
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index b1b3a644f738..75454648d50c 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -1037,7 +1037,7 @@ static int atc_line_front_unmute(struct ct_atc *atc, unsigned char state)
1037 1037
1038static int atc_line_surround_unmute(struct ct_atc *atc, unsigned char state) 1038static int atc_line_surround_unmute(struct ct_atc *atc, unsigned char state)
1039{ 1039{
1040 return atc_daio_unmute(atc, state, LINEO4); 1040 return atc_daio_unmute(atc, state, LINEO2);
1041} 1041}
1042 1042
1043static int atc_line_clfe_unmute(struct ct_atc *atc, unsigned char state) 1043static int atc_line_clfe_unmute(struct ct_atc *atc, unsigned char state)
@@ -1047,7 +1047,7 @@ static int atc_line_clfe_unmute(struct ct_atc *atc, unsigned char state)
1047 1047
1048static int atc_line_rear_unmute(struct ct_atc *atc, unsigned char state) 1048static int atc_line_rear_unmute(struct ct_atc *atc, unsigned char state)
1049{ 1049{
1050 return atc_daio_unmute(atc, state, LINEO2); 1050 return atc_daio_unmute(atc, state, LINEO4);
1051} 1051}
1052 1052
1053static int atc_line_in_unmute(struct ct_atc *atc, unsigned char state) 1053static int atc_line_in_unmute(struct ct_atc *atc, unsigned char state)
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index da2065cd2c0d..1305f7ca02c3 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -950,7 +950,7 @@ static int __devinit snd_echo_new_pcm(struct echoaudio *chip)
950 Control interface 950 Control interface
951******************************************************************************/ 951******************************************************************************/
952 952
953#ifndef ECHOCARD_HAS_VMIXER 953#if !defined(ECHOCARD_HAS_VMIXER) || defined(ECHOCARD_HAS_LINE_OUT_GAIN)
954 954
955/******************* PCM output volume *******************/ 955/******************* PCM output volume *******************/
956static int snd_echo_output_gain_info(struct snd_kcontrol *kcontrol, 956static int snd_echo_output_gain_info(struct snd_kcontrol *kcontrol,
@@ -1003,6 +1003,19 @@ static int snd_echo_output_gain_put(struct snd_kcontrol *kcontrol,
1003 return changed; 1003 return changed;
1004} 1004}
1005 1005
1006#ifdef ECHOCARD_HAS_LINE_OUT_GAIN
1007/* On the Mia this one controls the line-out volume */
1008static struct snd_kcontrol_new snd_echo_line_output_gain __devinitdata = {
1009 .name = "Line Playback Volume",
1010 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
1011 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
1012 SNDRV_CTL_ELEM_ACCESS_TLV_READ,
1013 .info = snd_echo_output_gain_info,
1014 .get = snd_echo_output_gain_get,
1015 .put = snd_echo_output_gain_put,
1016 .tlv = {.p = db_scale_output_gain},
1017};
1018#else
1006static struct snd_kcontrol_new snd_echo_pcm_output_gain __devinitdata = { 1019static struct snd_kcontrol_new snd_echo_pcm_output_gain __devinitdata = {
1007 .name = "PCM Playback Volume", 1020 .name = "PCM Playback Volume",
1008 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1021 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -1012,9 +1025,10 @@ static struct snd_kcontrol_new snd_echo_pcm_output_gain __devinitdata = {
1012 .put = snd_echo_output_gain_put, 1025 .put = snd_echo_output_gain_put,
1013 .tlv = {.p = db_scale_output_gain}, 1026 .tlv = {.p = db_scale_output_gain},
1014}; 1027};
1015
1016#endif 1028#endif
1017 1029
1030#endif /* !ECHOCARD_HAS_VMIXER || ECHOCARD_HAS_LINE_OUT_GAIN */
1031
1018 1032
1019 1033
1020#ifdef ECHOCARD_HAS_INPUT_GAIN 1034#ifdef ECHOCARD_HAS_INPUT_GAIN
@@ -2030,10 +2044,18 @@ static int __devinit snd_echo_probe(struct pci_dev *pci,
2030 snd_echo_vmixer.count = num_pipes_out(chip) * num_busses_out(chip); 2044 snd_echo_vmixer.count = num_pipes_out(chip) * num_busses_out(chip);
2031 if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vmixer, chip))) < 0) 2045 if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_vmixer, chip))) < 0)
2032 goto ctl_error; 2046 goto ctl_error;
2033#else 2047#ifdef ECHOCARD_HAS_LINE_OUT_GAIN
2034 if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_pcm_output_gain, chip))) < 0) 2048 err = snd_ctl_add(chip->card,
2049 snd_ctl_new1(&snd_echo_line_output_gain, chip));
2050 if (err < 0)
2035 goto ctl_error; 2051 goto ctl_error;
2036#endif 2052#endif
2053#else /* ECHOCARD_HAS_VMIXER */
2054 err = snd_ctl_add(chip->card,
2055 snd_ctl_new1(&snd_echo_pcm_output_gain, chip));
2056 if (err < 0)
2057 goto ctl_error;
2058#endif /* ECHOCARD_HAS_VMIXER */
2037 2059
2038#ifdef ECHOCARD_HAS_INPUT_GAIN 2060#ifdef ECHOCARD_HAS_INPUT_GAIN
2039 if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_line_input_gain, chip))) < 0) 2061 if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_echo_line_input_gain, chip))) < 0)
diff --git a/sound/pci/echoaudio/mia.c b/sound/pci/echoaudio/mia.c
index f3b9b45c9c1b..f05c8c097aa8 100644
--- a/sound/pci/echoaudio/mia.c
+++ b/sound/pci/echoaudio/mia.c
@@ -29,6 +29,7 @@
29#define ECHOCARD_HAS_ADAT FALSE 29#define ECHOCARD_HAS_ADAT FALSE
30#define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 30#define ECHOCARD_HAS_STEREO_BIG_ENDIAN32
31#define ECHOCARD_HAS_MIDI 31#define ECHOCARD_HAS_MIDI
32#define ECHOCARD_HAS_LINE_OUT_GAIN
32 33
33/* Pipe indexes */ 34/* Pipe indexes */
34#define PX_ANALOG_OUT 0 /* 8 */ 35#define PX_ANALOG_OUT 0 /* 8 */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 20a66f85f0a4..c9ad182e1b4b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2303,6 +2303,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
2303 * white-list for enable_msi 2303 * white-list for enable_msi
2304 */ 2304 */
2305static struct snd_pci_quirk msi_white_list[] __devinitdata = { 2305static struct snd_pci_quirk msi_white_list[] __devinitdata = {
2306 SND_PCI_QUIRK(0x103c, 0x30f7, "HP Pavilion dv4t-1300", 1),
2306 SND_PCI_QUIRK(0x103c, 0x3607, "HP Compa CQ40", 1), 2307 SND_PCI_QUIRK(0x103c, 0x3607, "HP Compa CQ40", 1),
2307 {} 2308 {}
2308}; 2309};
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 215e72a87113..2d603f6aba63 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -4032,6 +4032,127 @@ static int ad1984a_thinkpad_init(struct hda_codec *codec)
4032} 4032}
4033 4033
4034/* 4034/*
4035 * HP Touchsmart
4036 * port-A (0x11) - front hp-out
4037 * port-B (0x14) - unused
4038 * port-C (0x15) - unused
4039 * port-D (0x12) - rear line out
4040 * port-E (0x1c) - front mic-in
4041 * port-F (0x16) - Internal speakers
4042 * digital-mic (0x17) - Internal mic
4043 */
4044
4045static struct hda_verb ad1984a_touchsmart_verbs[] = {
4046 /* DACs; unmute as default */
4047 {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
4048 {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
4049 /* Port-A (HP) mixer - route only from analog mixer */
4050 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
4051 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
4052 /* Port-A pin */
4053 {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
4054 /* Port-A (HP) pin - always unmuted */
4055 {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
4056 /* Port-E (int speaker) mixer - route only from analog mixer */
4057 {0x25, AC_VERB_SET_AMP_GAIN_MUTE, 0x03},
4058 /* Port-E pin */
4059 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
4060 {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
4061 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
4062 /* Port-F (int speaker) mixer - route only from analog mixer */
4063 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
4064 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
4065 /* Port-F pin */
4066 {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
4067 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
4068 /* Analog mixer; mute as default */
4069 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
4070 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
4071 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
4072 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
4073 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
4074 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
4075 /* Analog Mix output amp */
4076 {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
4077 /* capture sources */
4078 /* {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0}, */ /* set via unsol */
4079 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
4080 {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
4081 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
4082 /* unsolicited event for pin-sense */
4083 {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
4084 {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
4085 /* allow to touch GPIO1 (for mute control) */
4086 {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
4087 {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
4088 {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
4089 /* internal mic - dmic */
4090 {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
4091 /* set magic COEFs for dmic */
4092 {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
4093 {0x01, AC_VERB_SET_PROC_COEF, 0x08},
4094 { } /* end */
4095};
4096
4097static struct snd_kcontrol_new ad1984a_touchsmart_mixers[] = {
4098 HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
4099/* HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
4100 {
4101 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
4102 .name = "Master Playback Switch",
4103 .info = snd_hda_mixer_amp_switch_info,
4104 .get = snd_hda_mixer_amp_switch_get,
4105 .put = ad1884a_mobile_master_sw_put,
4106 .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
4107 },
4108 HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
4109 HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
4110 HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
4111 HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
4112 HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
4113 HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
4114 { } /* end */
4115};
4116
4117/* switch to external mic if plugged */
4118static void ad1984a_touchsmart_automic(struct hda_codec *codec)
4119{
4120 if (snd_hda_codec_read(codec, 0x1c, 0,
4121 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000) {
4122 snd_hda_codec_write(codec, 0x0c, 0,
4123 AC_VERB_SET_CONNECT_SEL, 0x4);
4124 } else {
4125 snd_hda_codec_write(codec, 0x0c, 0,
4126 AC_VERB_SET_CONNECT_SEL, 0x5);
4127 }
4128}
4129
4130
4131/* unsolicited event for HP jack sensing */
4132static void ad1984a_touchsmart_unsol_event(struct hda_codec *codec,
4133 unsigned int res)
4134{
4135 switch (res >> 26) {
4136 case AD1884A_HP_EVENT:
4137 ad1884a_hp_automute(codec);
4138 break;
4139 case AD1884A_MIC_EVENT:
4140 ad1984a_touchsmart_automic(codec);
4141 break;
4142 }
4143}
4144
4145/* initialize jack-sensing, too */
4146static int ad1984a_touchsmart_init(struct hda_codec *codec)
4147{
4148 ad198x_init(codec);
4149 ad1884a_hp_automute(codec);
4150 ad1984a_touchsmart_automic(codec);
4151 return 0;
4152}
4153
4154
4155/*
4035 */ 4156 */
4036 4157
4037enum { 4158enum {
@@ -4039,6 +4160,7 @@ enum {
4039 AD1884A_LAPTOP, 4160 AD1884A_LAPTOP,
4040 AD1884A_MOBILE, 4161 AD1884A_MOBILE,
4041 AD1884A_THINKPAD, 4162 AD1884A_THINKPAD,
4163 AD1984A_TOUCHSMART,
4042 AD1884A_MODELS 4164 AD1884A_MODELS
4043}; 4165};
4044 4166
@@ -4047,6 +4169,7 @@ static const char *ad1884a_models[AD1884A_MODELS] = {
4047 [AD1884A_LAPTOP] = "laptop", 4169 [AD1884A_LAPTOP] = "laptop",
4048 [AD1884A_MOBILE] = "mobile", 4170 [AD1884A_MOBILE] = "mobile",
4049 [AD1884A_THINKPAD] = "thinkpad", 4171 [AD1884A_THINKPAD] = "thinkpad",
4172 [AD1984A_TOUCHSMART] = "touchsmart",
4050}; 4173};
4051 4174
4052static struct snd_pci_quirk ad1884a_cfg_tbl[] = { 4175static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
@@ -4059,6 +4182,7 @@ static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
4059 SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP), 4182 SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP),
4060 SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x7010, "HP laptop", AD1884A_MOBILE), 4183 SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x7010, "HP laptop", AD1884A_MOBILE),
4061 SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD), 4184 SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
4185 SND_PCI_QUIRK(0x103c, 0x2a82, "Touchsmart", AD1984A_TOUCHSMART),
4062 {} 4186 {}
4063}; 4187};
4064 4188
@@ -4142,6 +4266,21 @@ static int patch_ad1884a(struct hda_codec *codec)
4142 codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event; 4266 codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
4143 codec->patch_ops.init = ad1984a_thinkpad_init; 4267 codec->patch_ops.init = ad1984a_thinkpad_init;
4144 break; 4268 break;
4269 case AD1984A_TOUCHSMART:
4270 spec->mixers[0] = ad1984a_touchsmart_mixers;
4271 spec->init_verbs[0] = ad1984a_touchsmart_verbs;
4272 spec->multiout.dig_out_nid = 0;
4273 codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
4274 codec->patch_ops.init = ad1984a_touchsmart_init;
4275 /* set the upper-limit for mixer amp to 0dB for avoiding the
4276 * possible damage by overloading
4277 */
4278 snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
4279 (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
4280 (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
4281 (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
4282 (1 << AC_AMPCAP_MUTE_SHIFT));
4283 break;
4145 } 4284 }
4146 4285
4147 return 0; 4286 return 0;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 9d899eda44d7..3fbbc8c01e70 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -682,11 +682,13 @@ static struct hda_input_mux cxt5045_capture_source = {
682}; 682};
683 683
684static struct hda_input_mux cxt5045_capture_source_benq = { 684static struct hda_input_mux cxt5045_capture_source_benq = {
685 .num_items = 3, 685 .num_items = 5,
686 .items = { 686 .items = {
687 { "IntMic", 0x1 }, 687 { "IntMic", 0x1 },
688 { "ExtMic", 0x2 }, 688 { "ExtMic", 0x2 },
689 { "LineIn", 0x3 }, 689 { "LineIn", 0x3 },
690 { "CD", 0x4 },
691 { "Mixer", 0x0 },
690 } 692 }
691}; 693};
692 694
@@ -811,11 +813,19 @@ static struct snd_kcontrol_new cxt5045_mixers[] = {
811}; 813};
812 814
813static struct snd_kcontrol_new cxt5045_benq_mixers[] = { 815static struct snd_kcontrol_new cxt5045_benq_mixers[] = {
816 HDA_CODEC_VOLUME("CD Capture Volume", 0x1a, 0x04, HDA_INPUT),
817 HDA_CODEC_MUTE("CD Capture Switch", 0x1a, 0x04, HDA_INPUT),
818 HDA_CODEC_VOLUME("CD Playback Volume", 0x17, 0x4, HDA_INPUT),
819 HDA_CODEC_MUTE("CD Playback Switch", 0x17, 0x4, HDA_INPUT),
820
814 HDA_CODEC_VOLUME("Line In Capture Volume", 0x1a, 0x03, HDA_INPUT), 821 HDA_CODEC_VOLUME("Line In Capture Volume", 0x1a, 0x03, HDA_INPUT),
815 HDA_CODEC_MUTE("Line In Capture Switch", 0x1a, 0x03, HDA_INPUT), 822 HDA_CODEC_MUTE("Line In Capture Switch", 0x1a, 0x03, HDA_INPUT),
816 HDA_CODEC_VOLUME("Line In Playback Volume", 0x17, 0x3, HDA_INPUT), 823 HDA_CODEC_VOLUME("Line In Playback Volume", 0x17, 0x3, HDA_INPUT),
817 HDA_CODEC_MUTE("Line In Playback Switch", 0x17, 0x3, HDA_INPUT), 824 HDA_CODEC_MUTE("Line In Playback Switch", 0x17, 0x3, HDA_INPUT),
818 825
826 HDA_CODEC_VOLUME("Mixer Capture Volume", 0x1a, 0x0, HDA_INPUT),
827 HDA_CODEC_MUTE("Mixer Capture Switch", 0x1a, 0x0, HDA_INPUT),
828
819 {} 829 {}
820}; 830};
821 831
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 129605819560..470fd74a0a1a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1332,15 +1332,20 @@ do_sku:
1332 * when the external headphone out jack is plugged" 1332 * when the external headphone out jack is plugged"
1333 */ 1333 */
1334 if (!spec->autocfg.hp_pins[0]) { 1334 if (!spec->autocfg.hp_pins[0]) {
1335 hda_nid_t nid;
1335 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 1336 tmp = (ass >> 11) & 0x3; /* HP to chassis */
1336 if (tmp == 0) 1337 if (tmp == 0)
1337 spec->autocfg.hp_pins[0] = porta; 1338 nid = porta;
1338 else if (tmp == 1) 1339 else if (tmp == 1)
1339 spec->autocfg.hp_pins[0] = porte; 1340 nid = porte;
1340 else if (tmp == 2) 1341 else if (tmp == 2)
1341 spec->autocfg.hp_pins[0] = portd; 1342 nid = portd;
1342 else 1343 else
1343 return 1; 1344 return 1;
1345 for (i = 0; i < spec->autocfg.line_outs; i++)
1346 if (spec->autocfg.line_out_pins[i] == nid)
1347 return 1;
1348 spec->autocfg.hp_pins[0] = nid;
1344 } 1349 }
1345 1350
1346 alc_init_auto_hp(codec); 1351 alc_init_auto_hp(codec);
@@ -1362,7 +1367,7 @@ static void alc_ssid_check(struct hda_codec *codec,
1362} 1367}
1363 1368
1364/* 1369/*
1365 * Fix-up pin default configurations 1370 * Fix-up pin default configurations and add default verbs
1366 */ 1371 */
1367 1372
1368struct alc_pincfg { 1373struct alc_pincfg {
@@ -1370,9 +1375,14 @@ struct alc_pincfg {
1370 u32 val; 1375 u32 val;
1371}; 1376};
1372 1377
1373static void alc_fix_pincfg(struct hda_codec *codec, 1378struct alc_fixup {
1379 const struct alc_pincfg *pins;
1380 const struct hda_verb *verbs;
1381};
1382
1383static void alc_pick_fixup(struct hda_codec *codec,
1374 const struct snd_pci_quirk *quirk, 1384 const struct snd_pci_quirk *quirk,
1375 const struct alc_pincfg **pinfix) 1385 const struct alc_fixup *fix)
1376{ 1386{
1377 const struct alc_pincfg *cfg; 1387 const struct alc_pincfg *cfg;
1378 1388
@@ -1380,9 +1390,14 @@ static void alc_fix_pincfg(struct hda_codec *codec,
1380 if (!quirk) 1390 if (!quirk)
1381 return; 1391 return;
1382 1392
1383 cfg = pinfix[quirk->value]; 1393 fix += quirk->value;
1384 for (; cfg->nid; cfg++) 1394 cfg = fix->pins;
1385 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); 1395 if (cfg) {
1396 for (; cfg->nid; cfg++)
1397 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
1398 }
1399 if (fix->verbs)
1400 add_verb(codec->spec, fix->verbs);
1386} 1401}
1387 1402
1388/* 1403/*
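The hunk above generalizes the old pin-only fixup table into struct alc_fixup, which pairs optional pin-default overrides with optional extra init verbs, both selected by the existing PCI-SSID quirk match. A self-contained userspace model of that table shape; every name and value below is hypothetical, not the driver's:

    #include <stdio.h>

    struct pincfg { unsigned int nid; unsigned int val; };        /* zero-terminated */
    struct verb { unsigned int nid; unsigned int cmd; unsigned int param; };

    struct fixup {
            const struct pincfg *pins;      /* optional pin-default overrides */
            const struct verb *verbs;       /* optional extra init verbs */
    };

    static const struct pincfg board_pins[] = { { 0x15, 0x01011012 }, { 0 } };
    static const struct verb board_verbs[] = { { 0x01, 0x717, 0x02 }, { 0 } };

    static const struct fixup fixups[] = {
            { .pins = board_pins },                                 /* pin-only entry */
            { .pins = board_pins, .verbs = board_verbs },           /* pins plus verbs */
    };

    static void apply_fixup(const struct fixup *fix)
    {
            const struct pincfg *p;
            const struct verb *v;

            for (p = fix->pins; p && p->nid; p++)
                    printf("override pin 0x%02x -> 0x%08x\n", p->nid, p->val);
            for (v = fix->verbs; v && v->nid; v++)
                    printf("queue verb 0x%02x 0x%03x 0x%02x\n", v->nid, v->cmd, v->param);
    }

    int main(void)
    {
            /* the index would normally come from the quirk-table match */
            apply_fixup(&fixups[1]);
            return 0;
    }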
@@ -9593,11 +9608,13 @@ static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
9593 { } 9608 { }
9594}; 9609};
9595 9610
9596static const struct alc_pincfg *alc882_pin_fixes[] = { 9611static const struct alc_fixup alc882_fixups[] = {
9597 [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix, 9612 [PINFIX_ABIT_AW9D_MAX] = {
9613 .pins = alc882_abit_aw9d_pinfix
9614 },
9598}; 9615};
9599 9616
9600static struct snd_pci_quirk alc882_pinfix_tbl[] = { 9617static struct snd_pci_quirk alc882_fixup_tbl[] = {
9601 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX), 9618 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
9602 {} 9619 {}
9603}; 9620};
@@ -9869,7 +9886,7 @@ static int patch_alc882(struct hda_codec *codec)
9869 board_config = ALC882_AUTO; 9886 board_config = ALC882_AUTO;
9870 } 9887 }
9871 9888
9872 alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes); 9889 alc_pick_fixup(codec, alc882_fixup_tbl, alc882_fixups);
9873 9890
9874 if (board_config == ALC882_AUTO) { 9891 if (board_config == ALC882_AUTO) {
9875 /* automatic parse from the BIOS config */ 9892 /* automatic parse from the BIOS config */
@@ -12660,7 +12677,7 @@ static struct alc_config_preset alc268_presets[] = {
12660 .init_hook = alc268_toshiba_automute, 12677 .init_hook = alc268_toshiba_automute,
12661 }, 12678 },
12662 [ALC268_ACER] = { 12679 [ALC268_ACER] = {
12663 .mixers = { alc268_acer_mixer, alc268_capture_nosrc_mixer, 12680 .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer,
12664 alc268_beep_mixer }, 12681 alc268_beep_mixer },
12665 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 12682 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
12666 alc268_acer_verbs }, 12683 alc268_acer_verbs },
@@ -12842,12 +12859,15 @@ static int patch_alc268(struct hda_codec *codec)
12842 unsigned int wcap = get_wcaps(codec, 0x07); 12859 unsigned int wcap = get_wcaps(codec, 0x07);
12843 int i; 12860 int i;
12844 12861
12862 spec->capsrc_nids = alc268_capsrc_nids;
12845 /* get type */ 12863 /* get type */
12846 wcap = get_wcaps_type(wcap); 12864 wcap = get_wcaps_type(wcap);
12847 if (spec->auto_mic || 12865 if (spec->auto_mic ||
12848 wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) { 12866 wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) {
12849 spec->adc_nids = alc268_adc_nids_alt; 12867 spec->adc_nids = alc268_adc_nids_alt;
12850 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt); 12868 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt);
12869 if (spec->auto_mic)
12870 fixup_automic_adc(codec);
12851 if (spec->auto_mic || spec->input_mux->num_items == 1) 12871 if (spec->auto_mic || spec->input_mux->num_items == 1)
12852 add_mixer(spec, alc268_capture_nosrc_mixer); 12872 add_mixer(spec, alc268_capture_nosrc_mixer);
12853 else 12873 else
@@ -12857,7 +12877,6 @@ static int patch_alc268(struct hda_codec *codec)
12857 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids); 12877 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids);
12858 add_mixer(spec, alc268_capture_mixer); 12878 add_mixer(spec, alc268_capture_mixer);
12859 } 12879 }
12860 spec->capsrc_nids = alc268_capsrc_nids;
12861 /* set default input source */ 12880 /* set default input source */
12862 for (i = 0; i < spec->num_adc_nids; i++) 12881 for (i = 0; i < spec->num_adc_nids; i++)
12863 snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i], 12882 snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i],
@@ -14357,15 +14376,16 @@ static void alc861_auto_init_multi_out(struct hda_codec *codec)
14357static void alc861_auto_init_hp_out(struct hda_codec *codec) 14376static void alc861_auto_init_hp_out(struct hda_codec *codec)
14358{ 14377{
14359 struct alc_spec *spec = codec->spec; 14378 struct alc_spec *spec = codec->spec;
14360 hda_nid_t pin;
14361 14379
14362 pin = spec->autocfg.hp_pins[0]; 14380 if (spec->autocfg.hp_outs)
14363 if (pin) 14381 alc861_auto_set_output_and_unmute(codec,
14364 alc861_auto_set_output_and_unmute(codec, pin, PIN_HP, 14382 spec->autocfg.hp_pins[0],
14383 PIN_HP,
14365 spec->multiout.hp_nid); 14384 spec->multiout.hp_nid);
14366 pin = spec->autocfg.speaker_pins[0]; 14385 if (spec->autocfg.speaker_outs)
14367 if (pin) 14386 alc861_auto_set_output_and_unmute(codec,
14368 alc861_auto_set_output_and_unmute(codec, pin, PIN_OUT, 14387 spec->autocfg.speaker_pins[0],
14388 PIN_OUT,
14369 spec->multiout.dac_nids[0]); 14389 spec->multiout.dac_nids[0]);
14370} 14390}
14371 14391
@@ -15158,7 +15178,7 @@ static struct snd_pci_quirk alc861vd_cfg_tbl[] = {
15158 SND_PCI_QUIRK(0x1019, 0xa88d, "Realtek ALC660 demo", ALC660VD_3ST), 15178 SND_PCI_QUIRK(0x1019, 0xa88d, "Realtek ALC660 demo", ALC660VD_3ST),
15159 SND_PCI_QUIRK(0x103c, 0x30bf, "HP TX1000", ALC861VD_HP), 15179 SND_PCI_QUIRK(0x103c, 0x30bf, "HP TX1000", ALC861VD_HP),
15160 SND_PCI_QUIRK(0x1043, 0x12e2, "Asus z35m", ALC660VD_3ST), 15180 SND_PCI_QUIRK(0x1043, 0x12e2, "Asus z35m", ALC660VD_3ST),
15161 SND_PCI_QUIRK(0x1043, 0x1339, "Asus G1", ALC660VD_3ST), 15181 /*SND_PCI_QUIRK(0x1043, 0x1339, "Asus G1", ALC660VD_3ST),*/ /* auto */
15162 SND_PCI_QUIRK(0x1043, 0x1633, "Asus V1Sn", ALC660VD_ASUS_V1S), 15182 SND_PCI_QUIRK(0x1043, 0x1633, "Asus V1Sn", ALC660VD_ASUS_V1S),
15163 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS", ALC660VD_3ST_DIG), 15183 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS", ALC660VD_3ST_DIG),
15164 SND_PCI_QUIRK(0x10de, 0x03f0, "Realtek ALC660 demo", ALC660VD_3ST), 15184 SND_PCI_QUIRK(0x10de, 0x03f0, "Realtek ALC660 demo", ALC660VD_3ST),
@@ -15551,6 +15571,29 @@ static void alc861vd_auto_init(struct hda_codec *codec)
15551 alc_inithook(codec); 15571 alc_inithook(codec);
15552} 15572}
15553 15573
15574enum {
15575 ALC660VD_FIX_ASUS_GPIO1
15576};
15577
15578/* reset GPIO1 */
15579static const struct hda_verb alc660vd_fix_asus_gpio1_verbs[] = {
15580 {0x01, AC_VERB_SET_GPIO_MASK, 0x03},
15581 {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x01},
15582 {0x01, AC_VERB_SET_GPIO_DATA, 0x01},
15583 { }
15584};
15585
15586static const struct alc_fixup alc861vd_fixups[] = {
15587 [ALC660VD_FIX_ASUS_GPIO1] = {
15588 .verbs = alc660vd_fix_asus_gpio1_verbs,
15589 },
15590};
15591
15592static struct snd_pci_quirk alc861vd_fixup_tbl[] = {
15593 SND_PCI_QUIRK(0x1043, 0x1339, "ASUS A7-K", ALC660VD_FIX_ASUS_GPIO1),
15594 {}
15595};
15596
15554static int patch_alc861vd(struct hda_codec *codec) 15597static int patch_alc861vd(struct hda_codec *codec)
15555{ 15598{
15556 struct alc_spec *spec; 15599 struct alc_spec *spec;
@@ -15572,6 +15615,8 @@ static int patch_alc861vd(struct hda_codec *codec)
15572 board_config = ALC861VD_AUTO; 15615 board_config = ALC861VD_AUTO;
15573 } 15616 }
15574 15617
15618 alc_pick_fixup(codec, alc861vd_fixup_tbl, alc861vd_fixups);
15619
15575 if (board_config == ALC861VD_AUTO) { 15620 if (board_config == ALC861VD_AUTO) {
15576 /* automatic parse from the BIOS config */ 15621 /* automatic parse from the BIOS config */
15577 err = alc861vd_parse_auto_config(codec); 15622 err = alc861vd_parse_auto_config(codec);
@@ -16852,6 +16897,7 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
16852 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_ECS), 16897 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_ECS),
16853 SND_PCI_QUIRK(0x105b, 0x0d47, "Foxconn 45CMX/45GMX/45CMX-K", 16898 SND_PCI_QUIRK(0x105b, 0x0d47, "Foxconn 45CMX/45GMX/45CMX-K",
16854 ALC662_3ST_6ch_DIG), 16899 ALC662_3ST_6ch_DIG),
16900 SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB200", ALC663_ASUS_MODE4),
16855 SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10), 16901 SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10),
16856 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L", 16902 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L",
16857 ALC662_3ST_6ch_DIG), 16903 ALC662_3ST_6ch_DIG),
@@ -17145,70 +17191,145 @@ static struct alc_config_preset alc662_presets[] = {
17145 * BIOS auto configuration 17191 * BIOS auto configuration
17146 */ 17192 */
17147 17193
17194/* convert from MIX nid to DAC */
17195static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
17196{
17197 if (nid == 0x0f)
17198 return 0x02;
17199 else if (nid >= 0x0c && nid <= 0x0e)
17200 return nid - 0x0c + 0x02;
17201 else
17202 return 0;
17203}
17204
17205/* get MIX nid connected to the given pin targeted to DAC */
17206static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
17207 hda_nid_t dac)
17208{
17209 hda_nid_t mix[4];
17210 int i, num;
17211
17212 num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
17213 for (i = 0; i < num; i++) {
17214 if (alc662_mix_to_dac(mix[i]) == dac)
17215 return mix[i];
17216 }
17217 return 0;
17218}
17219
17220/* look for an empty DAC slot */
17221static hda_nid_t alc662_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
17222{
17223 struct alc_spec *spec = codec->spec;
17224 hda_nid_t srcs[5];
17225 int i, j, num;
17226
17227 num = snd_hda_get_connections(codec, pin, srcs, ARRAY_SIZE(srcs));
17228 if (num < 0)
17229 return 0;
17230 for (i = 0; i < num; i++) {
17231 hda_nid_t nid = alc662_mix_to_dac(srcs[i]);
17232 if (!nid)
17233 continue;
17234 for (j = 0; j < spec->multiout.num_dacs; j++)
17235 if (spec->multiout.dac_nids[j] == nid)
17236 break;
17237 if (j >= spec->multiout.num_dacs)
17238 return nid;
17239 }
17240 return 0;
17241}
17242
17243/* fill in the dac_nids table from the parsed pin configuration */
17244static int alc662_auto_fill_dac_nids(struct hda_codec *codec,
17245 const struct auto_pin_cfg *cfg)
17246{
17247 struct alc_spec *spec = codec->spec;
17248 int i;
17249 hda_nid_t dac;
17250
17251 spec->multiout.dac_nids = spec->private_dac_nids;
17252 for (i = 0; i < cfg->line_outs; i++) {
17253 dac = alc662_look_for_dac(codec, cfg->line_out_pins[i]);
17254 if (!dac)
17255 continue;
17256 spec->multiout.dac_nids[spec->multiout.num_dacs++] = dac;
17257 }
17258 return 0;
17259}
17260
17261static int alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
17262 hda_nid_t nid, unsigned int chs)
17263{
17264 char name[32];
17265 sprintf(name, "%s Playback Volume", pfx);
17266 return add_control(spec, ALC_CTL_WIDGET_VOL, name,
17267 HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
17268}
17269
17270static int alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
17271 hda_nid_t nid, unsigned int chs)
17272{
17273 char name[32];
17274 sprintf(name, "%s Playback Switch", pfx);
17275 return add_control(spec, ALC_CTL_WIDGET_MUTE, name,
17276 HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_INPUT));
17277}
17278
17279#define alc662_add_stereo_vol(spec, pfx, nid) \
17280 alc662_add_vol_ctl(spec, pfx, nid, 3)
17281#define alc662_add_stereo_sw(spec, pfx, nid) \
17282 alc662_add_sw_ctl(spec, pfx, nid, 3)
17283
17148/* add playback controls from the parsed DAC table */ 17284/* add playback controls from the parsed DAC table */
17149static int alc662_auto_create_multi_out_ctls(struct alc_spec *spec, 17285static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec,
17150 const struct auto_pin_cfg *cfg) 17286 const struct auto_pin_cfg *cfg)
17151{ 17287{
17152 char name[32]; 17288 struct alc_spec *spec = codec->spec;
17153 static const char *chname[4] = { 17289 static const char *chname[4] = {
17154 "Front", "Surround", NULL /*CLFE*/, "Side" 17290 "Front", "Surround", NULL /*CLFE*/, "Side"
17155 }; 17291 };
17156 hda_nid_t nid; 17292 hda_nid_t nid, mix;
17157 int i, err; 17293 int i, err;
17158 17294
17159 for (i = 0; i < cfg->line_outs; i++) { 17295 for (i = 0; i < cfg->line_outs; i++) {
17160 if (!spec->multiout.dac_nids[i]) 17296 nid = spec->multiout.dac_nids[i];
17297 if (!nid)
17298 continue;
17299 mix = alc662_dac_to_mix(codec, cfg->line_out_pins[i], nid);
17300 if (!mix)
17161 continue; 17301 continue;
17162 nid = alc880_idx_to_dac(i);
17163 if (i == 2) { 17302 if (i == 2) {
17164 /* Center/LFE */ 17303 /* Center/LFE */
17165 err = add_control(spec, ALC_CTL_WIDGET_VOL, 17304 err = alc662_add_vol_ctl(spec, "Center", nid, 1);
17166 "Center Playback Volume",
17167 HDA_COMPOSE_AMP_VAL(nid, 1, 0,
17168 HDA_OUTPUT));
17169 if (err < 0) 17305 if (err < 0)
17170 return err; 17306 return err;
17171 err = add_control(spec, ALC_CTL_WIDGET_VOL, 17307 err = alc662_add_vol_ctl(spec, "LFE", nid, 2);
17172 "LFE Playback Volume",
17173 HDA_COMPOSE_AMP_VAL(nid, 2, 0,
17174 HDA_OUTPUT));
17175 if (err < 0) 17308 if (err < 0)
17176 return err; 17309 return err;
17177 err = add_control(spec, ALC_CTL_WIDGET_MUTE, 17310 err = alc662_add_sw_ctl(spec, "Center", mix, 1);
17178 "Center Playback Switch",
17179 HDA_COMPOSE_AMP_VAL(0x0e, 1, 0,
17180 HDA_INPUT));
17181 if (err < 0) 17311 if (err < 0)
17182 return err; 17312 return err;
17183 err = add_control(spec, ALC_CTL_WIDGET_MUTE, 17313 err = alc662_add_sw_ctl(spec, "LFE", mix, 2);
17184 "LFE Playback Switch",
17185 HDA_COMPOSE_AMP_VAL(0x0e, 2, 0,
17186 HDA_INPUT));
17187 if (err < 0) 17314 if (err < 0)
17188 return err; 17315 return err;
17189 } else { 17316 } else {
17190 const char *pfx; 17317 const char *pfx;
17191 if (cfg->line_outs == 1 && 17318 if (cfg->line_outs == 1 &&
17192 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) { 17319 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
17193 if (!cfg->hp_pins) 17320 if (cfg->hp_outs)
17194 pfx = "Speaker"; 17321 pfx = "Speaker";
17195 else 17322 else
17196 pfx = "PCM"; 17323 pfx = "PCM";
17197 } else 17324 } else
17198 pfx = chname[i]; 17325 pfx = chname[i];
17199 sprintf(name, "%s Playback Volume", pfx); 17326 err = alc662_add_vol_ctl(spec, pfx, nid, 3);
17200 err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
17201 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
17202 HDA_OUTPUT));
17203 if (err < 0) 17327 if (err < 0)
17204 return err; 17328 return err;
17205 if (cfg->line_outs == 1 && 17329 if (cfg->line_outs == 1 &&
17206 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) 17330 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
17207 pfx = "Speaker"; 17331 pfx = "Speaker";
17208 sprintf(name, "%s Playback Switch", pfx); 17332 err = alc662_add_sw_ctl(spec, pfx, mix, 3);
17209 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
17210 HDA_COMPOSE_AMP_VAL(alc880_idx_to_mixer(i),
17211 3, 0, HDA_INPUT));
17212 if (err < 0) 17333 if (err < 0)
17213 return err; 17334 return err;
17214 } 17335 }
@@ -17217,54 +17338,38 @@ static int alc662_auto_create_multi_out_ctls(struct alc_spec *spec,
17217} 17338}
17218 17339
17219/* add playback controls for speaker and HP outputs */ 17340/* add playback controls for speaker and HP outputs */
17220static int alc662_auto_create_extra_out(struct alc_spec *spec, hda_nid_t pin, 17341/* return DAC nid if any new DAC is assigned */
17342static int alc662_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
17221 const char *pfx) 17343 const char *pfx)
17222{ 17344{
17223 hda_nid_t nid; 17345 struct alc_spec *spec = codec->spec;
17346 hda_nid_t nid, mix;
17224 int err; 17347 int err;
17225 char name[32];
17226 17348
17227 if (!pin) 17349 if (!pin)
17228 return 0; 17350 return 0;
17229 17351 nid = alc662_look_for_dac(codec, pin);
17230 if (pin == 0x17) { 17352 if (!nid) {
17231 /* ALC663 has a mono output pin on 0x17 */ 17353 char name[32];
17354 /* the corresponding DAC is already occupied */
17355 if (!(get_wcaps(codec, pin) & AC_WCAP_OUT_AMP))
17356 return 0; /* no way */
17357 /* create a switch only */
17232 sprintf(name, "%s Playback Switch", pfx); 17358 sprintf(name, "%s Playback Switch", pfx);
17233 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name, 17359 return add_control(spec, ALC_CTL_WIDGET_MUTE, name,
17234 HDA_COMPOSE_AMP_VAL(pin, 2, 0, HDA_OUTPUT)); 17360 HDA_COMPOSE_AMP_VAL(pin, 3, 0, HDA_OUTPUT));
17235 return err;
17236 } 17361 }
17237 17362
17238 if (alc880_is_fixed_pin(pin)) { 17363 mix = alc662_dac_to_mix(codec, pin, nid);
17239 nid = alc880_idx_to_dac(alc880_fixed_pin_idx(pin)); 17364 if (!mix)
17240 /* printk(KERN_DEBUG "DAC nid=%x\n",nid); */ 17365 return 0;
17241 /* specify the DAC as the extra output */ 17366 err = alc662_add_vol_ctl(spec, pfx, nid, 3);
17242 if (!spec->multiout.hp_nid) 17367 if (err < 0)
17243 spec->multiout.hp_nid = nid; 17368 return err;
17244 else 17369 err = alc662_add_sw_ctl(spec, pfx, mix, 3);
17245 spec->multiout.extra_out_nid[0] = nid; 17370 if (err < 0)
17246 /* control HP volume/switch on the output mixer amp */ 17371 return err;
17247 nid = alc880_idx_to_dac(alc880_fixed_pin_idx(pin)); 17372 return nid;
17248 sprintf(name, "%s Playback Volume", pfx);
17249 err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
17250 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
17251 if (err < 0)
17252 return err;
17253 sprintf(name, "%s Playback Switch", pfx);
17254 err = add_control(spec, ALC_CTL_BIND_MUTE, name,
17255 HDA_COMPOSE_AMP_VAL(nid, 3, 2, HDA_INPUT));
17256 if (err < 0)
17257 return err;
17258 } else if (alc880_is_multi_pin(pin)) {
17259 /* set manual connection */
17260 /* we have only a switch on HP-out PIN */
17261 sprintf(name, "%s Playback Switch", pfx);
17262 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
17263 HDA_COMPOSE_AMP_VAL(pin, 3, 0, HDA_OUTPUT));
17264 if (err < 0)
17265 return err;
17266 }
17267 return 0;
17268} 17373}
17269 17374
17270/* create playback/capture controls for input pins */ 17375/* create playback/capture controls for input pins */
@@ -17273,30 +17378,35 @@ static int alc662_auto_create_extra_out(struct alc_spec *spec, hda_nid_t pin,
17273 17378
17274static void alc662_auto_set_output_and_unmute(struct hda_codec *codec, 17379static void alc662_auto_set_output_and_unmute(struct hda_codec *codec,
17275 hda_nid_t nid, int pin_type, 17380 hda_nid_t nid, int pin_type,
17276 int dac_idx) 17381 hda_nid_t dac)
17277{ 17382{
17383 int i, num;
17384 hda_nid_t srcs[4];
17385
17278 alc_set_pin_output(codec, nid, pin_type); 17386 alc_set_pin_output(codec, nid, pin_type);
17279 /* need the manual connection? */ 17387 /* need the manual connection? */
17280 if (alc880_is_multi_pin(nid)) { 17388 num = snd_hda_get_connections(codec, nid, srcs, ARRAY_SIZE(srcs));
17281 struct alc_spec *spec = codec->spec; 17389 if (num <= 1)
17282 int idx = alc880_multi_pin_idx(nid); 17390 return;
17283 snd_hda_codec_write(codec, alc880_idx_to_selector(idx), 0, 17391 for (i = 0; i < num; i++) {
17284 AC_VERB_SET_CONNECT_SEL, 17392 if (alc662_mix_to_dac(srcs[i]) != dac)
17285 alc880_dac_to_idx(spec->multiout.dac_nids[dac_idx])); 17393 continue;
17394 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, i);
17395 return;
17286 } 17396 }
17287} 17397}
17288 17398
17289static void alc662_auto_init_multi_out(struct hda_codec *codec) 17399static void alc662_auto_init_multi_out(struct hda_codec *codec)
17290{ 17400{
17291 struct alc_spec *spec = codec->spec; 17401 struct alc_spec *spec = codec->spec;
17402 int pin_type = get_pin_type(spec->autocfg.line_out_type);
17292 int i; 17403 int i;
17293 17404
17294 for (i = 0; i <= HDA_SIDE; i++) { 17405 for (i = 0; i <= HDA_SIDE; i++) {
17295 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 17406 hda_nid_t nid = spec->autocfg.line_out_pins[i];
17296 int pin_type = get_pin_type(spec->autocfg.line_out_type);
17297 if (nid) 17407 if (nid)
17298 alc662_auto_set_output_and_unmute(codec, nid, pin_type, 17408 alc662_auto_set_output_and_unmute(codec, nid, pin_type,
17299 i); 17409 spec->multiout.dac_nids[i]);
17300 } 17410 }
17301} 17411}
17302 17412
@@ -17306,12 +17416,13 @@ static void alc662_auto_init_hp_out(struct hda_codec *codec)
17306 hda_nid_t pin; 17416 hda_nid_t pin;
17307 17417
17308 pin = spec->autocfg.hp_pins[0]; 17418 pin = spec->autocfg.hp_pins[0];
17309 if (pin) /* connect to front */ 17419 if (pin)
17310 /* use dac 0 */ 17420 alc662_auto_set_output_and_unmute(codec, pin, PIN_HP,
17311 alc662_auto_set_output_and_unmute(codec, pin, PIN_HP, 0); 17421 spec->multiout.hp_nid);
17312 pin = spec->autocfg.speaker_pins[0]; 17422 pin = spec->autocfg.speaker_pins[0];
17313 if (pin) 17423 if (pin)
17314 alc662_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0); 17424 alc662_auto_set_output_and_unmute(codec, pin, PIN_OUT,
17425 spec->multiout.extra_out_nid[0]);
17315} 17426}
17316 17427
17317#define ALC662_PIN_CD_NID ALC880_PIN_CD_NID 17428#define ALC662_PIN_CD_NID ALC880_PIN_CD_NID
@@ -17349,21 +17460,25 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
17349 if (!spec->autocfg.line_outs) 17460 if (!spec->autocfg.line_outs)
17350 return 0; /* can't find valid BIOS pin config */ 17461 return 0; /* can't find valid BIOS pin config */
17351 17462
17352 err = alc880_auto_fill_dac_nids(spec, &spec->autocfg); 17463 err = alc662_auto_fill_dac_nids(codec, &spec->autocfg);
17353 if (err < 0) 17464 if (err < 0)
17354 return err; 17465 return err;
17355 err = alc662_auto_create_multi_out_ctls(spec, &spec->autocfg); 17466 err = alc662_auto_create_multi_out_ctls(codec, &spec->autocfg);
17356 if (err < 0) 17467 if (err < 0)
17357 return err; 17468 return err;
17358 err = alc662_auto_create_extra_out(spec, 17469 err = alc662_auto_create_extra_out(codec,
17359 spec->autocfg.speaker_pins[0], 17470 spec->autocfg.speaker_pins[0],
17360 "Speaker"); 17471 "Speaker");
17361 if (err < 0) 17472 if (err < 0)
17362 return err; 17473 return err;
17363 err = alc662_auto_create_extra_out(spec, spec->autocfg.hp_pins[0], 17474 if (err)
17475 spec->multiout.extra_out_nid[0] = err;
17476 err = alc662_auto_create_extra_out(codec, spec->autocfg.hp_pins[0],
17364 "Headphone"); 17477 "Headphone");
17365 if (err < 0) 17478 if (err < 0)
17366 return err; 17479 return err;
17480 if (err)
17481 spec->multiout.hp_nid = err;
17367 err = alc662_auto_create_input_ctls(codec, &spec->autocfg); 17482 err = alc662_auto_create_input_ctls(codec, &spec->autocfg);
17368 if (err < 0) 17483 if (err < 0)
17369 return err; 17484 return err;
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 826137ec3002..a9b26828a651 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -182,8 +182,8 @@ struct sigmatel_jack {
182 182
183struct sigmatel_mic_route { 183struct sigmatel_mic_route {
184 hda_nid_t pin; 184 hda_nid_t pin;
185 unsigned char mux_idx; 185 signed char mux_idx;
186 unsigned char dmux_idx; 186 signed char dmux_idx;
187}; 187};
188 188
189struct sigmatel_spec { 189struct sigmatel_spec {
@@ -3469,18 +3469,26 @@ static int set_mic_route(struct hda_codec *codec,
3469 break; 3469 break;
3470 if (i <= AUTO_PIN_FRONT_MIC) { 3470 if (i <= AUTO_PIN_FRONT_MIC) {
3471 /* analog pin */ 3471 /* analog pin */
3472 mic->dmux_idx = 0;
3473 i = get_connection_index(codec, spec->mux_nids[0], pin); 3472 i = get_connection_index(codec, spec->mux_nids[0], pin);
3474 if (i < 0) 3473 if (i < 0)
3475 return -1; 3474 return -1;
3476 mic->mux_idx = i; 3475 mic->mux_idx = i;
3476 mic->dmux_idx = -1;
3477 if (spec->dmux_nids)
3478 mic->dmux_idx = get_connection_index(codec,
3479 spec->dmux_nids[0],
3480 spec->mux_nids[0]);
3477 } else if (spec->dmux_nids) { 3481 } else if (spec->dmux_nids) {
3478 /* digital pin */ 3482 /* digital pin */
3479 mic->mux_idx = 0;
3480 i = get_connection_index(codec, spec->dmux_nids[0], pin); 3483 i = get_connection_index(codec, spec->dmux_nids[0], pin);
3481 if (i < 0) 3484 if (i < 0)
3482 return -1; 3485 return -1;
3483 mic->dmux_idx = i; 3486 mic->dmux_idx = i;
3487 mic->mux_idx = -1;
3488 if (spec->mux_nids)
3489 mic->mux_idx = get_connection_index(codec,
3490 spec->mux_nids[0],
3491 spec->dmux_nids[0]);
3484 } 3492 }
3485 return 0; 3493 return 0;
3486} 3494}
@@ -4557,11 +4565,11 @@ static void stac92xx_mic_detect(struct hda_codec *codec)
4557 mic = &spec->ext_mic; 4565 mic = &spec->ext_mic;
4558 else 4566 else
4559 mic = &spec->int_mic; 4567 mic = &spec->int_mic;
4560 if (mic->dmux_idx) 4568 if (mic->dmux_idx >= 0)
4561 snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0, 4569 snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0,
4562 AC_VERB_SET_CONNECT_SEL, 4570 AC_VERB_SET_CONNECT_SEL,
4563 mic->dmux_idx); 4571 mic->dmux_idx);
4564 else 4572 if (mic->mux_idx >= 0)
4565 snd_hda_codec_write_cache(codec, spec->mux_nids[0], 0, 4573 snd_hda_codec_write_cache(codec, spec->mux_nids[0], 0,
4566 AC_VERB_SET_CONNECT_SEL, 4574 AC_VERB_SET_CONNECT_SEL,
4567 mic->mux_idx); 4575 mic->mux_idx);
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index cecf1ffeeaaa..d74033a2cfbe 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2259,7 +2259,7 @@ static int snd_ice1712_pro_peak_get(struct snd_kcontrol *kcontrol,
2259} 2259}
2260 2260
2261static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak __devinitdata = { 2261static struct snd_kcontrol_new snd_ice1712_mixer_pro_peak __devinitdata = {
2262 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2262 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2263 .name = "Multi Track Peak", 2263 .name = "Multi Track Peak",
2264 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, 2264 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
2265 .info = snd_ice1712_pro_peak_info, 2265 .info = snd_ice1712_pro_peak_info,
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index af6e00148621..76b717dae4b6 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -1294,7 +1294,7 @@ static int __devinit snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
1294 1294
1295 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, 1295 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
1296 snd_dma_pci_data(ice->pci), 1296 snd_dma_pci_data(ice->pci),
1297 64*1024, 64*1024); 1297 256*1024, 256*1024);
1298 1298
1299 ice->pcm = pcm; 1299 ice->pcm = pcm;
1300 1300
@@ -1408,7 +1408,7 @@ static int __devinit snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
1408 1408
1409 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, 1409 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
1410 snd_dma_pci_data(ice->pci), 1410 snd_dma_pci_data(ice->pci),
1411 64*1024, 64*1024); 1411 256*1024, 256*1024);
1412 1412
1413 ice->pcm_ds = pcm; 1413 ice->pcm_ds = pcm;
1414 1414
@@ -2110,7 +2110,7 @@ static int snd_vt1724_pro_peak_get(struct snd_kcontrol *kcontrol,
2110} 2110}
2111 2111
2112static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = { 2112static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = {
2113 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 2113 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2114 .name = "Multi Track Peak", 2114 .name = "Multi Track Peak",
2115 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, 2115 .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
2116 .info = snd_vt1724_pro_peak_info, 2116 .info = snd_vt1724_pro_peak_info,
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 171ada535209..754867ed4785 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1954,6 +1954,18 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
1954 .name = "Sony S1XP", 1954 .name = "Sony S1XP",
1955 .type = AC97_TUNE_INV_EAPD 1955 .type = AC97_TUNE_INV_EAPD
1956 }, 1956 },
1957 {
1958 .subvendor = 0x104d,
1959 .subdevice = 0x81c0,
1960 .name = "Sony VAIO VGN-T350P", /*AD1981B*/
1961 .type = AC97_TUNE_INV_EAPD
1962 },
1963 {
1964 .subvendor = 0x104d,
1965 .subdevice = 0x81c5,
1966 .name = "Sony VAIO VGN-B1VP", /*AD1981B*/
1967 .type = AC97_TUNE_INV_EAPD
1968 },
1957 { 1969 {
1958 .subvendor = 0x1043, 1970 .subvendor = 0x1043,
1959 .subdevice = 0x80f3, 1971 .subdevice = 0x80f3,
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index acfa4760da49..91683a349035 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -1626,7 +1626,7 @@ static int snd_via8233_dxs_volume_get(struct snd_kcontrol *kcontrol,
1626 struct snd_ctl_elem_value *ucontrol) 1626 struct snd_ctl_elem_value *ucontrol)
1627{ 1627{
1628 struct via82xx *chip = snd_kcontrol_chip(kcontrol); 1628 struct via82xx *chip = snd_kcontrol_chip(kcontrol);
1629 unsigned int idx = snd_ctl_get_ioff(kcontrol, &ucontrol->id); 1629 unsigned int idx = kcontrol->id.subdevice;
1630 1630
1631 ucontrol->value.integer.value[0] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][0]; 1631 ucontrol->value.integer.value[0] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][0];
1632 ucontrol->value.integer.value[1] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][1]; 1632 ucontrol->value.integer.value[1] = VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][1];
@@ -1646,7 +1646,7 @@ static int snd_via8233_dxs_volume_put(struct snd_kcontrol *kcontrol,
1646 struct snd_ctl_elem_value *ucontrol) 1646 struct snd_ctl_elem_value *ucontrol)
1647{ 1647{
1648 struct via82xx *chip = snd_kcontrol_chip(kcontrol); 1648 struct via82xx *chip = snd_kcontrol_chip(kcontrol);
1649 unsigned int idx = snd_ctl_get_ioff(kcontrol, &ucontrol->id); 1649 unsigned int idx = kcontrol->id.subdevice;
1650 unsigned long port = chip->port + 0x10 * idx; 1650 unsigned long port = chip->port + 0x10 * idx;
1651 unsigned char val; 1651 unsigned char val;
1652 int i, change = 0; 1652 int i, change = 0;
@@ -1705,11 +1705,12 @@ static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control __devinitdata =
1705}; 1705};
1706 1706
1707static struct snd_kcontrol_new snd_via8233_dxs_volume_control __devinitdata = { 1707static struct snd_kcontrol_new snd_via8233_dxs_volume_control __devinitdata = {
1708 .name = "VIA DXS Playback Volume", 1708 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
1709 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 1709 .device = 0,
1710 /* .subdevice set later */
1711 .name = "PCM Playback Volume",
1710 .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | 1712 .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
1711 SNDRV_CTL_ELEM_ACCESS_TLV_READ), 1713 SNDRV_CTL_ELEM_ACCESS_TLV_READ),
1712 .count = 4,
1713 .info = snd_via8233_dxs_volume_info, 1714 .info = snd_via8233_dxs_volume_info,
1714 .get = snd_via8233_dxs_volume_get, 1715 .get = snd_via8233_dxs_volume_get,
1715 .put = snd_via8233_dxs_volume_put, 1716 .put = snd_via8233_dxs_volume_put,
@@ -1936,10 +1937,18 @@ static int __devinit snd_via8233_init_misc(struct via82xx *chip)
1936 } 1937 }
1937 else /* Using DXS when PCM emulation is enabled is really weird */ 1938 else /* Using DXS when PCM emulation is enabled is really weird */
1938 { 1939 {
1939 /* Standalone DXS controls */ 1940 for (i = 0; i < 4; ++i) {
1940 err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_via8233_dxs_volume_control, chip)); 1941 struct snd_kcontrol *kctl;
1941 if (err < 0) 1942
1942 return err; 1943 kctl = snd_ctl_new1(
1944 &snd_via8233_dxs_volume_control, chip);
1945 if (!kctl)
1946 return -ENOMEM;
1947 kctl->id.subdevice = i;
1948 err = snd_ctl_add(chip->card, kctl);
1949 if (err < 0)
1950 return err;
1951 }
1943 } 1952 }
1944 } 1953 }
1945 /* select spdif data slot 10/11 */ 1954 /* select spdif data slot 10/11 */
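
The via82xx hunks above replace one four-element "VIA DXS Playback Volume" mixer control with four "PCM Playback Volume" controls on the PCM interface, told apart by kctl->id.subdevice instead of by the element index. A minimal sketch of that clone-a-template-per-subdevice pattern (the function and parameter names here are illustrative, not from the patch):

#include <sound/core.h>
#include <sound/control.h>

/* Sketch only: register one control per PCM subdevice from a template.
 * register_per_subdevice_controls() is an illustrative name, not a patch
 * or ALSA symbol. */
static int register_per_subdevice_controls(struct snd_card *card,
                                           const struct snd_kcontrol_new *tmpl,
                                           void *chip, int count)
{
        int i, err;

        for (i = 0; i < count; i++) {
                struct snd_kcontrol *kctl = snd_ctl_new1(tmpl, chip);

                if (!kctl)
                        return -ENOMEM;
                /* the get/put callbacks index on kcontrol->id.subdevice */
                kctl->id.subdevice = i;
                err = snd_ctl_add(card, kctl);
                if (err < 0)
                        return err;
        }
        return 0;
}

With per-subdevice controls the handlers read kcontrol->id.subdevice directly, which is why the patch drops snd_ctl_get_ioff() from snd_via8233_dxs_volume_get/put.
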
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 835fa19ed461..d06f780bd7e8 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -59,6 +59,18 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
59 strlcpy(info.type, "keywest", I2C_NAME_SIZE); 59 strlcpy(info.type, "keywest", I2C_NAME_SIZE);
60 info.addr = keywest_ctx->addr; 60 info.addr = keywest_ctx->addr;
61 keywest_ctx->client = i2c_new_device(adapter, &info); 61 keywest_ctx->client = i2c_new_device(adapter, &info);
62 if (!keywest_ctx->client)
63 return -ENODEV;
64 /*
65 * We know the driver is already loaded, so the device should be
66 * already bound. If not it means binding failed, and then there
67 * is no point in keeping the device instantiated.
68 */
69 if (!keywest_ctx->client->driver) {
70 i2c_unregister_device(keywest_ctx->client);
71 keywest_ctx->client = NULL;
72 return -ENODEV;
73 }
62 74
63 /* 75 /*
64 * Let i2c-core delete that device on driver removal. 76 * Let i2c-core delete that device on driver removal.
@@ -86,7 +98,7 @@ static const struct i2c_device_id keywest_i2c_id[] = {
86 { } 98 { }
87}; 99};
88 100
89struct i2c_driver keywest_driver = { 101static struct i2c_driver keywest_driver = {
90 .driver = { 102 .driver = {
91 .name = "PMac Keywest Audio", 103 .name = "PMac Keywest Audio",
92 }, 104 },
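
The comment added to keywest_attach_adapter spells out the pattern: the driver is known to be loaded at this point, so a freshly instantiated client that still has no bound driver means probing failed, and the device should be torn down again rather than left dangling. A hedged, generic form of that check (the helper name is illustrative, not an i2c-core API):

#include <linux/i2c.h>

/* Illustrative helper, not an i2c-core function: instantiate a device
 * and keep it only if a driver actually bound to it. */
static struct i2c_client *instantiate_bound_client(struct i2c_adapter *adapter,
                                                   struct i2c_board_info *info)
{
        struct i2c_client *client = i2c_new_device(adapter, info);

        if (!client)
                return NULL;
        if (!client->driver) {
                /* driver is loaded, so an unbound client means probe failed */
                i2c_unregister_device(client);
                return NULL;
        }
        return client;
}
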
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index ac927ffdc961..97f1a251e446 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -7,15 +7,6 @@ config SND_BF5XX_I2S
7 mode (supports single stereo In/Out). 7 mode (supports single stereo In/Out).
8 You will also need to select the audio interfaces to support below. 8 You will also need to select the audio interfaces to support below.
9 9
10config SND_BF5XX_TDM
11 tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
12 depends on (BLACKFIN && SND_SOC)
13 help
14 Say Y or M if you want to add support for codecs attached to
15 the Blackfin SPORT (synchronous serial ports) interface in TDM
16 mode.
17 You will also need to select the audio interfaces to support below.
18
19config SND_BF5XX_SOC_SSM2602 10config SND_BF5XX_SOC_SSM2602
20 tristate "SoC SSM2602 Audio support for BF52x ezkit" 11 tristate "SoC SSM2602 Audio support for BF52x ezkit"
21 depends on SND_BF5XX_I2S 12 depends on SND_BF5XX_I2S
@@ -41,6 +32,31 @@ config SND_BFIN_AD73311_SE
41 Enter the GPIO used to control AD73311's SE pin. Acceptable 32 Enter the GPIO used to control AD73311's SE pin. Acceptable
42 values are 0 to 7 33 values are 0 to 7
43 34
35config SND_BF5XX_TDM
36 tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
37 depends on (BLACKFIN && SND_SOC)
38 help
39 Say Y or M if you want to add support for codecs attached to
40 the Blackfin SPORT (synchronous serial ports) interface in TDM
41 mode.
42 You will also need to select the audio interfaces to support below.
43
44config SND_BF5XX_SOC_AD1836
45 tristate "SoC AD1836 Audio support for BF5xx"
46 depends on SND_BF5XX_TDM
47 select SND_BF5XX_SOC_TDM
48 select SND_SOC_AD1836
49 help
50 Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
51
52config SND_BF5XX_SOC_AD1938
53 tristate "SoC AD1938 Audio support for Blackfin"
54 depends on SND_BF5XX_TDM
55 select SND_BF5XX_SOC_TDM
56 select SND_SOC_AD1938
57 help
58 Say Y if you want to add support for AD1938 codec on Blackfin.
59
44config SND_BF5XX_AC97 60config SND_BF5XX_AC97
45 tristate "SoC AC97 Audio for the ADI BF5xx chip" 61 tristate "SoC AC97 Audio for the ADI BF5xx chip"
46 depends on BLACKFIN 62 depends on BLACKFIN
@@ -71,6 +87,30 @@ config SND_BF5XX_MULTICHAN_SUPPORT
71 Say y if you want AC97 driver to support up to 5.1 channel audio. 87 Say y if you want AC97 driver to support up to 5.1 channel audio.
72 this mode will consume much more memory for DMA. 88 this mode will consume much more memory for DMA.
73 89
90config SND_BF5XX_HAVE_COLD_RESET
91 bool "BOARD has COLD Reset GPIO"
92 depends on SND_BF5XX_AC97
93 default y if BFIN548_EZKIT
94 default n if !BFIN548_EZKIT
95
96config SND_BF5XX_RESET_GPIO_NUM
97 int "Set a GPIO for cold reset"
98 depends on SND_BF5XX_HAVE_COLD_RESET
99 range 0 159
100 default 19 if BFIN548_EZKIT
101 default 5 if BFIN537_STAMP
102 default 0
103 help
104 Set the correct GPIO for RESET the sound chip.
105
106config SND_BF5XX_SOC_AD1980
107 tristate "SoC AD1980/1 Audio support for BF5xx"
108 depends on SND_BF5XX_AC97
109 select SND_BF5XX_SOC_AC97
110 select SND_SOC_AD1980
111 help
112 Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
113
74config SND_BF5XX_SOC_SPORT 114config SND_BF5XX_SOC_SPORT
75 tristate 115 tristate
76 116
@@ -88,30 +128,6 @@ config SND_BF5XX_SOC_AC97
88 select SND_SOC_AC97_BUS 128 select SND_SOC_AC97_BUS
89 select SND_BF5XX_SOC_SPORT 129 select SND_BF5XX_SOC_SPORT
90 130
91config SND_BF5XX_SOC_AD1836
92 tristate "SoC AD1836 Audio support for BF5xx"
93 depends on SND_BF5XX_TDM
94 select SND_BF5XX_SOC_TDM
95 select SND_SOC_AD1836
96 help
97 Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
98
99config SND_BF5XX_SOC_AD1980
100 tristate "SoC AD1980/1 Audio support for BF5xx"
101 depends on SND_BF5XX_AC97
102 select SND_BF5XX_SOC_AC97
103 select SND_SOC_AD1980
104 help
105 Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
106
107config SND_BF5XX_SOC_AD1938
108 tristate "SoC AD1938 Audio support for Blackfin"
109 depends on SND_BF5XX_TDM
110 select SND_BF5XX_SOC_TDM
111 select SND_SOC_AD1938
112 help
113 Say Y if you want to add support for AD1938 codec on Blackfin.
114
115config SND_BF5XX_SPORT_NUM 131config SND_BF5XX_SPORT_NUM
116 int "Set a SPORT for Sound chip" 132 int "Set a SPORT for Sound chip"
117 depends on (SND_BF5XX_I2S || SND_BF5XX_AC97 || SND_BF5XX_TDM) 133 depends on (SND_BF5XX_I2S || SND_BF5XX_AC97 || SND_BF5XX_TDM)
@@ -120,19 +136,3 @@ config SND_BF5XX_SPORT_NUM
120 default 0 136 default 0
121 help 137 help
122 Set the correct SPORT for sound chip. 138 Set the correct SPORT for sound chip.
123
124config SND_BF5XX_HAVE_COLD_RESET
125 bool "BOARD has COLD Reset GPIO"
126 depends on SND_BF5XX_AC97
127 default y if BFIN548_EZKIT
128 default n if !BFIN548_EZKIT
129
130config SND_BF5XX_RESET_GPIO_NUM
131 int "Set a GPIO for cold reset"
132 depends on SND_BF5XX_HAVE_COLD_RESET
133 range 0 159
134 default 19 if BFIN548_EZKIT
135 default 5 if BFIN537_STAMP
136 default 0
137 help
138 Set the correct GPIO for RESET the sound chip.
diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
index 1e9d161c76c4..084b68884ada 100644
--- a/sound/soc/blackfin/bf5xx-i2s.c
+++ b/sound/soc/blackfin/bf5xx-i2s.c
@@ -77,12 +77,12 @@ static struct sport_param sport_params[2] = {
77 * TFS. When Port G is selected and EMAC then there is a conflict between 77 * TFS. When Port G is selected and EMAC then there is a conflict between
78 * the PHY interrupt line and TFS. Current settings prevent the conflict 78 * the PHY interrupt line and TFS. Current settings prevent the conflict
79 * by ignoring the TFS pin when Port G is selected. This allows both 79 * by ignoring the TFS pin when Port G is selected. This allows both
80 * ssm2602 using Port G and EMAC concurrently. 80 * codecs and EMAC using Port G concurrently.
81 */ 81 */
82#ifdef CONFIG_BF527_SPORT0_PORTF 82#ifdef CONFIG_BF527_SPORT0_PORTG
83#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
84#else
85#define LOCAL_SPORT0_TFS (0) 83#define LOCAL_SPORT0_TFS (0)
84#else
85#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
86#endif 86#endif
87 87
88static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 88static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
index 3096badf09a5..ff546e91a22e 100644
--- a/sound/soc/blackfin/bf5xx-tdm.c
+++ b/sound/soc/blackfin/bf5xx-tdm.c
@@ -78,12 +78,12 @@ static struct sport_param sport_params[2] = {
78 * TFS. When Port G is selected and EMAC then there is a conflict between 78 * TFS. When Port G is selected and EMAC then there is a conflict between
79 * the PHY interrupt line and TFS. Current settings prevent the conflict 79 * the PHY interrupt line and TFS. Current settings prevent the conflict
80 * by ignoring the TFS pin when Port G is selected. This allows both 80 * by ignoring the TFS pin when Port G is selected. This allows both
81 * ssm2602 using Port G and EMAC concurrently. 81 * codecs and EMAC using Port G concurrently.
82 */ 82 */
83#ifdef CONFIG_BF527_SPORT0_PORTF 83#ifdef CONFIG_BF527_SPORT0_PORTG
84#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
85#else
86#define LOCAL_SPORT0_TFS (0) 84#define LOCAL_SPORT0_TFS (0)
85#else
86#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
87#endif 87#endif
88 88
89static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 89static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 3ff0373dff89..593d5b9c9f03 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -579,7 +579,7 @@ static const struct snd_kcontrol_new wm8350_left_capt_mixer_controls[] = {
579 SOC_DAPM_SINGLE_TLV("L3 Capture Volume", 579 SOC_DAPM_SINGLE_TLV("L3 Capture Volume",
580 WM8350_INPUT_MIXER_VOLUME_L, 9, 7, 0, out_mix_tlv), 580 WM8350_INPUT_MIXER_VOLUME_L, 9, 7, 0, out_mix_tlv),
581 SOC_DAPM_SINGLE("PGA Capture Switch", 581 SOC_DAPM_SINGLE("PGA Capture Switch",
582 WM8350_LEFT_INPUT_VOLUME, 14, 1, 0), 582 WM8350_LEFT_INPUT_VOLUME, 14, 1, 1),
583}; 583};
584 584
585/* Right Input Mixer */ 585/* Right Input Mixer */
@@ -589,7 +589,7 @@ static const struct snd_kcontrol_new wm8350_right_capt_mixer_controls[] = {
589 SOC_DAPM_SINGLE_TLV("L3 Capture Volume", 589 SOC_DAPM_SINGLE_TLV("L3 Capture Volume",
590 WM8350_INPUT_MIXER_VOLUME_R, 13, 7, 0, out_mix_tlv), 590 WM8350_INPUT_MIXER_VOLUME_R, 13, 7, 0, out_mix_tlv),
591 SOC_DAPM_SINGLE("PGA Capture Switch", 591 SOC_DAPM_SINGLE("PGA Capture Switch",
592 WM8350_RIGHT_INPUT_VOLUME, 14, 1, 0), 592 WM8350_RIGHT_INPUT_VOLUME, 14, 1, 1),
593}; 593};
594 594
595/* Left Mic Mixer */ 595/* Left Mic Mixer */
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index da97aae475a2..1ef2454c5205 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -790,7 +790,7 @@ static int wm8940_register(struct wm8940_priv *wm8940,
790 codec->reg_cache = &wm8940->reg_cache; 790 codec->reg_cache = &wm8940->reg_cache;
791 791
792 ret = snd_soc_codec_set_cache_io(codec, 8, 16, control); 792 ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
793 if (ret == 0) { 793 if (ret < 0) {
794 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); 794 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
795 return ret; 795 return ret;
796 } 796 }
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 12a6c549ee6e..4ae707048021 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -97,22 +97,19 @@ enum {
97 DAVINCI_MCBSP_WORD_32, 97 DAVINCI_MCBSP_WORD_32,
98}; 98};
99 99
100static struct davinci_pcm_dma_params davinci_i2s_pcm_out = {
101 .name = "I2S PCM Stereo out",
102};
103
104static struct davinci_pcm_dma_params davinci_i2s_pcm_in = {
105 .name = "I2S PCM Stereo in",
106};
107
108struct davinci_mcbsp_dev { 100struct davinci_mcbsp_dev {
101 /*
102 * dma_params must be first because rtd->dai->cpu_dai->private_data
103 * is cast to a pointer of an array of struct davinci_pcm_dma_params in
104 * davinci_pcm_open.
105 */
106 struct davinci_pcm_dma_params dma_params[2];
109 void __iomem *base; 107 void __iomem *base;
110#define MOD_DSP_A 0 108#define MOD_DSP_A 0
111#define MOD_DSP_B 1 109#define MOD_DSP_B 1
112 int mode; 110 int mode;
113 u32 pcr; 111 u32 pcr;
114 struct clk *clk; 112 struct clk *clk;
115 struct davinci_pcm_dma_params *dma_params[2];
116}; 113};
117 114
118static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev, 115static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev,
@@ -215,14 +212,6 @@ static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback)
215 toggle_clock(dev, playback); 212 toggle_clock(dev, playback);
216} 213}
217 214
218static int davinci_i2s_startup(struct snd_pcm_substream *substream,
219 struct snd_soc_dai *cpu_dai)
220{
221 struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
222 cpu_dai->dma_data = dev->dma_params[substream->stream];
223 return 0;
224}
225
226#define DEFAULT_BITPERSAMPLE 16 215#define DEFAULT_BITPERSAMPLE 16
227 216
228static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, 217static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
@@ -353,8 +342,9 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
353 struct snd_pcm_hw_params *params, 342 struct snd_pcm_hw_params *params,
354 struct snd_soc_dai *dai) 343 struct snd_soc_dai *dai)
355{ 344{
356 struct davinci_pcm_dma_params *dma_params = dai->dma_data;
357 struct davinci_mcbsp_dev *dev = dai->private_data; 345 struct davinci_mcbsp_dev *dev = dai->private_data;
346 struct davinci_pcm_dma_params *dma_params =
347 &dev->dma_params[substream->stream];
358 struct snd_interval *i = NULL; 348 struct snd_interval *i = NULL;
359 int mcbsp_word_length; 349 int mcbsp_word_length;
360 unsigned int rcr, xcr, srgr; 350 unsigned int rcr, xcr, srgr;
@@ -472,7 +462,6 @@ static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
472#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 462#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000
473 463
474static struct snd_soc_dai_ops davinci_i2s_dai_ops = { 464static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
475 .startup = davinci_i2s_startup,
476 .shutdown = davinci_i2s_shutdown, 465 .shutdown = davinci_i2s_shutdown,
477 .prepare = davinci_i2s_prepare, 466 .prepare = davinci_i2s_prepare,
478 .trigger = davinci_i2s_trigger, 467 .trigger = davinci_i2s_trigger,
@@ -534,12 +523,10 @@ static int davinci_i2s_probe(struct platform_device *pdev)
534 523
535 dev->base = (void __iomem *)IO_ADDRESS(mem->start); 524 dev->base = (void __iomem *)IO_ADDRESS(mem->start);
536 525
537 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &davinci_i2s_pcm_out; 526 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr =
538 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->dma_addr =
539 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DXR_REG); 527 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DXR_REG);
540 528
541 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &davinci_i2s_pcm_in; 529 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr =
542 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->dma_addr =
543 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DRR_REG); 530 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DRR_REG);
544 531
545 /* first TX, then RX */ 532 /* first TX, then RX */
@@ -549,7 +536,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
549 ret = -ENXIO; 536 ret = -ENXIO;
550 goto err_free_mem; 537 goto err_free_mem;
551 } 538 }
552 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->channel = res->start; 539 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel = res->start;
553 540
554 res = platform_get_resource(pdev, IORESOURCE_DMA, 1); 541 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
555 if (!res) { 542 if (!res) {
@@ -557,7 +544,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
557 ret = -ENXIO; 544 ret = -ENXIO;
558 goto err_free_mem; 545 goto err_free_mem;
559 } 546 }
560 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->channel = res->start; 547 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start;
561 548
562 davinci_i2s_dai.private_data = dev; 549 davinci_i2s_dai.private_data = dev;
563 ret = snd_soc_register_dai(&davinci_i2s_dai); 550 ret = snd_soc_register_dai(&davinci_i2s_dai);
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 7a06c0a86665..5d1f98a4c978 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -332,14 +332,6 @@ static inline void mcasp_set_ctl_reg(void __iomem *regs, u32 val)
332 printk(KERN_ERR "GBLCTL write error\n"); 332 printk(KERN_ERR "GBLCTL write error\n");
333} 333}
334 334
335static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
336 struct snd_soc_dai *cpu_dai)
337{
338 struct davinci_audio_dev *dev = cpu_dai->private_data;
339 cpu_dai->dma_data = dev->dma_params[substream->stream];
340 return 0;
341}
342
343static void mcasp_start_rx(struct davinci_audio_dev *dev) 335static void mcasp_start_rx(struct davinci_audio_dev *dev)
344{ 336{
345 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST); 337 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
@@ -386,17 +378,17 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev)
386 378
387static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream) 379static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
388{ 380{
389 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 381 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
382 if (dev->txnumevt) /* enable FIFO */
383 mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
384 FIFO_ENABLE);
390 mcasp_start_tx(dev); 385 mcasp_start_tx(dev);
391 else 386 } else {
387 if (dev->rxnumevt) /* enable FIFO */
388 mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
389 FIFO_ENABLE);
392 mcasp_start_rx(dev); 390 mcasp_start_rx(dev);
393 391 }
394 /* enable FIFO */
395 if (dev->txnumevt)
396 mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
397
398 if (dev->rxnumevt)
399 mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
400} 392}
401 393
402static void mcasp_stop_rx(struct davinci_audio_dev *dev) 394static void mcasp_stop_rx(struct davinci_audio_dev *dev)
@@ -413,17 +405,17 @@ static void mcasp_stop_tx(struct davinci_audio_dev *dev)
413 405
414static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream) 406static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream)
415{ 407{
416 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 408 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
409 if (dev->txnumevt) /* disable FIFO */
410 mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
411 FIFO_ENABLE);
417 mcasp_stop_tx(dev); 412 mcasp_stop_tx(dev);
418 else 413 } else {
414 if (dev->rxnumevt) /* disable FIFO */
415 mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
416 FIFO_ENABLE);
419 mcasp_stop_rx(dev); 417 mcasp_stop_rx(dev);
420 418 }
421 /* disable FIFO */
422 if (dev->txnumevt)
423 mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
424
425 if (dev->rxnumevt)
426 mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
427} 419}
428 420
429static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, 421static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
@@ -720,7 +712,7 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
720{ 712{
721 struct davinci_audio_dev *dev = cpu_dai->private_data; 713 struct davinci_audio_dev *dev = cpu_dai->private_data;
722 struct davinci_pcm_dma_params *dma_params = 714 struct davinci_pcm_dma_params *dma_params =
723 dev->dma_params[substream->stream]; 715 &dev->dma_params[substream->stream];
724 int word_length; 716 int word_length;
725 u8 numevt; 717 u8 numevt;
726 718
@@ -798,7 +790,6 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
798} 790}
799 791
800static struct snd_soc_dai_ops davinci_mcasp_dai_ops = { 792static struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
801 .startup = davinci_mcasp_startup,
802 .trigger = davinci_mcasp_trigger, 793 .trigger = davinci_mcasp_trigger,
803 .hw_params = davinci_mcasp_hw_params, 794 .hw_params = davinci_mcasp_hw_params,
804 .set_fmt = davinci_mcasp_set_dai_fmt, 795 .set_fmt = davinci_mcasp_set_dai_fmt,
@@ -849,20 +840,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
849 struct resource *mem, *ioarea, *res; 840 struct resource *mem, *ioarea, *res;
850 struct snd_platform_data *pdata; 841 struct snd_platform_data *pdata;
851 struct davinci_audio_dev *dev; 842 struct davinci_audio_dev *dev;
852 int count = 0;
853 int ret = 0; 843 int ret = 0;
854 844
855 dev = kzalloc(sizeof(struct davinci_audio_dev), GFP_KERNEL); 845 dev = kzalloc(sizeof(struct davinci_audio_dev), GFP_KERNEL);
856 if (!dev) 846 if (!dev)
857 return -ENOMEM; 847 return -ENOMEM;
858 848
859 dma_data = kzalloc(sizeof(struct davinci_pcm_dma_params) * 2,
860 GFP_KERNEL);
861 if (!dma_data) {
862 ret = -ENOMEM;
863 goto err_release_dev;
864 }
865
866 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 849 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
867 if (!mem) { 850 if (!mem) {
868 dev_err(&pdev->dev, "no mem resource?\n"); 851 dev_err(&pdev->dev, "no mem resource?\n");
@@ -897,11 +880,10 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
897 dev->txnumevt = pdata->txnumevt; 880 dev->txnumevt = pdata->txnumevt;
898 dev->rxnumevt = pdata->rxnumevt; 881 dev->rxnumevt = pdata->rxnumevt;
899 882
900 dma_data[count].name = "I2S PCM Stereo out"; 883 dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
901 dma_data[count].eventq_no = pdata->eventq_no; 884 dma_data->eventq_no = pdata->eventq_no;
902 dma_data[count].dma_addr = (dma_addr_t) (pdata->tx_dma_offset + 885 dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
903 io_v2p(dev->base)); 886 io_v2p(dev->base));
904 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &dma_data[count];
905 887
906 /* first TX, then RX */ 888 /* first TX, then RX */
907 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 889 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -910,13 +892,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
910 goto err_release_region; 892 goto err_release_region;
911 } 893 }
912 894
913 dma_data[count].channel = res->start; 895 dma_data->channel = res->start;
914 count++; 896
915 dma_data[count].name = "I2S PCM Stereo in"; 897 dma_data = &dev->dma_params[SNDRV_PCM_STREAM_CAPTURE];
916 dma_data[count].eventq_no = pdata->eventq_no; 898 dma_data->eventq_no = pdata->eventq_no;
917 dma_data[count].dma_addr = (dma_addr_t)(pdata->rx_dma_offset + 899 dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
918 io_v2p(dev->base)); 900 io_v2p(dev->base));
919 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &dma_data[count];
920 901
921 res = platform_get_resource(pdev, IORESOURCE_DMA, 1); 902 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
922 if (!res) { 903 if (!res) {
@@ -924,7 +905,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
924 goto err_release_region; 905 goto err_release_region;
925 } 906 }
926 907
927 dma_data[count].channel = res->start; 908 dma_data->channel = res->start;
928 davinci_mcasp_dai[pdata->op_mode].private_data = dev; 909 davinci_mcasp_dai[pdata->op_mode].private_data = dev;
929 davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev; 910 davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev;
930 ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]); 911 ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]);
@@ -936,8 +917,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
936err_release_region: 917err_release_region:
937 release_mem_region(mem->start, (mem->end - mem->start) + 1); 918 release_mem_region(mem->start, (mem->end - mem->start) + 1);
938err_release_data: 919err_release_data:
939 kfree(dma_data);
940err_release_dev:
941 kfree(dev); 920 kfree(dev);
942 921
943 return ret; 922 return ret;
@@ -946,7 +925,6 @@ err_release_dev:
946static int davinci_mcasp_remove(struct platform_device *pdev) 925static int davinci_mcasp_remove(struct platform_device *pdev)
947{ 926{
948 struct snd_platform_data *pdata = pdev->dev.platform_data; 927 struct snd_platform_data *pdata = pdev->dev.platform_data;
949 struct davinci_pcm_dma_params *dma_data;
950 struct davinci_audio_dev *dev; 928 struct davinci_audio_dev *dev;
951 struct resource *mem; 929 struct resource *mem;
952 930
@@ -959,8 +937,6 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
959 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 937 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
960 release_mem_region(mem->start, (mem->end - mem->start) + 1); 938 release_mem_region(mem->start, (mem->end - mem->start) + 1);
961 939
962 dma_data = dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
963 kfree(dma_data);
964 kfree(dev); 940 kfree(dev);
965 941
966 return 0; 942 return 0;
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 554354c1cc2f..9d179cc88f7b 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -39,10 +39,15 @@ enum {
39}; 39};
40 40
41struct davinci_audio_dev { 41struct davinci_audio_dev {
42 /*
43 * dma_params must be first because rtd->dai->cpu_dai->private_data
44 * is cast to a pointer of an array of struct davinci_pcm_dma_params in
45 * davinci_pcm_open.
46 */
47 struct davinci_pcm_dma_params dma_params[2];
42 void __iomem *base; 48 void __iomem *base;
43 int sample_rate; 49 int sample_rate;
44 struct clk *clk; 50 struct clk *clk;
45 struct davinci_pcm_dma_params *dma_params[2];
46 unsigned int codec_fmt; 51 unsigned int codec_fmt;
47 52
48 /* McASP specific data */ 53 /* McASP specific data */
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index 2f7da49ed34f..c73a915f233f 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -126,16 +126,9 @@ static void davinci_pcm_dma_irq(unsigned lch, u16 ch_status, void *data)
126static int davinci_pcm_dma_request(struct snd_pcm_substream *substream) 126static int davinci_pcm_dma_request(struct snd_pcm_substream *substream)
127{ 127{
128 struct davinci_runtime_data *prtd = substream->runtime->private_data; 128 struct davinci_runtime_data *prtd = substream->runtime->private_data;
129 struct snd_soc_pcm_runtime *rtd = substream->private_data;
130 struct davinci_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data;
131 struct edmacc_param p_ram; 129 struct edmacc_param p_ram;
132 int ret; 130 int ret;
133 131
134 if (!dma_data)
135 return -ENODEV;
136
137 prtd->params = dma_data;
138
139 /* Request master DMA channel */ 132 /* Request master DMA channel */
140 ret = edma_alloc_channel(prtd->params->channel, 133 ret = edma_alloc_channel(prtd->params->channel,
141 davinci_pcm_dma_irq, substream, 134 davinci_pcm_dma_irq, substream,
@@ -244,6 +237,11 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream)
244 struct snd_pcm_runtime *runtime = substream->runtime; 237 struct snd_pcm_runtime *runtime = substream->runtime;
245 struct davinci_runtime_data *prtd; 238 struct davinci_runtime_data *prtd;
246 int ret = 0; 239 int ret = 0;
240 struct snd_soc_pcm_runtime *rtd = substream->private_data;
241 struct davinci_pcm_dma_params *pa = rtd->dai->cpu_dai->private_data;
242 struct davinci_pcm_dma_params *params = &pa[substream->stream];
243 if (!params)
244 return -ENODEV;
247 245
248 snd_soc_set_runtime_hwparams(substream, &davinci_pcm_hardware); 246 snd_soc_set_runtime_hwparams(substream, &davinci_pcm_hardware);
249 /* ensure that buffer size is a multiple of period size */ 247 /* ensure that buffer size is a multiple of period size */
@@ -257,6 +255,7 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream)
257 return -ENOMEM; 255 return -ENOMEM;
258 256
259 spin_lock_init(&prtd->lock); 257 spin_lock_init(&prtd->lock);
258 prtd->params = params;
260 259
261 runtime->private_data = prtd; 260 runtime->private_data = prtd;
262 261
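
The comments added to struct davinci_mcbsp_dev and struct davinci_audio_dev lean on a C guarantee: a pointer to a structure, suitably converted, points to its first member, so davinci_pcm_open can cast cpu_dai->private_data straight to the embedded dma_params[2] array. A small self-contained illustration of that layout rule (the struct names here are stand-ins, not the driver's):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct dma_params { int channel; };

struct dev {
        struct dma_params dma_params[2];  /* must stay the first member */
        void *base;
};

int main(void)
{
        struct dev d = { .dma_params = { { 10 }, { 11 } }, .base = NULL };
        void *private_data = &d;          /* what the dai would carry */

        /* valid only because dma_params sits at offset 0 of struct dev */
        struct dma_params *pa = private_data;

        assert(offsetof(struct dev, dma_params) == 0);
        printf("playback channel %d, capture channel %d\n",
               pa[0].channel, pa[1].channel);
        return 0;
}

If dma_params ever stopped being the first member, the cast in davinci_pcm_open would silently read the wrong memory, which is exactly what the new comments warn about.
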
diff --git a/sound/soc/davinci/davinci-pcm.h b/sound/soc/davinci/davinci-pcm.h
index 63d96253c73a..8746606efc89 100644
--- a/sound/soc/davinci/davinci-pcm.h
+++ b/sound/soc/davinci/davinci-pcm.h
@@ -17,7 +17,6 @@
17 17
18 18
19struct davinci_pcm_dma_params { 19struct davinci_pcm_dma_params {
20 char *name; /* stream identifier */
21 int channel; /* sync dma channel ID */ 20 int channel; /* sync dma channel ID */
22 unsigned short acnt; 21 unsigned short acnt;
23 dma_addr_t dma_addr; /* device physical address for DMA */ 22 dma_addr_t dma_addr; /* device physical address for DMA */
diff --git a/sound/soc/imx/mxc-ssi.c b/sound/soc/imx/mxc-ssi.c
index 3806ff2c0cd4..ccdefe60e752 100644
--- a/sound/soc/imx/mxc-ssi.c
+++ b/sound/soc/imx/mxc-ssi.c
@@ -397,14 +397,6 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai,
397 break; 397 break;
398 } 398 }
399 399
400 /* sync */
401 if (!(fmt & SND_SOC_DAIFMT_ASYNC))
402 scr |= SSI_SCR_SYN;
403
404 /* tdm - only for stereo atm */
405 if (fmt & SND_SOC_DAIFMT_TDM)
406 scr |= SSI_SCR_NET;
407
408 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) { 400 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
409 SSI1_STCR = stcr; 401 SSI1_STCR = stcr;
410 SSI1_SRCR = srcr; 402 SSI1_SRCR = srcr;
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 6375b4ea525d..dcb3181bb340 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -138,7 +138,7 @@ config SND_PXA2XX_SOC_MIOA701
138 138
139config SND_PXA2XX_SOC_IMOTE2 139config SND_PXA2XX_SOC_IMOTE2
140 tristate "SoC Audio support for IMote 2" 140 tristate "SoC Audio support for IMote 2"
141 depends on SND_PXA2XX_SOC && MACH_INTELMOTE2 141 depends on SND_PXA2XX_SOC && MACH_INTELMOTE2 && I2C
142 select SND_PXA2XX_SOC_I2S 142 select SND_PXA2XX_SOC_I2S
143 select SND_SOC_WM8940 143 select SND_SOC_WM8940
144 help 144 help
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f79711b9fa5b..8de6f9dec4a2 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -524,7 +524,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget)
524 524
525 /* connected jack or spk ? */ 525 /* connected jack or spk ? */
526 if (widget->id == snd_soc_dapm_hp || widget->id == snd_soc_dapm_spk || 526 if (widget->id == snd_soc_dapm_hp || widget->id == snd_soc_dapm_spk ||
527 widget->id == snd_soc_dapm_line) 527 (widget->id == snd_soc_dapm_line && !list_empty(&widget->sources)))
528 return 1; 528 return 1;
529 } 529 }
530 530
@@ -573,7 +573,8 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget)
573 return 1; 573 return 1;
574 574
575 /* connected jack ? */ 575 /* connected jack ? */
576 if (widget->id == snd_soc_dapm_mic || widget->id == snd_soc_dapm_line) 576 if (widget->id == snd_soc_dapm_mic ||
577 (widget->id == snd_soc_dapm_line && !list_empty(&widget->sinks)))
577 return 1; 578 return 1;
578 } 579 }
579 580
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index ab5a3ac2ac47..9efcfd08d747 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -898,6 +898,11 @@ static struct snd_kcontrol_new usb_feature_unit_ctl = {
898 * build a feature control 898 * build a feature control
899 */ 899 */
900 900
901static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
902{
903 return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
904}
905
901static void build_feature_ctl(struct mixer_build *state, unsigned char *desc, 906static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
902 unsigned int ctl_mask, int control, 907 unsigned int ctl_mask, int control,
903 struct usb_audio_term *iterm, int unitid) 908 struct usb_audio_term *iterm, int unitid)
@@ -978,13 +983,13 @@ static void build_feature_ctl(struct mixer_build *state, unsigned char *desc,
978 */ 983 */
979 if (! mapped_name && ! (state->oterm.type >> 16)) { 984 if (! mapped_name && ! (state->oterm.type >> 16)) {
980 if ((state->oterm.type & 0xff00) == 0x0100) { 985 if ((state->oterm.type & 0xff00) == 0x0100) {
981 len = strlcat(kctl->id.name, " Capture", sizeof(kctl->id.name)); 986 len = append_ctl_name(kctl, " Capture");
982 } else { 987 } else {
983 len = strlcat(kctl->id.name + len, " Playback", sizeof(kctl->id.name)); 988 len = append_ctl_name(kctl, " Playback");
984 } 989 }
985 } 990 }
986 strlcat(kctl->id.name + len, control == USB_FEATURE_MUTE ? " Switch" : " Volume", 991 append_ctl_name(kctl, control == USB_FEATURE_MUTE ?
987 sizeof(kctl->id.name)); 992 " Switch" : " Volume");
988 if (control == USB_FEATURE_VOLUME) { 993 if (control == USB_FEATURE_VOLUME) {
989 kctl->tlv.c = mixer_vol_tlv; 994 kctl->tlv.c = mixer_vol_tlv;
990 kctl->vd[0].access |= 995 kctl->vd[0].access |=
@@ -1143,7 +1148,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state, unsigned char *desc,
1143 len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 0); 1148 len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 0);
1144 if (! len) 1149 if (! len)
1145 len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1); 1150 len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1);
1146 strlcat(kctl->id.name + len, " Volume", sizeof(kctl->id.name)); 1151 append_ctl_name(kctl, " Volume");
1147 1152
1148 snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n", 1153 snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
1149 cval->id, kctl->id.name, cval->channels, cval->min, cval->max); 1154 cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
@@ -1400,8 +1405,8 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
1400 if (! len) 1405 if (! len)
1401 strlcpy(kctl->id.name, name, sizeof(kctl->id.name)); 1406 strlcpy(kctl->id.name, name, sizeof(kctl->id.name));
1402 } 1407 }
1403 strlcat(kctl->id.name, " ", sizeof(kctl->id.name)); 1408 append_ctl_name(kctl, " ");
1404 strlcat(kctl->id.name, valinfo->suffix, sizeof(kctl->id.name)); 1409 append_ctl_name(kctl, valinfo->suffix);
1405 1410
1406 snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n", 1411 snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
1407 cval->id, kctl->id.name, cval->channels, cval->min, cval->max); 1412 cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
@@ -1610,9 +1615,9 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, unsi
1610 strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name)); 1615 strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
1611 1616
1612 if ((state->oterm.type & 0xff00) == 0x0100) 1617 if ((state->oterm.type & 0xff00) == 0x0100)
1613 strlcat(kctl->id.name, " Capture Source", sizeof(kctl->id.name)); 1618 append_ctl_name(kctl, " Capture Source");
1614 else 1619 else
1615 strlcat(kctl->id.name, " Playback Source", sizeof(kctl->id.name)); 1620 append_ctl_name(kctl, " Playback Source");
1616 } 1621 }
1617 1622
1618 snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n", 1623 snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
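
append_ctl_name() is just a bounded strlcat() into the fixed-size kctl->id.name buffer: the destination can never overflow, and the return value is the length the concatenation tried to create, so truncation stays detectable. A self-contained illustration of that behaviour (with a local strlcat stand-in, since glibc does not provide one; the 44-byte size mirrors the ALSA element-name field):

#include <stdio.h>
#include <string.h>

/* local stand-in for strlcat(); the kernel has its own implementation */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
        size_t dlen = strlen(dst);
        size_t slen = strlen(src);

        if (size > dlen + 1)
                snprintf(dst + dlen, size - dlen, "%s", src);
        return dlen + slen;   /* length it tried to build */
}

int main(void)
{
        char name[44] = "Mic";    /* assumed ALSA control-name size */
        size_t len;

        my_strlcat(name, " Capture", sizeof(name));
        len = my_strlcat(name, " Volume", sizeof(name));
        printf("%s (needs %zu bytes)\n", name, len + 1);
        return 0;
}
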
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index 1c2ed3090cce..a7910099d6fd 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -31,6 +31,9 @@ OPTIONS
31-w:: 31-w::
32--width=:: 32--width=::
33 Select the width of the SVG file (default: 1000) 33 Select the width of the SVG file (default: 1000)
34-p::
35--power-only::
36 Only output the CPU power section of the diagram
34 37
35 38
36SEE ALSO 39SEE ALSO
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b5f1953b6144..5881943f0c34 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -728,7 +728,7 @@ $(BUILT_INS): perf$X
728common-cmds.h: util/generate-cmdlist.sh command-list.txt 728common-cmds.h: util/generate-cmdlist.sh command-list.txt
729 729
730common-cmds.h: $(wildcard Documentation/perf-*.txt) 730common-cmds.h: $(wildcard Documentation/perf-*.txt)
731 $(QUIET_GEN)util/generate-cmdlist.sh > $@+ && mv $@+ $@ 731 $(QUIET_GEN). util/generate-cmdlist.sh > $@+ && mv $@+ $@
732 732
733$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh 733$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
734 $(QUIET_GEN)$(RM) $@ $@+ && \ 734 $(QUIET_GEN)$(RM) $@ $@+ && \
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index a5a050af8e7d..3eeef339c787 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -41,6 +41,7 @@ static int raw_samples = 0;
41static int system_wide = 0; 41static int system_wide = 0;
42static int profile_cpu = -1; 42static int profile_cpu = -1;
43static pid_t target_pid = -1; 43static pid_t target_pid = -1;
44static pid_t child_pid = -1;
44static int inherit = 1; 45static int inherit = 1;
45static int force = 0; 46static int force = 0;
46static int append_file = 0; 47static int append_file = 0;
@@ -184,6 +185,9 @@ static void sig_handler(int sig)
184 185
185static void sig_atexit(void) 186static void sig_atexit(void)
186{ 187{
188 if (child_pid != -1)
189 kill(child_pid, SIGTERM);
190
187 if (signr == -1) 191 if (signr == -1)
188 return; 192 return;
189 193
@@ -610,6 +614,8 @@ static int __cmd_record(int argc, const char **argv)
610 exit(-1); 614 exit(-1);
611 } 615 }
612 } 616 }
617
618 child_pid = pid;
613 } 619 }
614 620
615 if (realtime_prio) { 621 if (realtime_prio) {
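
builtin-record here (and builtin-stat just below) remember the forked workload's pid so the atexit handler can SIGTERM it: without that, an error path that makes perf exit early would leave the profiled child running. A self-contained sketch of the same pattern (the sleep command is only a placeholder workload):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t child_pid = -1;

static void sig_atexit(void)
{
        /* don't leave the forked workload behind on an early exit */
        if (child_pid != -1)
                kill(child_pid, SIGTERM);
}

int main(void)
{
        atexit(sig_atexit);

        child_pid = fork();
        if (child_pid == 0) {
                execlp("sleep", "sleep", "60", (char *)NULL);
                _exit(127);
        }
        if (child_pid < 0) {
                perror("fork");
                return 1;
        }

        fprintf(stderr, "pretend setup failed, exiting early\n");
        exit(1);    /* the atexit handler kills the workload */
}

The child_pid != -1 guard matters: kill(-1, SIGTERM) would signal every process the user may signal, so the handler must only fire once a child really exists.
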
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e5f6ece65a13..3db31e7bf173 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -69,7 +69,8 @@ static int run_idx = 0;
69static int run_count = 1; 69static int run_count = 1;
70static int inherit = 1; 70static int inherit = 1;
71static int scale = 1; 71static int scale = 1;
72static int target_pid = -1; 72static pid_t target_pid = -1;
73static pid_t child_pid = -1;
73static int null_run = 0; 74static int null_run = 0;
74 75
75static int fd[MAX_NR_CPUS][MAX_COUNTERS]; 76static int fd[MAX_NR_CPUS][MAX_COUNTERS];
@@ -285,6 +286,8 @@ static int run_perf_stat(int argc __used, const char **argv)
285 exit(-1); 286 exit(-1);
286 } 287 }
287 288
289 child_pid = pid;
290
288 /* 291 /*
289 * Wait for the child to be ready to exec. 292 * Wait for the child to be ready to exec.
290 */ 293 */
@@ -433,6 +436,9 @@ static void skip_signal(int signo)
433 436
434static void sig_atexit(void) 437static void sig_atexit(void)
435{ 438{
439 if (child_pid != -1)
440 kill(child_pid, SIGTERM);
441
436 if (signr == -1) 442 if (signr == -1)
437 return; 443 return;
438 444
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 4405681b3134..702d8fe58fbc 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -46,6 +46,8 @@ static u64 turbo_frequency;
46 46
47static u64 first_time, last_time; 47static u64 first_time, last_time;
48 48
49static int power_only;
50
49 51
50static struct perf_header *header; 52static struct perf_header *header;
51 53
@@ -547,7 +549,7 @@ static void end_sample_processing(void)
547 u64 cpu; 549 u64 cpu;
548 struct power_event *pwr; 550 struct power_event *pwr;
549 551
550 for (cpu = 0; cpu < numcpus; cpu++) { 552 for (cpu = 0; cpu <= numcpus; cpu++) {
551 pwr = malloc(sizeof(struct power_event)); 553 pwr = malloc(sizeof(struct power_event));
552 if (!pwr) 554 if (!pwr)
553 return; 555 return;
@@ -871,7 +873,7 @@ static int determine_display_tasks(u64 threshold)
871 /* no exit marker, task kept running to the end */ 873 /* no exit marker, task kept running to the end */
872 if (p->end_time == 0) 874 if (p->end_time == 0)
873 p->end_time = last_time; 875 p->end_time = last_time;
874 if (p->total_time >= threshold) 876 if (p->total_time >= threshold && !power_only)
875 p->display = 1; 877 p->display = 1;
876 878
877 c = p->all; 879 c = p->all;
@@ -882,7 +884,7 @@ static int determine_display_tasks(u64 threshold)
882 if (c->start_time == 1) 884 if (c->start_time == 1)
883 c->start_time = first_time; 885 c->start_time = first_time;
884 886
885 if (c->total_time >= threshold) { 887 if (c->total_time >= threshold && !power_only) {
886 c->display = 1; 888 c->display = 1;
887 count++; 889 count++;
888 } 890 }
@@ -1134,6 +1136,8 @@ static const struct option options[] = {
1134 "output file name"), 1136 "output file name"),
1135 OPT_INTEGER('w', "width", &svg_page_width, 1137 OPT_INTEGER('w', "width", &svg_page_width,
1136 "page width"), 1138 "page width"),
1139 OPT_BOOLEAN('p', "power-only", &power_only,
1140 "output power data only"),
1137 OPT_END() 1141 OPT_END()
1138}; 1142};
1139 1143
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1ca88896eee4..37512e936235 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -782,6 +782,7 @@ static const char *skip_symbols[] = {
782 "exit_idle", 782 "exit_idle",
783 "mwait_idle", 783 "mwait_idle",
784 "mwait_idle_with_hints", 784 "mwait_idle_with_hints",
785 "poll_idle",
785 "ppc64_runlatch_off", 786 "ppc64_runlatch_off",
786 "pseries_dedicated_idle_sleep", 787 "pseries_dedicated_idle_sleep",
787 NULL 788 NULL
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index e9d256e2f47d..0c5e4f72f2ba 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -219,10 +219,6 @@ remap:
219more: 219more:
220 event = (event_t *)(buf + head); 220 event = (event_t *)(buf + head);
221 221
222 size = event->header.size;
223 if (!size)
224 size = 8;
225
226 if (head + event->header.size >= page_size * mmap_window) { 222 if (head + event->header.size >= page_size * mmap_window) {
227 unsigned long shift = page_size * (head / page_size); 223 unsigned long shift = page_size * (head / page_size);
228 int res; 224 int res;
@@ -237,7 +233,6 @@ more:
237 233
238 size = event->header.size; 234 size = event->header.size;
239 235
240
241 if (!size || process_event(event, offset, head) < 0) { 236 if (!size || process_event(event, offset, head) < 0) {
242 237
243 /* 238 /*
@@ -290,7 +285,6 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
290 usage_with_options(annotate_usage, options); 285 usage_with_options(annotate_usage, options);
291 } 286 }
292 287
293
294 setup_pager(); 288 setup_pager();
295 289
296 return __cmd_trace(); 290 return __cmd_trace();
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index f1946d107b10..fdd42a824c98 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -455,3 +455,6 @@ will need at least this:
455 455
456If your architecture does have hardware capabilities, you can override the 456If your architecture does have hardware capabilities, you can override the
457weak stub hw_perf_event_init() to register hardware counters. 457weak stub hw_perf_event_init() to register hardware counters.
458
459Architectures that have d-cache aliassing issues, such as Sparc and ARM,
460should select PERF_USE_VMALLOC in order to avoid these for perf mmap().
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index a778fd0f4ae4..856655d8b0b8 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -28,7 +28,7 @@ static u64 turbo_frequency, max_freq;
28 28
29int svg_page_width = 1000; 29int svg_page_width = 1000;
30 30
31#define MIN_TEXT_SIZE 0.001 31#define MIN_TEXT_SIZE 0.01
32 32
33static u64 total_height; 33static u64 total_height;
34static FILE *svgfile; 34static FILE *svgfile;
@@ -217,6 +217,18 @@ static char *cpu_model(void)
217 } 217 }
218 fclose(file); 218 fclose(file);
219 } 219 }
220
221 /* CPU type */
222 file = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies", "r");
223 if (file) {
224 while (fgets(buf, 255, file)) {
225 unsigned int freq;
226 freq = strtoull(buf, NULL, 10);
227 if (freq > max_freq)
228 max_freq = freq;
229 }
230 fclose(file);
231 }
220 return cpu_m; 232 return cpu_m;
221} 233}
222 234
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 559fb06210f5..47ea0609a760 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -324,8 +324,7 @@ static inline int elf_sym__is_function(const GElf_Sym *sym)
324{ 324{
325 return elf_sym__type(sym) == STT_FUNC && 325 return elf_sym__type(sym) == STT_FUNC &&
326 sym->st_name != 0 && 326 sym->st_name != 0 &&
327 sym->st_shndx != SHN_UNDEF && 327 sym->st_shndx != SHN_UNDEF;
328 sym->st_size != 0;
329} 328}
330 329
331static inline int elf_sym__is_label(const GElf_Sym *sym) 330static inline int elf_sym__is_label(const GElf_Sym *sym)
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index f6a8437141c8..55b41b9e3834 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1968,10 +1968,11 @@ static const struct flag flags[] = {
1968 { "NET_TX_SOFTIRQ", 2 }, 1968 { "NET_TX_SOFTIRQ", 2 },
1969 { "NET_RX_SOFTIRQ", 3 }, 1969 { "NET_RX_SOFTIRQ", 3 },
1970 { "BLOCK_SOFTIRQ", 4 }, 1970 { "BLOCK_SOFTIRQ", 4 },
1971 { "TASKLET_SOFTIRQ", 5 }, 1971 { "BLOCK_IOPOLL_SOFTIRQ", 5 },
1972 { "SCHED_SOFTIRQ", 6 }, 1972 { "TASKLET_SOFTIRQ", 6 },
1973 { "HRTIMER_SOFTIRQ", 7 }, 1973 { "SCHED_SOFTIRQ", 7 },
1974 { "RCU_SOFTIRQ", 8 }, 1974 { "HRTIMER_SOFTIRQ", 8 },
1975 { "RCU_SOFTIRQ", 9 },
1975 1976
1976 { "HRTIMER_NORESTART", 0 }, 1977 { "HRTIMER_NORESTART", 0 },
1977 { "HRTIMER_RESTART", 1 }, 1978 { "HRTIMER_RESTART", 1 },
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b5e7e3f1183f..b7c78a403dc2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -850,6 +850,19 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
850 850
851} 851}
852 852
853static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
854 struct mm_struct *mm,
855 unsigned long address,
856 pte_t pte)
857{
858 struct kvm *kvm = mmu_notifier_to_kvm(mn);
859
860 spin_lock(&kvm->mmu_lock);
861 kvm->mmu_notifier_seq++;
862 kvm_set_spte_hva(kvm, address, pte);
863 spin_unlock(&kvm->mmu_lock);
864}
865
853static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, 866static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
854 struct mm_struct *mm, 867 struct mm_struct *mm,
855 unsigned long start, 868 unsigned long start,
@@ -929,6 +942,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
929 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, 942 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
930 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, 943 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
931 .clear_flush_young = kvm_mmu_notifier_clear_flush_young, 944 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
945 .change_pte = kvm_mmu_notifier_change_pte,
932 .release = kvm_mmu_notifier_release, 946 .release = kvm_mmu_notifier_release,
933}; 947};
934#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ 948#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
@@ -2625,7 +2639,7 @@ static int vcpu_stat_get(void *_offset, u64 *val)
2625 2639
2626DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n"); 2640DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2627 2641
2628static struct file_operations *stat_fops[] = { 2642static const struct file_operations *stat_fops[] = {
2629 [KVM_STAT_VCPU] = &vcpu_stat_fops, 2643 [KVM_STAT_VCPU] = &vcpu_stat_fops,
2630 [KVM_STAT_VM] = &vm_stat_fops, 2644 [KVM_STAT_VM] = &vm_stat_fops,
2631}; 2645};
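
The new change_pte callback follows the same discipline as the other KVM MMU notifiers: take mmu_lock, bump mmu_notifier_seq so a page fault racing with the update can tell its translation went stale, then rewrite the spte through kvm_set_spte_hva(). A hedged sketch of the consumer side of that sequence counter (a simplified skeleton, not the exact KVM fault-path code):

/* Illustrative fault-path skeleton, under the assumption that the fault
 * handler snapshots mmu_notifier_seq before resolving the pfn. */
static int example_page_fault(struct kvm *kvm, unsigned long hva)
{
        unsigned long seq = kvm->mmu_notifier_seq;

        smp_rmb();
        /* ... resolve hva to a pfn outside mmu_lock (may sleep) ... */

        spin_lock(&kvm->mmu_lock);
        if (kvm->mmu_notifier_seq != seq) {
                /* change_pte/invalidate ran meanwhile: retry the fault */
                spin_unlock(&kvm->mmu_lock);
                return -EAGAIN;
        }
        /* ... install the new spte ... */
        spin_unlock(&kvm->mmu_lock);
        return 0;
}
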