-rw-r--r--Documentation/cpu-freq/governors.txt4
-rw-r--r--Documentation/filesystems/sharedsubtree.txt16
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--Documentation/networking/ip-sysctl.txt8
-rw-r--r--MAINTAINERS22
-rw-r--r--Makefile2
-rw-r--r--arch/arm/include/asm/cacheflush.h3
-rw-r--r--arch/arm/kernel/setup.c1
-rw-r--r--arch/arm/mach-gemini/gpio.c4
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.c7
-rw-r--r--arch/arm/mach-omap2/mux.c12
-rw-r--r--arch/arm/mm/alignment.c3
-rw-r--r--arch/arm/tools/mach-types46
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c15
-rw-r--r--arch/ia64/include/asm/acpi.h1
-rw-r--r--arch/ia64/include/asm/elf.h4
-rw-r--r--arch/ia64/sn/kernel/setup.c2
-rw-r--r--arch/microblaze/include/asm/io.h2
-rw-r--r--arch/microblaze/kernel/cpu/cache.c27
-rw-r--r--arch/microblaze/kernel/setup.c1
-rw-r--r--arch/mips/bcm47xx/prom.c8
-rw-r--r--arch/mips/configs/ip27_defconfig917
-rw-r--r--arch/mips/kernel/cpu-probe.c4
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/mm/c-octeon.c4
-rw-r--r--arch/mips/mm/cache.c2
-rw-r--r--arch/mips/mm/highmem.c1
-rw-r--r--arch/mips/sni/rm200.c2
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/kernel/pci.c7
-rw-r--r--arch/parisc/kernel/signal.c4
-rw-r--r--arch/powerpc/mm/tlb_hash64.c12
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c3
-rw-r--r--arch/powerpc/platforms/85xx/smp.c21
-rw-r--r--arch/powerpc/platforms/pseries/xics.c14
-rw-r--r--arch/s390/hypfs/inode.c42
-rw-r--r--arch/s390/include/asm/lowcore.h4
-rw-r--r--arch/sh/kernel/cpu/sh3/entry.S3
-rw-r--r--arch/sh/kernel/dwarf.c20
-rw-r--r--arch/sh/kernel/entry-common.S8
-rw-r--r--arch/sh/kernel/ptrace_64.c11
-rw-r--r--arch/sh/kernel/signal_64.c4
-rw-r--r--arch/sparc/include/asm/stat.h4
-rw-r--r--arch/sparc/kernel/kstack.h4
-rw-r--r--arch/sparc/kernel/of_device_32.c2
-rw-r--r--arch/sparc/kernel/pci.c7
-rw-r--r--arch/sparc/kernel/process_32.c2
-rw-r--r--arch/sparc/kernel/process_64.c8
-rw-r--r--arch/sparc/kernel/signal32.c10
-rw-r--r--arch/sparc/kernel/signal_32.c6
-rw-r--r--arch/sparc/kernel/signal_64.c8
-rw-r--r--arch/sparc/kernel/tsb.S6
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/x86/include/asm/elf.h5
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/system.h4
-rw-r--r--arch/x86/kernel/acpi/boot.c13
-rw-r--r--arch/x86/kernel/apic/apic.c17
-rw-r--r--arch/x86/kernel/apic/probe_32.c29
-rw-r--r--arch/x86/kernel/apic/probe_64.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c3
-rw-r--r--arch/x86/kernel/hw_breakpoint.c30
-rw-r--r--arch/x86/kernel/mpparse.c7
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/ptrace.c7
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kvm/i8254.c3
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--block/blk-core.c11
-rw-r--r--block/cfq-iosched.c49
-rw-r--r--drivers/acpi/dock.c1
-rw-r--r--drivers/acpi/processor_idle.c36
-rw-r--r--drivers/acpi/processor_pdc.c14
-rw-r--r--drivers/acpi/processor_perflib.c6
-rw-r--r--drivers/acpi/scan.c27
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c1
-rw-r--r--drivers/char/agp/amd64-agp.c5
-rw-r--r--drivers/char/tpm/tpm_infineon.c79
-rw-r--r--drivers/clocksource/cs5535-clockevt.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c3
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ipu/ipu_idmac.c25
-rw-r--r--drivers/edac/amd64_edac.c15
-rw-r--r--drivers/edac/mpc85xx_edac.c8
-rw-r--r--drivers/firewire/net.c53
-rw-r--r--drivers/firewire/ohci.c13
-rw-r--r--drivers/gpu/drm/drm_edid.c47
-rw-r--r--drivers/gpu/drm/drm_mm.c3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c144
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c27
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c16
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c245
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c155
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c113
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c49
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c58
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c1
-rw-r--r--drivers/gpu/drm/radeon/Kconfig12
-rw-r--r--drivers/gpu/drm/radeon/atom.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/radeon/r600.c8
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c107
-rw-r--r--drivers/gpu/drm/radeon/rv770.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c108
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c16
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c12
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/cma.c4
-rw-r--r--drivers/infiniband/core/uverbs.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c25
-rw-r--r--drivers/infiniband/core/uverbs_main.c82
-rw-r--r--drivers/input/input-polldev.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c9
-rw-r--r--drivers/input/serio/i8042.c8
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c8
-rw-r--r--drivers/md/dm-log-userspace-transfer.c10
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-region-hash.c5
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-sysfs.c8
-rw-r--r--drivers/md/dm.c21
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c20
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig4
-rw-r--r--drivers/media/dvb/frontends/l64781.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c8
-rw-r--r--drivers/media/video/bt8xx/bttvp.h1
-rw-r--r--drivers/media/video/mt9t112.c2
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c2
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/mmc/card/mmc_test.c9
-rw-r--r--drivers/net/ax88796.c2
-rw-r--r--drivers/net/benet/be_cmds.c1
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/e1000/e1000_main.c19
-rw-r--r--drivers/net/igb/igb_main.c20
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c22
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c11
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/sfc/efx.c1
-rw-r--r--drivers/net/sfc/falcon_boards.c45
-rw-r--r--drivers/net/sfc/mcdi.c2
-rw-r--r--drivers/net/sfc/qt202x_phy.c2
-rw-r--r--drivers/net/sky2.c8
-rw-r--r--drivers/net/tc35815.c1
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/via-velocity.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c22
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c6
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/power/wm97xx_battery.c10
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/scsi/zfcp_fc.c9
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c18
-rw-r--r--drivers/scsi/fcoe/libfcoe.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c3
-rw-r--r--drivers/scsi/libfc/fc_lport.c3
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi_tcp.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c8
-rw-r--r--drivers/serial/8250.c7
-rw-r--r--drivers/ssb/main.c3
-rw-r--r--drivers/usb/core/devio.c48
-rw-r--r--drivers/usb/gadget/f_eem.c3
-rw-r--r--drivers/usb/gadget/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/file_storage.c2
-rw-r--r--drivers/usb/gadget/multi.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c1
-rw-r--r--drivers/usb/host/ehci-hub.c13
-rw-r--r--drivers/usb/host/fhci-tds.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c41
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/otg/Kconfig1
-rw-r--r--drivers/usb/serial/ftdi_sio.c25
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h18
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/video/efifb.c11
-rw-r--r--drivers/watchdog/bfin_wdt.c13
-rw-r--r--fs/9p/v9fs.c33
-rw-r--r--fs/9p/v9fs_vfs.h1
-rw-r--r--fs/9p/vfs_file.c19
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/autofs4/autofs_i.h7
-rw-r--r--fs/autofs4/dev-ioctl.c11
-rw-r--r--fs/autofs4/expire.c6
-rw-r--r--fs/autofs4/inode.c63
-rw-r--r--fs/autofs4/root.c474
-rw-r--r--fs/btrfs/file.c6
-rw-r--r--fs/cachefiles/namei.c12
-rw-r--r--fs/cifs/CHANGES4
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/connect.c30
-rw-r--r--fs/cifs/file.c4
-rw-r--r--fs/cifs/inode.c12
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cifs/sess.c11
-rw-r--r--fs/compat_ioctl.c9
-rw-r--r--fs/dcache.c70
-rw-r--r--fs/exec.c20
-rw-r--r--fs/ext4/file.c6
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/ops_inode.c113
-rw-r--r--fs/hpfs/anode.c2
-rw-r--r--fs/hpfs/dentry.c14
-rw-r--r--fs/hpfs/dir.c14
-rw-r--r--fs/hpfs/dnode.c21
-rw-r--r--fs/hpfs/ea.c7
-rw-r--r--fs/hpfs/hpfs_fn.h30
-rw-r--r--fs/hpfs/inode.c4
-rw-r--r--fs/hpfs/map.c6
-rw-r--r--fs/hpfs/name.c21
-rw-r--r--fs/hpfs/namei.c75
-rw-r--r--fs/hppfs/hppfs.c2
-rw-r--r--fs/internal.h2
-rw-r--r--fs/libfs.c77
-rw-r--r--fs/locks.c5
-rw-r--r--fs/namei.c50
-rw-r--r--fs/namespace.c53
-rw-r--r--fs/nfs/direct.c3
-rw-r--r--fs/nfs/fscache.c9
-rw-r--r--fs/nfs/inode.c8
-rw-r--r--fs/nfs/mount_clnt.c2
-rw-r--r--fs/nfs/nfs2xdr.c2
-rw-r--r--fs/nfs/nfs4proc.c8
-rw-r--r--fs/nfs/nfs4xdr.c6
-rw-r--r--fs/nfs/write.c3
-rw-r--r--fs/nfsctl.c5
-rw-r--r--fs/nfsd/export.c10
-rw-r--r--fs/nfsd/nfs4xdr.c12
-rw-r--r--fs/nfsd/vfs.c7
-rw-r--r--fs/nilfs2/dir.c14
-rw-r--r--fs/nilfs2/namei.c13
-rw-r--r--fs/nilfs2/nilfs.h4
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/buffer_head_io.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c6
-rw-r--r--fs/ocfs2/cluster/tcp.c10
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h4
-rw-r--r--fs/ocfs2/dlm/dlmapi.h2
-rw-r--r--fs/ocfs2/dlm/dlmast.c2
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c2
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/dlm/dlmlock.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c38
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c147
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c8
-rw-r--r--fs/ocfs2/dlmglue.c85
-rw-r--r--fs/ocfs2/export.c2
-rw-r--r--fs/ocfs2/extent_map.c2
-rw-r--r--fs/ocfs2/file.c18
-rw-r--r--fs/ocfs2/inode.c4
-rw-r--r--fs/ocfs2/ioctl.c14
-rw-r--r--fs/ocfs2/journal.c2
-rw-r--r--fs/ocfs2/ocfs2.h4
-rw-r--r--fs/ocfs2/ocfs2_fs.h11
-rw-r--r--fs/ocfs2/refcounttree.c12
-rw-r--r--fs/ocfs2/stack_o2cb.c12
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/ocfs2/symlink.c10
-rw-r--r--fs/ocfs2/uptodate.c4
-rw-r--r--fs/open.c2
-rw-r--r--fs/pnode.c28
-rw-r--r--fs/pnode.h5
-rw-r--r--fs/proc/base.c34
-rw-r--r--fs/proc/generic.c5
-rw-r--r--fs/proc/root.c6
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/super.c21
-rw-r--r--fs/sysfs/inode.c35
-rw-r--r--fs/udf/balloc.c2
-rw-r--r--fs/udf/dir.c4
-rw-r--r--fs/udf/inode.c2
-rw-r--r--fs/udf/namei.c20
-rw-r--r--fs/udf/symlink.c10
-rw-r--r--fs/ufs/dir.c10
-rw-r--r--fs/ufs/ufs.h4
-rw-r--r--include/drm/nouveau_drm.h1
-rw-r--r--include/drm/vmwgfx_drm.h20
-rw-r--r--include/linux/amba/bus.h6
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/fs.h28
-rw-r--r--include/linux/hw_breakpoint.h2
-rw-r--r--include/linux/input.h1
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/mnt_namespace.h1
-rw-r--r--include/linux/mount.h14
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/net/netns/conntrack.h3
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--init/do_mounts_initrd.c4
-rw-r--r--init/main.c11
-rw-r--r--ipc/mqueue.c120
-rw-r--r--kernel/audit_tree.c100
-rw-r--r--kernel/hw_breakpoint.c2
-rw-r--r--kernel/kfifo.c3
-rw-r--r--kernel/perf_event.c13
-rw-r--r--kernel/softirq.c15
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/sysctl_binary.c7
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/trace/trace_stack.c24
-rw-r--r--lib/idr.c4
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/migrate.c36
-rw-r--r--mm/oom_kill.c2
-rw-r--r--net/9p/client.c53
-rw-r--r--net/9p/trans_fd.c10
-rw-r--r--net/9p/trans_rdma.c9
-rw-r--r--net/9p/trans_virtio.c4
-rw-r--r--net/bluetooth/hci_conn.c3
-rw-r--r--net/bluetooth/hci_event.c1
-rw-r--r--net/bluetooth/hidp/core.c49
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/rfcomm/core.c8
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/dst.c2
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/net-sysfs.c3
-rw-r--r--net/core/pktgen.c1
-rw-r--r--net/dccp/ccid.c2
-rw-r--r--net/dccp/ccid.h8
-rw-r--r--net/dccp/probe.c4
-rw-r--r--net/ipv4/devinet.c7
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/ipcomp.c6
-rw-r--r--net/ipv4/netfilter/arp_tables.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c4
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c22
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv6/addrconf.c16
-rw-r--r--net/ipv6/ipcomp6.c6
-rw-r--r--net/ipv6/netfilter/ip6_tables.c4
-rw-r--r--net/irda/irnet/irnet_ppp.c5
-rw-r--r--net/key/af_key.c15
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/rate.c3
-rw-r--r--net/mac80211/scan.c18
-rw-r--r--net/netfilter/nf_conntrack_core.c116
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_helper.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c2
-rw-r--r--net/netfilter/nf_conntrack_standalone.c7
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/sched/Kconfig16
-rw-r--r--net/sunrpc/rpc_pipe.c9
-rw-r--r--net/xfrm/xfrm_state.c12
-rw-r--r--security/smack/smack_lsm.c2
-rw-r--r--security/tomoyo/realpath.c25
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/util/event.c4
-rw-r--r--tools/perf/util/probe-event.c3
441 files changed, 4474 insertions, 2925 deletions
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d09..737988fca64d 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency. For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
 decide that the CPU frequency needs to be increased.
 
 ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/Documentation/filesystems/sharedsubtree.txt b/Documentation/filesystems/sharedsubtree.txt
index 23a181074f94..fc0e39af43c3 100644
--- a/Documentation/filesystems/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
@@ -837,6 +837,9 @@ replicas continue to be exactly same.
  individual lists does not affect propagation or the way propagation
  tree is modified by operations.
 
+ All vfsmounts in a peer group have the same ->mnt_master. If it is
+ non-NULL, they form a contiguous (ordered) segment of slave list.
+
  A example propagation tree looks as shown in the figure below.
  [ NOTE: Though it looks like a forest, if we consider all the shared
  mounts as a conceptual entity called 'pnode', it becomes a tree]
@@ -874,8 +877,19 @@ replicas continue to be exactly same.
 
  NOTE: The propagation tree is orthogonal to the mount tree.
 
+8B Locking:
+
+ ->mnt_share, ->mnt_slave, ->mnt_slave_list, ->mnt_master are protected
+ by namespace_sem (exclusive for modifications, shared for reading).
+
+ Normally we have ->mnt_flags modifications serialized by vfsmount_lock.
+ There are two exceptions: do_add_mount() and clone_mnt().
+ The former modifies a vfsmount that has not been visible in any shared
+ data structures yet.
+ The latter holds namespace_sem and the only references to vfsmount
+ are in lists that can't be traversed without namespace_sem.
 
-8B Algorithm:
+8C Algorithm:
 
  The crux of the implementation resides in rbind/move operation.
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d45602886..e7848a0d99eb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
  acpi_display_output=video
  See above.
 
+ acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods
+ early. Needed on some platforms to properly
+ initialize the EC.
+
  acpi_irq_balance [HW,ACPI]
  ACPI will balance active IRQs
  default in APIC mode
@@ -311,6 +315,11 @@ and is between 256 and 4096 characters. It is defined in the file
  aic79xx= [HW,SCSI]
  See Documentation/scsi/aic79xx.txt.
 
+ alignment= [KNL,ARM]
+ Allow the default userspace alignment fault handler
+ behaviour to be specified. Bit 0 enables warnings,
+ bit 1 enables fixups, and bit 2 sends a segfault.
+
  amd_iommu= [HW,X86-84]
  Pass parameters to the AMD IOMMU driver in the system.
  Possible values are:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 006b39dec87d..e87f3cdc8a6a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
  Default: 5
 
 max_addresses - INTEGER
- Number of maximum addresses per interface. 0 disables limitation.
- It is recommended not set too large value (or 0) because it would
- be too easy way to crash kernel to allow to create too much of
- autoconfigured addresses.
+ Maximum number of autoconfigured addresses per interface. Setting
+ to zero disables the limitation. It is not recommended to set this
+ value too large (or to zero) because it would be an easy way to
+ crash the kernel by allowing too many addresses to be created.
  Default: 16
 
 disable_ipv6 - BOOLEAN
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f323..2533fc45a44a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -616,10 +616,10 @@ M: Richard Purdie <rpurdie@rpsys.net>
 S: Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T: git git://gitorious.org/linux-gemini/mainline.git
-S: Maintained
+S: Odd Fixes
 F: arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T: topgit git://git.openezx.org/openezx.git
 F: arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
 F: arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -1733,10 +1733,9 @@ F: include/linux/tfrc.h
 F: net/dccp/
 
 DECnet NETWORK LAYER
-M: Christine Caulfield <christine.caulfield@googlemail.com>
 W: http://linux-decnet.sourceforge.net
 L: linux-decnet-user@lists.sourceforge.net
-S: Maintained
+S: Orphan
 F: Documentation/networking/decnet.txt
 F: net/decnet/
 
@@ -3411,8 +3410,10 @@ S: Maintained
 F: drivers/scsi/sym53c8xx_2/
 
 LTP (Linux Test Project)
-M: Subrata Modak <subrata@linux.vnet.ibm.com>
-M: Mike Frysinger <vapier@gentoo.org>
+M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
+M: Garrett Cooper <yanegomi@gmail.com>
+M: Mike Frysinger <vapier@gentoo.org>
+M: Subrata Modak <subrata@linux.vnet.ibm.com>
 L: ltp-list@lists.sourceforge.net (subscribers-only)
 W: http://ltp.sourceforge.net/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3488,9 +3489,9 @@ S: Maintained
 F: drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
-M: Lennert Buytenhek <buytenh@marvell.com>
+M: Lennert Buytenhek <buytenh@wantstofly.org>
 L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
 F: drivers/net/mv643xx_eth.*
 F: include/linux/mv643xx.h
 
@@ -3836,6 +3837,7 @@ NETWORKING DRIVERS
 L: netdev@vger.kernel.org
 W: http://www.linuxfoundation.org/en/Net
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S: Odd Fixes
 F: drivers/net/
 F: include/linux/if_*
diff --git a/Makefile b/Makefile
index f8e02e9491d0..1b24895212d8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c77d2fa1f6e5..8113bb5fb66e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -42,7 +42,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
- defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+ defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+ defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c6c57b640b6b..621acad8ea43 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
 #endif
 #ifdef CONFIG_OUTER_CACHE
 struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
 #endif
 
 struct stack {
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index e7263854bc7b..fe3bd5ac8b10 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
  unsigned int reg_both, reg_level, reg_type;
 
  reg_type = __raw_readl(base + GPIO_INT_TYPE);
- reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+ reg_level = __raw_readl(base + GPIO_INT_LEVEL);
  reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
 
  switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
  }
 
  __raw_writel(reg_type, base + GPIO_INT_TYPE);
- __raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+ __raw_writel(reg_level, base + GPIO_INT_LEVEL);
  __raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
  gpio_ack_irq(irq);
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index 0c3c72d934bf..8afe9dd3f150 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -408,6 +408,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
 {
  struct twl4030_hsmmc_info *c;
  int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
+ int i;
 
  if (cpu_is_omap2430()) {
  control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
@@ -434,7 +435,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
  mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
  if (!mmc) {
  pr_err("Cannot allocate memory for mmc device!\n");
- return;
+ goto done;
  }
 
  if (c->name)
@@ -532,6 +533,10 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
  continue;
  c->dev = mmc->dev;
  }
+
+done:
+ for (i = 0; i < nr_hsmmc; i++)
+ kfree(hsmmc_data[i]);
 }
 
 #endif
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 5fedc50c58e4..5fef73f4743d 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -961,16 +961,14 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
  while (superset->reg_offset != OMAP_MUX_TERMINATOR) {
  struct omap_mux *entry;
 
-#ifndef CONFIG_OMAP_MUX
- /* Skip pins that are not muxed as GPIO by bootloader */
- if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
+#ifdef CONFIG_OMAP_MUX
+ if (!superset->muxnames || !superset->muxnames[0]) {
  superset++;
  continue;
  }
-#endif
-
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)
- if (!superset->muxnames || !superset->muxnames[0]) {
+#else
+ /* Skip pins that are not muxed as GPIO by bootloader */
+ if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
  superset++;
  continue;
  }
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b270d6228fe2..62820eda84d9 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/moduleparam.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -77,6 +78,8 @@ static unsigned long ai_dword;
 static unsigned long ai_multi;
 static int ai_usermode;
 
+core_param(alignment, ai_usermode, int, 0600);
+
 #define UM_WARN (1 << 0)
 #define UM_FIXUP (1 << 1)
 #define UM_SIGNAL (1 << 2)
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5a79fc6ee818..31c2f4c30a95 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
 #
 # http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Thu Jan 28 22:15:54 2010
+# Last update: Sat Feb 20 14:16:15 2010
 #
 # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
 #
@@ -2257,7 +2257,7 @@ oratisalog MACH_ORATISALOG ORATISALOG 2268
 oratismadi MACH_ORATISMADI ORATISMADI 2269
 oratisot16 MACH_ORATISOT16 ORATISOT16 2270
 oratisdesk MACH_ORATISDESK ORATISDESK 2271
-v2_ca9 MACH_V2P_CA9 V2P_CA9 2272
+vexpress MACH_VEXPRESS VEXPRESS 2272
 sintexo MACH_SINTEXO SINTEXO 2273
 cm3389 MACH_CM3389 CM3389 2274
 omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
@@ -2636,3 +2636,45 @@ hw90240 MACH_HW90240 HW90240 2648
 dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649
 mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650
 scat110 MACH_SCAT110 SCAT110 2651
+acer_a1 MACH_ACER_A1 ACER_A1 2652
+cmcontrol MACH_CMCONTROL CMCONTROL 2653
+pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654
+rfp43 MACH_RFP43 RFP43 2655
+sk86r0301 MACH_SK86R0301 SK86R0301 2656
+ctpxa MACH_CTPXA CTPXA 2657
+epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658
+guruplug MACH_GURUPLUG GURUPLUG 2659
+spear310 MACH_SPEAR310 SPEAR310 2660
+spear320 MACH_SPEAR320 SPEAR320 2661
+robotx MACH_ROBOTX ROBOTX 2662
+lsxhl MACH_LSXHL LSXHL 2663
+smartlite MACH_SMARTLITE SMARTLITE 2664
+cws2 MACH_CWS2 CWS2 2665
+m619 MACH_M619 M619 2666
+smartview MACH_SMARTVIEW SMARTVIEW 2667
+lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668
+kizbox MACH_KIZBOX KIZBOX 2669
+htccharmer MACH_HTCCHARMER HTCCHARMER 2670
+guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671
+pm9g45 MACH_PM9G45 PM9G45 2672
+htcpanther MACH_HTCPANTHER HTCPANTHER 2673
+htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674
+reb01 MACH_REB01 REB01 2675
+aquila MACH_AQUILA AQUILA 2676
+spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
+sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
+surf7x30 MACH_SURF7X30 SURF7X30 2679
+micro2440 MACH_MICRO2440 MICRO2440 2680
+am2440 MACH_AM2440 AM2440 2681
+tq2440 MACH_TQ2440 TQ2440 2682
+lpc2478oem MACH_LPC2478OEM LPC2478OEM 2683
+ak880x MACH_AK880X AK880X 2684
+cobra3530 MACH_COBRA3530 COBRA3530 2685
+pmppb MACH_PMPPB PMPPB 2686
+u6715 MACH_U6715 U6715 2687
+axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688
+g30_dvb MACH_G30_DVB G30_DVB 2689
+vc088x MACH_VC088X VC088X 2690
+mioa702 MACH_MIOA702 MIOA702 2691
+hpmin MACH_HPMIN HPMIN 2692
+ak880xak MACH_AK880XAK AK880XAK 2693
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1aa1ea5e9212..b13d1879e51b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
  struct platform_device *pdev;
- struct mci_dma_slave *slave;
+ struct mci_dma_data *slave;
  u32 pioa_mask;
  u32 piob_mask;
 
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
  ARRAY_SIZE(atmel_mci0_resource)))
  goto fail;
 
- slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+ slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+ if (!slave)
+ goto fail;
 
  slave->sdata.dma_dev = &dw_dmac0_device.dev;
  slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 
  if (platform_device_add_data(pdev, data,
  sizeof(struct mci_platform_data)))
- goto fail;
+ goto fail_free;
 
  /* CLK line is common to both slots */
  pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
  /* Slot is unused */
  break;
  default:
- goto fail;
+ goto fail_free;
  }
 
  select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
  break;
  default:
  if (!data->slot[0].bus_width)
- goto fail;
+ goto fail_free;
 
  data->slot[1].bus_width = 0;
  break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
  platform_device_add(pdev);
  return pdev;
 
+fail_free:
+ kfree(slave);
 fail:
  data->dma_slave = NULL;
- kfree(slave);
  platform_device_put(pdev);
  return NULL;
 }
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 7ae58892ba8d..e97b255d97bc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0 /* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0 /* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index e14108b19c09..4c41656ede87 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -201,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
  relevant until we have real hardware to play with... */
 #define ELF_PLATFORM NULL
 
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+ set_personality((current->personality & ~PER_MASK) | PER_LINUX)
+
 #define elf_read_implies_exec(ex, executable_stack) \
  ((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
 
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index ece1bf994499..e456f062f241 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index fc9997b73c09..267c7c779e53 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
  * Little endian
  */
 
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
 
 #define in_le32(a) __le32_to_cpu(__raw_readl(a))
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index d9d63831cc2f..2a56bccce4e0 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -172,16 +172,15 @@ do { \
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
 do { \
- int step = -line_length; \
- int count = end - start; \
- BUG_ON(count <= 0); \
+ int volatile temp; \
+ BUG_ON(end - start <= 0); \
  \
- __asm__ __volatile__ (" 1: addk %0, %0, %1; \
- " #op " %0, r0; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
- " : : "r" (start), "r" (count), \
- "r" (step) : "memory"); \
+ __asm__ __volatile__ (" 1: " #op " %1, r0; \
+ cmpu %0, %1, %2; \
+ bgtid %0, 1b; \
+ addk %1, %1, %3; \
+ " : : "r" (temp), "r" (start), "r" (end),\
+ "r" (line_length) : "memory"); \
 } while (0);
 
 static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
  pr_debug("%s\n", __func__);
  CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
  wdc.clear)
-
-#if 0
- unsigned int i;
-
- pr_debug("%s\n", __func__);
-
- /* Just loop through cache size and invalidate it */
- for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
- __invalidate_dcache(0, i);
-#endif
 }
 
 static void __invalidate_dcache_range_wb(unsigned long start,
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 5372b24ad049..bb8c4b9ccb80 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
 
  microblaze_cache_init();
 
+ invalidate_dcache();
  enable_dcache();
 
  invalidate_icache();
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index c51405e57921..29d3cbf9555f 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
  break;
  }
 
+ /* Ignoring the last page when ddr size is 128M. Cached
+ * accesses to last page is causing the processor to prefetch
+ * using address above 128M stepping out of the ddr address
+ * space.
+ */
+ if (mem == 0x8000000)
+ mem -= 0x1000;
+
  add_memory_region(0, mem, BOOT_MEM_RAM);
 }
 
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index ed84b4cb3c8d..84b6503f10b9 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc2
-# Tue Aug 7 13:04:24 2007
+# Linux kernel version: 2.6.33-rc6
+# Wed Feb 3 18:12:31 2010
 #
 CONFIG_MIPS=y
 
@@ -9,20 +9,28 @@ CONFIG_MIPS=y
 # Machine selection
 #
 # CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
 # CONFIG_MIPS_COBALT is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MACH_JAZZ is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
 # CONFIG_MIPS_MALTA is not set
 # CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
 # CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
 # CONFIG_PNX8550_JBS is not set
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
 # CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
 # CONFIG_SGI_IP22 is not set
 CONFIG_SGI_IP27=y
+# CONFIG_SGI_IP28 is not set
 # CONFIG_SGI_IP32 is not set
 # CONFIG_SIBYTE_CRHINE is not set
 # CONFIG_SIBYTE_CARMEL is not set
@@ -33,32 +41,39 @@ CONFIG_SGI_IP27=y
 # CONFIG_SIBYTE_SENTOSA is not set
 # CONFIG_SIBYTE_BIGSUR is not set
 # CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
 # CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
 CONFIG_SGI_SN_M_MODE=y
 # CONFIG_SGI_SN_N_MODE is not set
 # CONFIG_MAPPED_KERNEL is not set
 # CONFIG_REPLICATE_KTEXT is not set
 # CONFIG_REPLICATE_EXHANDLERS is not set
+CONFIG_LOONGSON_UART_BASE=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_ARC=y
 CONFIG_DMA_COHERENT=y
-CONFIG_EARLY_PRINTK=y
 CONFIG_SYS_HAS_EARLY_PRINTK=y
 # CONFIG_NO_IOPORT is not set
 CONFIG_CPU_BIG_ENDIAN=y
 # CONFIG_CPU_LITTLE_ENDIAN is not set
 CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_DEFAULT_SGI_PARTITION=y
 CONFIG_MIPS_L1_CACHE_SHIFT=7
 CONFIG_ARC64=y
 CONFIG_BOOT_ELF64=y
@@ -66,7 +81,8 @@ CONFIG_BOOT_ELF64=y
66# 81#
67# CPU selection 82# CPU selection
68# 83#
69# CONFIG_CPU_LOONGSON2 is not set 84# CONFIG_CPU_LOONGSON2E is not set
85# CONFIG_CPU_LOONGSON2F is not set
70# CONFIG_CPU_MIPS32_R1 is not set 86# CONFIG_CPU_MIPS32_R1 is not set
71# CONFIG_CPU_MIPS32_R2 is not set 87# CONFIG_CPU_MIPS32_R2 is not set
72# CONFIG_CPU_MIPS64_R1 is not set 88# CONFIG_CPU_MIPS64_R1 is not set
@@ -79,6 +95,7 @@ CONFIG_BOOT_ELF64=y
 # CONFIG_CPU_TX49XX is not set
 # CONFIG_CPU_R5000 is not set
 # CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
 # CONFIG_CPU_R6000 is not set
 # CONFIG_CPU_NEVADA is not set
 # CONFIG_CPU_R8000 is not set
@@ -86,6 +103,7 @@ CONFIG_CPU_R10000=y
 # CONFIG_CPU_RM7000 is not set
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
 CONFIG_SYS_HAS_CPU_R10000=y
 CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
 CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
@@ -99,6 +117,7 @@ CONFIG_64BIT=y
 CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_8KB is not set
 # CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
 # CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_CPU_HAS_PREFETCH=y
 CONFIG_MIPS_MT_DISABLED=y
@@ -110,6 +129,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_IRQ_PER_CPU=y
 CONFIG_CPU_SUPPORTS_HIGHMEM=y
 CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_NUMA=y
 CONFIG_SYS_SUPPORTS_NUMA=y
 CONFIG_NODES_SHIFT=6
@@ -120,16 +140,22 @@ CONFIG_DISCONTIGMEM_MANUAL=y
 CONFIG_DISCONTIGMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 CONFIG_NEED_MULTIPLE_NODES=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_MIGRATION=y
-CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 CONFIG_SMP=y
 CONFIG_SYS_SUPPORTS_SMP=y
 CONFIG_NR_CPUS_DEFAULT_64=y
 CONFIG_NR_CPUS=64
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 # CONFIG_HZ_48 is not set
 # CONFIG_HZ_100 is not set
 # CONFIG_HZ_128 is not set
@@ -142,13 +168,13 @@ CONFIG_HZ=1000
 CONFIG_PREEMPT_NONE=y
 # CONFIG_PREEMPT_VOLUNTARY is not set
 # CONFIG_PREEMPT is not set
-CONFIG_PREEMPT_BKL=y
 # CONFIG_MIPS_INSANE_LARGE is not set
 # CONFIG_KEXEC is not set
 CONFIG_SECCOMP=y
 CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_STACKTRACE_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
 
 #
 # General setup
@@ -162,20 +188,41 @@ CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
 # CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=64
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
+# CONFIG_GROUP_SCHED is not set
 CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
 CONFIG_CPUSETS=y
-CONFIG_SYSFS_DEPRECATED=y
+CONFIG_PROC_PID_CPUSET=y
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
 CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
 # CONFIG_BLK_DEV_INITRD is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
@@ -184,44 +231,92 @@ CONFIG_HOTPLUG=y
 CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
+# CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
 CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_SYSCALL_WRAPPERS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_SLOW_WORK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODULE_FORCE_UNLOAD is not set
 # CONFIG_MODVERSIONS is not set
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
 CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
 # CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+# CONFIG_BLK_CGROUP is not set
+CONFIG_BLOCK_COMPAT=y
 
 #
 # IO Schedulers
 #
 CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
 CONFIG_IOSCHED_DEADLINE=y
 CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
 # CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -230,11 +325,10 @@ CONFIG_HW_HAS_PCI=y
230CONFIG_PCI=y 325CONFIG_PCI=y
231CONFIG_PCI_DOMAINS=y 326CONFIG_PCI_DOMAINS=y
232# CONFIG_ARCH_SUPPORTS_MSI is not set 327# CONFIG_ARCH_SUPPORTS_MSI is not set
328# CONFIG_PCI_LEGACY is not set
329# CONFIG_PCI_STUB is not set
330# CONFIG_PCI_IOV is not set
233CONFIG_MMU=y 331CONFIG_MMU=y
234
235#
236# PCCARD (PCMCIA/CardBus) support
237#
238# CONFIG_PCCARD is not set 332# CONFIG_PCCARD is not set
239# CONFIG_HOTPLUG_PCI is not set 333# CONFIG_HOTPLUG_PCI is not set
240 334
@@ -242,8 +336,9 @@ CONFIG_MMU=y
242# Executable file formats 336# Executable file formats
243# 337#
244CONFIG_BINFMT_ELF=y 338CONFIG_BINFMT_ELF=y
339CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
340# CONFIG_HAVE_AOUT is not set
245# CONFIG_BINFMT_MISC is not set 341# CONFIG_BINFMT_MISC is not set
246# CONFIG_BUILD_ELF64 is not set
247CONFIG_MIPS32_COMPAT=y 342CONFIG_MIPS32_COMPAT=y
248CONFIG_COMPAT=y 343CONFIG_COMPAT=y
249CONFIG_SYSVIPC_COMPAT=y 344CONFIG_SYSVIPC_COMPAT=y
@@ -255,13 +350,10 @@ CONFIG_BINFMT_ELF32=y
255# Power management options 350# Power management options
256# 351#
257CONFIG_PM=y 352CONFIG_PM=y
258# CONFIG_PM_LEGACY is not set
259# CONFIG_PM_DEBUG is not set 353# CONFIG_PM_DEBUG is not set
260 354# CONFIG_PM_RUNTIME is not set
261#
262# Networking
263#
264CONFIG_NET=y 355CONFIG_NET=y
356CONFIG_COMPAT_NETLINK_MESSAGES=y
265 357
266# 358#
267# Networking options 359# Networking options
@@ -273,6 +365,8 @@ CONFIG_XFRM=y
273CONFIG_XFRM_USER=m 365CONFIG_XFRM_USER=m
274# CONFIG_XFRM_SUB_POLICY is not set 366# CONFIG_XFRM_SUB_POLICY is not set
275CONFIG_XFRM_MIGRATE=y 367CONFIG_XFRM_MIGRATE=y
368CONFIG_XFRM_STATISTICS=y
369CONFIG_XFRM_IPCOMP=m
276CONFIG_NET_KEY=y 370CONFIG_NET_KEY=y
277CONFIG_NET_KEY_MIGRATE=y 371CONFIG_NET_KEY_MIGRATE=y
278CONFIG_INET=y 372CONFIG_INET=y
@@ -292,19 +386,40 @@ CONFIG_IP_PNP=y
292# CONFIG_INET_ESP is not set 386# CONFIG_INET_ESP is not set
293# CONFIG_INET_IPCOMP is not set 387# CONFIG_INET_IPCOMP is not set
294# CONFIG_INET_XFRM_TUNNEL is not set 388# CONFIG_INET_XFRM_TUNNEL is not set
295# CONFIG_INET_TUNNEL is not set 389CONFIG_INET_TUNNEL=m
296CONFIG_INET_XFRM_MODE_TRANSPORT=m 390CONFIG_INET_XFRM_MODE_TRANSPORT=m
297CONFIG_INET_XFRM_MODE_TUNNEL=m 391CONFIG_INET_XFRM_MODE_TUNNEL=m
298CONFIG_INET_XFRM_MODE_BEET=m 392CONFIG_INET_XFRM_MODE_BEET=m
393CONFIG_INET_LRO=y
299CONFIG_INET_DIAG=y 394CONFIG_INET_DIAG=y
300CONFIG_INET_TCP_DIAG=y 395CONFIG_INET_TCP_DIAG=y
301# CONFIG_TCP_CONG_ADVANCED is not set 396# CONFIG_TCP_CONG_ADVANCED is not set
302CONFIG_TCP_CONG_CUBIC=y 397CONFIG_TCP_CONG_CUBIC=y
303CONFIG_DEFAULT_TCP_CONG="cubic" 398CONFIG_DEFAULT_TCP_CONG="cubic"
304CONFIG_TCP_MD5SIG=y 399CONFIG_TCP_MD5SIG=y
305# CONFIG_IPV6 is not set 400CONFIG_IPV6=y
306# CONFIG_INET6_XFRM_TUNNEL is not set 401CONFIG_IPV6_PRIVACY=y
307# CONFIG_INET6_TUNNEL is not set 402CONFIG_IPV6_ROUTER_PREF=y
403CONFIG_IPV6_ROUTE_INFO=y
404CONFIG_IPV6_OPTIMISTIC_DAD=y
405CONFIG_INET6_AH=m
406CONFIG_INET6_ESP=m
407CONFIG_INET6_IPCOMP=m
408CONFIG_IPV6_MIP6=m
409CONFIG_INET6_XFRM_TUNNEL=m
410CONFIG_INET6_TUNNEL=m
411CONFIG_INET6_XFRM_MODE_TRANSPORT=m
412CONFIG_INET6_XFRM_MODE_TUNNEL=m
413CONFIG_INET6_XFRM_MODE_BEET=m
414CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
415CONFIG_IPV6_SIT=m
416CONFIG_IPV6_SIT_6RD=y
417CONFIG_IPV6_NDISC_NODETYPE=y
418CONFIG_IPV6_TUNNEL=m
419CONFIG_IPV6_MULTIPLE_TABLES=y
420CONFIG_IPV6_SUBTREES=y
421CONFIG_IPV6_MROUTE=y
422CONFIG_IPV6_PIMSM_V2=y
308CONFIG_NETWORK_SECMARK=y 423CONFIG_NETWORK_SECMARK=y
309# CONFIG_NETFILTER is not set 424# CONFIG_NETFILTER is not set
310# CONFIG_IP_DCCP is not set 425# CONFIG_IP_DCCP is not set
@@ -314,9 +429,11 @@ CONFIG_IP_SCTP=m
314# CONFIG_SCTP_HMAC_NONE is not set 429# CONFIG_SCTP_HMAC_NONE is not set
315# CONFIG_SCTP_HMAC_SHA1 is not set 430# CONFIG_SCTP_HMAC_SHA1 is not set
316CONFIG_SCTP_HMAC_MD5=y 431CONFIG_SCTP_HMAC_MD5=y
432# CONFIG_RDS is not set
317# CONFIG_TIPC is not set 433# CONFIG_TIPC is not set
318# CONFIG_ATM is not set 434# CONFIG_ATM is not set
319# CONFIG_BRIDGE is not set 435# CONFIG_BRIDGE is not set
436# CONFIG_NET_DSA is not set
320# CONFIG_VLAN_8021Q is not set 437# CONFIG_VLAN_8021Q is not set
321# CONFIG_DECNET is not set 438# CONFIG_DECNET is not set
322# CONFIG_LLC2 is not set 439# CONFIG_LLC2 is not set
@@ -326,12 +443,9 @@ CONFIG_SCTP_HMAC_MD5=y
326# CONFIG_LAPB is not set 443# CONFIG_LAPB is not set
327# CONFIG_ECONET is not set 444# CONFIG_ECONET is not set
328# CONFIG_WAN_ROUTER is not set 445# CONFIG_WAN_ROUTER is not set
329 446# CONFIG_PHONET is not set
330# 447# CONFIG_IEEE802154 is not set
331# QoS and/or fair queueing
332#
333CONFIG_NET_SCHED=y 448CONFIG_NET_SCHED=y
334CONFIG_NET_SCH_FIFO=y
335 449
336# 450#
337# Queueing/Scheduling 451# Queueing/Scheduling
@@ -340,7 +454,7 @@ CONFIG_NET_SCH_CBQ=m
340CONFIG_NET_SCH_HTB=m 454CONFIG_NET_SCH_HTB=m
341CONFIG_NET_SCH_HFSC=m 455CONFIG_NET_SCH_HFSC=m
342CONFIG_NET_SCH_PRIO=m 456CONFIG_NET_SCH_PRIO=m
343CONFIG_NET_SCH_RR=m 457CONFIG_NET_SCH_MULTIQ=y
344CONFIG_NET_SCH_RED=m 458CONFIG_NET_SCH_RED=m
345CONFIG_NET_SCH_SFQ=m 459CONFIG_NET_SCH_SFQ=m
346CONFIG_NET_SCH_TEQL=m 460CONFIG_NET_SCH_TEQL=m
@@ -348,6 +462,7 @@ CONFIG_NET_SCH_TBF=m
348CONFIG_NET_SCH_GRED=m 462CONFIG_NET_SCH_GRED=m
349CONFIG_NET_SCH_DSMARK=m 463CONFIG_NET_SCH_DSMARK=m
350CONFIG_NET_SCH_NETEM=m 464CONFIG_NET_SCH_NETEM=m
465# CONFIG_NET_SCH_DRR is not set
351CONFIG_NET_SCH_INGRESS=m 466CONFIG_NET_SCH_INGRESS=m
352 467
353# 468#
@@ -364,41 +479,63 @@ CONFIG_NET_CLS_U32=m
364CONFIG_CLS_U32_MARK=y 479CONFIG_CLS_U32_MARK=y
365CONFIG_NET_CLS_RSVP=m 480CONFIG_NET_CLS_RSVP=m
366CONFIG_NET_CLS_RSVP6=m 481CONFIG_NET_CLS_RSVP6=m
482CONFIG_NET_CLS_FLOW=m
483CONFIG_NET_CLS_CGROUP=y
367# CONFIG_NET_EMATCH is not set 484# CONFIG_NET_EMATCH is not set
368CONFIG_NET_CLS_ACT=y 485CONFIG_NET_CLS_ACT=y
369CONFIG_NET_ACT_POLICE=y 486CONFIG_NET_ACT_POLICE=y
370CONFIG_NET_ACT_GACT=m 487CONFIG_NET_ACT_GACT=m
371CONFIG_GACT_PROB=y 488CONFIG_GACT_PROB=y
372CONFIG_NET_ACT_MIRRED=m 489CONFIG_NET_ACT_MIRRED=m
490CONFIG_NET_ACT_NAT=m
373CONFIG_NET_ACT_PEDIT=m 491CONFIG_NET_ACT_PEDIT=m
374# CONFIG_NET_ACT_SIMP is not set 492# CONFIG_NET_ACT_SIMP is not set
375CONFIG_NET_CLS_POLICE=y 493CONFIG_NET_ACT_SKBEDIT=m
376# CONFIG_NET_CLS_IND is not set 494# CONFIG_NET_CLS_IND is not set
495CONFIG_NET_SCH_FIFO=y
496# CONFIG_DCB is not set
377 497
378# 498#
379# Network testing 499# Network testing
380# 500#
381# CONFIG_NET_PKTGEN is not set 501# CONFIG_NET_PKTGEN is not set
382# CONFIG_HAMRADIO is not set 502# CONFIG_HAMRADIO is not set
503# CONFIG_CAN is not set
383# CONFIG_IRDA is not set 504# CONFIG_IRDA is not set
384# CONFIG_BT is not set 505# CONFIG_BT is not set
385# CONFIG_AF_RXRPC is not set 506# CONFIG_AF_RXRPC is not set
386 507CONFIG_FIB_RULES=y
387# 508CONFIG_WIRELESS=y
388# Wireless
389#
390CONFIG_CFG80211=m
391CONFIG_WIRELESS_EXT=y 509CONFIG_WIRELESS_EXT=y
510CONFIG_WEXT_CORE=y
511CONFIG_WEXT_PROC=y
512CONFIG_WEXT_SPY=y
513CONFIG_WEXT_PRIV=y
514CONFIG_CFG80211=m
515# CONFIG_NL80211_TESTMODE is not set
516# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
517# CONFIG_CFG80211_REG_DEBUG is not set
518CONFIG_CFG80211_DEFAULT_PS=y
519# CONFIG_WIRELESS_OLD_REGULATORY is not set
520CONFIG_CFG80211_WEXT=y
521CONFIG_WIRELESS_EXT_SYSFS=y
522CONFIG_LIB80211=m
523CONFIG_LIB80211_CRYPT_WEP=m
524CONFIG_LIB80211_CRYPT_CCMP=m
525CONFIG_LIB80211_CRYPT_TKIP=m
526# CONFIG_LIB80211_DEBUG is not set
392CONFIG_MAC80211=m 527CONFIG_MAC80211=m
393# CONFIG_MAC80211_DEBUG is not set 528CONFIG_MAC80211_RC_PID=y
394CONFIG_IEEE80211=m 529CONFIG_MAC80211_RC_MINSTREL=y
395# CONFIG_IEEE80211_DEBUG is not set 530# CONFIG_MAC80211_RC_DEFAULT_PID is not set
396CONFIG_IEEE80211_CRYPT_WEP=m 531CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
397CONFIG_IEEE80211_CRYPT_CCMP=m 532CONFIG_MAC80211_RC_DEFAULT="minstrel"
398CONFIG_IEEE80211_CRYPT_TKIP=m 533# CONFIG_MAC80211_MESH is not set
399CONFIG_IEEE80211_SOFTMAC=m 534CONFIG_MAC80211_LEDS=y
400# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set 535# CONFIG_MAC80211_DEBUG_MENU is not set
536# CONFIG_WIMAX is not set
401CONFIG_RFKILL=m 537CONFIG_RFKILL=m
538CONFIG_RFKILL_LEDS=y
402# CONFIG_NET_9P is not set 539# CONFIG_NET_9P is not set
403 540
404# 541#
@@ -408,9 +545,13 @@ CONFIG_RFKILL=m
408# 545#
409# Generic Driver Options 546# Generic Driver Options
410# 547#
548CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
549# CONFIG_DEVTMPFS is not set
411CONFIG_STANDALONE=y 550CONFIG_STANDALONE=y
412CONFIG_PREVENT_FIRMWARE_BUILD=y 551CONFIG_PREVENT_FIRMWARE_BUILD=y
413CONFIG_FW_LOADER=y 552CONFIG_FW_LOADER=y
553CONFIG_FIRMWARE_IN_KERNEL=y
554CONFIG_EXTRA_FIRMWARE=""
414# CONFIG_SYS_HYPERVISOR is not set 555# CONFIG_SYS_HYPERVISOR is not set
415CONFIG_CONNECTOR=m 556CONFIG_CONNECTOR=m
416# CONFIG_MTD is not set 557# CONFIG_MTD is not set
@@ -423,14 +564,19 @@ CONFIG_BLK_DEV=y
423# CONFIG_BLK_DEV_COW_COMMON is not set 564# CONFIG_BLK_DEV_COW_COMMON is not set
424CONFIG_BLK_DEV_LOOP=y 565CONFIG_BLK_DEV_LOOP=y
425CONFIG_BLK_DEV_CRYPTOLOOP=m 566CONFIG_BLK_DEV_CRYPTOLOOP=m
567# CONFIG_BLK_DEV_DRBD is not set
426# CONFIG_BLK_DEV_NBD is not set 568# CONFIG_BLK_DEV_NBD is not set
569CONFIG_BLK_DEV_OSD=m
427# CONFIG_BLK_DEV_SX8 is not set 570# CONFIG_BLK_DEV_SX8 is not set
428# CONFIG_BLK_DEV_RAM is not set 571# CONFIG_BLK_DEV_RAM is not set
429CONFIG_CDROM_PKTCDVD=m 572CONFIG_CDROM_PKTCDVD=m
430CONFIG_CDROM_PKTCDVD_BUFFERS=8 573CONFIG_CDROM_PKTCDVD_BUFFERS=8
431# CONFIG_CDROM_PKTCDVD_WCACHE is not set 574# CONFIG_CDROM_PKTCDVD_WCACHE is not set
432CONFIG_ATA_OVER_ETH=m 575CONFIG_ATA_OVER_ETH=m
576# CONFIG_BLK_DEV_HD is not set
433# CONFIG_MISC_DEVICES is not set 577# CONFIG_MISC_DEVICES is not set
578CONFIG_EEPROM_93CX6=m
579CONFIG_HAVE_IDE=y
434# CONFIG_IDE is not set 580# CONFIG_IDE is not set
435 581
436# 582#
@@ -453,10 +599,6 @@ CONFIG_BLK_DEV_SR=m
453CONFIG_BLK_DEV_SR_VENDOR=y 599CONFIG_BLK_DEV_SR_VENDOR=y
454CONFIG_CHR_DEV_SG=m 600CONFIG_CHR_DEV_SG=m
455CONFIG_CHR_DEV_SCH=m 601CONFIG_CHR_DEV_SCH=m
456
457#
458# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
459#
460# CONFIG_SCSI_MULTI_LUN is not set 602# CONFIG_SCSI_MULTI_LUN is not set
461CONFIG_SCSI_CONSTANTS=y 603CONFIG_SCSI_CONSTANTS=y
462CONFIG_SCSI_LOGGING=y 604CONFIG_SCSI_LOGGING=y
@@ -471,11 +613,18 @@ CONFIG_SCSI_FC_ATTRS=y
471CONFIG_SCSI_ISCSI_ATTRS=m 613CONFIG_SCSI_ISCSI_ATTRS=m
472CONFIG_SCSI_SAS_ATTRS=m 614CONFIG_SCSI_SAS_ATTRS=m
473CONFIG_SCSI_SAS_LIBSAS=m 615CONFIG_SCSI_SAS_LIBSAS=m
616CONFIG_SCSI_SAS_HOST_SMP=y
474# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set 617# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
618# CONFIG_SCSI_SRP_ATTRS is not set
475CONFIG_SCSI_LOWLEVEL=y 619CONFIG_SCSI_LOWLEVEL=y
476# CONFIG_ISCSI_TCP is not set 620# CONFIG_ISCSI_TCP is not set
621CONFIG_SCSI_CXGB3_ISCSI=m
622CONFIG_SCSI_BNX2_ISCSI=m
623CONFIG_BE2ISCSI=m
477# CONFIG_BLK_DEV_3W_XXXX_RAID is not set 624# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
625CONFIG_SCSI_HPSA=m
478# CONFIG_SCSI_3W_9XXX is not set 626# CONFIG_SCSI_3W_9XXX is not set
627CONFIG_SCSI_3W_SAS=m
479# CONFIG_SCSI_ACARD is not set 628# CONFIG_SCSI_ACARD is not set
480# CONFIG_SCSI_AACRAID is not set 629# CONFIG_SCSI_AACRAID is not set
481# CONFIG_SCSI_AIC7XXX is not set 630# CONFIG_SCSI_AIC7XXX is not set
@@ -483,11 +632,21 @@ CONFIG_SCSI_LOWLEVEL=y
483# CONFIG_SCSI_AIC79XX is not set 632# CONFIG_SCSI_AIC79XX is not set
484CONFIG_SCSI_AIC94XX=m 633CONFIG_SCSI_AIC94XX=m
485# CONFIG_AIC94XX_DEBUG is not set 634# CONFIG_AIC94XX_DEBUG is not set
635CONFIG_SCSI_MVSAS=m
636# CONFIG_SCSI_MVSAS_DEBUG is not set
637CONFIG_SCSI_DPT_I2O=m
638# CONFIG_SCSI_ADVANSYS is not set
486# CONFIG_SCSI_ARCMSR is not set 639# CONFIG_SCSI_ARCMSR is not set
487# CONFIG_MEGARAID_NEWGEN is not set 640# CONFIG_MEGARAID_NEWGEN is not set
488# CONFIG_MEGARAID_LEGACY is not set 641# CONFIG_MEGARAID_LEGACY is not set
489# CONFIG_MEGARAID_SAS is not set 642# CONFIG_MEGARAID_SAS is not set
643CONFIG_SCSI_MPT2SAS=m
644CONFIG_SCSI_MPT2SAS_MAX_SGE=128
645# CONFIG_SCSI_MPT2SAS_LOGGING is not set
490# CONFIG_SCSI_HPTIOP is not set 646# CONFIG_SCSI_HPTIOP is not set
647CONFIG_LIBFC=m
648# CONFIG_LIBFCOE is not set
649# CONFIG_FCOE is not set
491# CONFIG_SCSI_DMX3191D is not set 650# CONFIG_SCSI_DMX3191D is not set
492# CONFIG_SCSI_FUTURE_DOMAIN is not set 651# CONFIG_SCSI_FUTURE_DOMAIN is not set
493# CONFIG_SCSI_IPS is not set 652# CONFIG_SCSI_IPS is not set
@@ -502,16 +661,31 @@ CONFIG_SCSI_QLOGIC_1280=y
502# CONFIG_SCSI_DC395x is not set 661# CONFIG_SCSI_DC395x is not set
503# CONFIG_SCSI_DC390T is not set 662# CONFIG_SCSI_DC390T is not set
504# CONFIG_SCSI_DEBUG is not set 663# CONFIG_SCSI_DEBUG is not set
664CONFIG_SCSI_PMCRAID=m
665# CONFIG_SCSI_PM8001 is not set
505# CONFIG_SCSI_SRP is not set 666# CONFIG_SCSI_SRP is not set
667CONFIG_SCSI_BFA_FC=m
668CONFIG_SCSI_DH=m
669CONFIG_SCSI_DH_RDAC=m
670CONFIG_SCSI_DH_HP_SW=m
671CONFIG_SCSI_DH_EMC=m
672CONFIG_SCSI_DH_ALUA=m
673CONFIG_SCSI_OSD_INITIATOR=m
674CONFIG_SCSI_OSD_ULD=m
675CONFIG_SCSI_OSD_DPRINT_SENSE=1
676# CONFIG_SCSI_OSD_DEBUG is not set
506# CONFIG_ATA is not set 677# CONFIG_ATA is not set
507CONFIG_MD=y 678CONFIG_MD=y
508CONFIG_BLK_DEV_MD=y 679CONFIG_BLK_DEV_MD=y
680CONFIG_MD_AUTODETECT=y
509CONFIG_MD_LINEAR=m 681CONFIG_MD_LINEAR=m
510CONFIG_MD_RAID0=y 682CONFIG_MD_RAID0=y
511CONFIG_MD_RAID1=y 683CONFIG_MD_RAID1=y
512CONFIG_MD_RAID10=m 684CONFIG_MD_RAID10=m
513CONFIG_MD_RAID456=y 685CONFIG_MD_RAID456=y
514CONFIG_MD_RAID5_RESHAPE=y 686# CONFIG_MULTICORE_RAID456 is not set
687CONFIG_MD_RAID6_PQ=y
688# CONFIG_ASYNC_RAID6_TEST is not set
515CONFIG_MD_MULTIPATH=m 689CONFIG_MD_MULTIPATH=m
516CONFIG_MD_FAULTY=m 690CONFIG_MD_FAULTY=m
517CONFIG_BLK_DEV_DM=m 691CONFIG_BLK_DEV_DM=m
@@ -519,36 +693,39 @@ CONFIG_BLK_DEV_DM=m
519CONFIG_DM_CRYPT=m 693CONFIG_DM_CRYPT=m
520CONFIG_DM_SNAPSHOT=m 694CONFIG_DM_SNAPSHOT=m
521CONFIG_DM_MIRROR=m 695CONFIG_DM_MIRROR=m
696CONFIG_DM_LOG_USERSPACE=m
522CONFIG_DM_ZERO=m 697CONFIG_DM_ZERO=m
523CONFIG_DM_MULTIPATH=m 698CONFIG_DM_MULTIPATH=m
524CONFIG_DM_MULTIPATH_EMC=m 699CONFIG_DM_MULTIPATH_QL=m
525CONFIG_DM_MULTIPATH_RDAC=m 700CONFIG_DM_MULTIPATH_ST=m
526# CONFIG_DM_DELAY is not set 701# CONFIG_DM_DELAY is not set
702CONFIG_DM_UEVENT=y
703# CONFIG_FUSION is not set
527 704
528# 705#
529# Fusion MPT device support 706# IEEE 1394 (FireWire) support
530# 707#
531# CONFIG_FUSION is not set
532# CONFIG_FUSION_SPI is not set
533# CONFIG_FUSION_FC is not set
534# CONFIG_FUSION_SAS is not set
535 708
536# 709#
537# IEEE 1394 (FireWire) support 710# You can enable one or both FireWire driver stacks.
711#
712
713#
714# The newer stack is recommended.
538# 715#
539# CONFIG_FIREWIRE is not set 716# CONFIG_FIREWIRE is not set
540# CONFIG_IEEE1394 is not set 717# CONFIG_IEEE1394 is not set
541# CONFIG_I2O is not set 718# CONFIG_I2O is not set
542CONFIG_NETDEVICES=y 719CONFIG_NETDEVICES=y
543CONFIG_NETDEVICES_MULTIQUEUE=y
544CONFIG_IFB=m 720CONFIG_IFB=m
545# CONFIG_DUMMY is not set 721# CONFIG_DUMMY is not set
546# CONFIG_BONDING is not set 722# CONFIG_BONDING is not set
547CONFIG_MACVLAN=m 723CONFIG_MACVLAN=m
548# CONFIG_EQUALIZER is not set 724# CONFIG_EQUALIZER is not set
549# CONFIG_TUN is not set 725# CONFIG_TUN is not set
726CONFIG_VETH=m
550# CONFIG_ARCNET is not set 727# CONFIG_ARCNET is not set
551CONFIG_PHYLIB=m 728CONFIG_PHYLIB=y
552 729
553# 730#
554# MII PHY device drivers 731# MII PHY device drivers
@@ -562,23 +739,51 @@ CONFIG_VITESSE_PHY=m
562CONFIG_SMSC_PHY=m 739CONFIG_SMSC_PHY=m
563# CONFIG_BROADCOM_PHY is not set 740# CONFIG_BROADCOM_PHY is not set
564CONFIG_ICPLUS_PHY=m 741CONFIG_ICPLUS_PHY=m
742CONFIG_REALTEK_PHY=m
743CONFIG_NATIONAL_PHY=m
744CONFIG_STE10XP=m
745CONFIG_LSI_ET1011C_PHY=m
565# CONFIG_FIXED_PHY is not set 746# CONFIG_FIXED_PHY is not set
747CONFIG_MDIO_BITBANG=m
566CONFIG_NET_ETHERNET=y 748CONFIG_NET_ETHERNET=y
567CONFIG_MII=y 749CONFIG_MII=y
568CONFIG_AX88796=m 750CONFIG_AX88796=m
751CONFIG_AX88796_93CX6=y
569CONFIG_SGI_IOC3_ETH=y 752CONFIG_SGI_IOC3_ETH=y
570# CONFIG_HAPPYMEAL is not set 753# CONFIG_HAPPYMEAL is not set
571# CONFIG_SUNGEM is not set 754# CONFIG_SUNGEM is not set
572# CONFIG_CASSINI is not set 755# CONFIG_CASSINI is not set
573# CONFIG_NET_VENDOR_3COM is not set 756# CONFIG_NET_VENDOR_3COM is not set
757CONFIG_SMC91X=m
574# CONFIG_DM9000 is not set 758# CONFIG_DM9000 is not set
759CONFIG_ETHOC=m
760CONFIG_SMSC911X=m
761CONFIG_DNET=m
575# CONFIG_NET_TULIP is not set 762# CONFIG_NET_TULIP is not set
576# CONFIG_HP100 is not set 763# CONFIG_HP100 is not set
764# CONFIG_IBM_NEW_EMAC_ZMII is not set
765# CONFIG_IBM_NEW_EMAC_RGMII is not set
766# CONFIG_IBM_NEW_EMAC_TAH is not set
767# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
768# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
769# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
770# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
577# CONFIG_NET_PCI is not set 771# CONFIG_NET_PCI is not set
772CONFIG_B44=m
773CONFIG_B44_PCI_AUTOSELECT=y
774CONFIG_B44_PCICORE_AUTOSELECT=y
775CONFIG_B44_PCI=y
776CONFIG_KS8842=m
777CONFIG_KS8851_MLL=m
778CONFIG_ATL2=m
578CONFIG_NETDEV_1000=y 779CONFIG_NETDEV_1000=y
579# CONFIG_ACENIC is not set 780# CONFIG_ACENIC is not set
580# CONFIG_DL2K is not set 781# CONFIG_DL2K is not set
581# CONFIG_E1000 is not set 782# CONFIG_E1000 is not set
783CONFIG_E1000E=m
784CONFIG_IP1000=m
785CONFIG_IGB=m
786CONFIG_IGBVF=m
582# CONFIG_NS83820 is not set 787# CONFIG_NS83820 is not set
583# CONFIG_HAMACHI is not set 788# CONFIG_HAMACHI is not set
584# CONFIG_YELLOWFIN is not set 789# CONFIG_YELLOWFIN is not set
@@ -588,24 +793,75 @@ CONFIG_NETDEV_1000=y
588# CONFIG_SKY2 is not set 793# CONFIG_SKY2 is not set
589CONFIG_VIA_VELOCITY=m 794CONFIG_VIA_VELOCITY=m
590# CONFIG_TIGON3 is not set 795# CONFIG_TIGON3 is not set
591# CONFIG_BNX2 is not set 796CONFIG_BNX2=m
797CONFIG_CNIC=m
592CONFIG_QLA3XXX=m 798CONFIG_QLA3XXX=m
593# CONFIG_ATL1 is not set 799# CONFIG_ATL1 is not set
800CONFIG_ATL1E=m
801CONFIG_ATL1C=m
802CONFIG_JME=m
594CONFIG_NETDEV_10000=y 803CONFIG_NETDEV_10000=y
804CONFIG_MDIO=m
595# CONFIG_CHELSIO_T1 is not set 805# CONFIG_CHELSIO_T1 is not set
806CONFIG_CHELSIO_T3_DEPENDS=y
596CONFIG_CHELSIO_T3=m 807CONFIG_CHELSIO_T3=m
808CONFIG_ENIC=m
809CONFIG_IXGBE=m
597# CONFIG_IXGB is not set 810# CONFIG_IXGB is not set
598# CONFIG_S2IO is not set 811# CONFIG_S2IO is not set
812CONFIG_VXGE=m
813# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
599# CONFIG_MYRI10GE is not set 814# CONFIG_MYRI10GE is not set
600CONFIG_NETXEN_NIC=m 815CONFIG_NETXEN_NIC=m
601# CONFIG_MLX4_CORE is not set 816CONFIG_NIU=m
817CONFIG_MLX4_EN=m
818CONFIG_MLX4_CORE=m
819# CONFIG_MLX4_DEBUG is not set
820CONFIG_TEHUTI=m
821CONFIG_BNX2X=m
822CONFIG_QLGE=m
823CONFIG_SFC=m
824CONFIG_BE2NET=m
602# CONFIG_TR is not set 825# CONFIG_TR is not set
603 826CONFIG_WLAN=y
604# 827CONFIG_LIBERTAS_THINFIRM=m
605# Wireless LAN 828CONFIG_ATMEL=m
606# 829CONFIG_PCI_ATMEL=m
607# CONFIG_WLAN_PRE80211 is not set 830CONFIG_PRISM54=m
608CONFIG_WLAN_80211=y 831CONFIG_RTL8180=m
832CONFIG_ADM8211=m
833# CONFIG_MAC80211_HWSIM is not set
834CONFIG_MWL8K=m
835CONFIG_ATH_COMMON=m
836# CONFIG_ATH_DEBUG is not set
837CONFIG_ATH5K=m
838# CONFIG_ATH5K_DEBUG is not set
839CONFIG_ATH9K_HW=m
840CONFIG_ATH9K_COMMON=m
841CONFIG_ATH9K=m
842CONFIG_B43=m
843CONFIG_B43_PCI_AUTOSELECT=y
844CONFIG_B43_PCICORE_AUTOSELECT=y
845CONFIG_B43_PHY_LP=y
846CONFIG_B43_LEDS=y
847CONFIG_B43_HWRNG=y
848# CONFIG_B43_DEBUG is not set
849CONFIG_B43LEGACY=m
850CONFIG_B43LEGACY_PCI_AUTOSELECT=y
851CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
852CONFIG_B43LEGACY_LEDS=y
853CONFIG_B43LEGACY_HWRNG=y
854# CONFIG_B43LEGACY_DEBUG is not set
855CONFIG_B43LEGACY_DMA=y
856CONFIG_B43LEGACY_PIO=y
857CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
858# CONFIG_B43LEGACY_DMA_MODE is not set
859# CONFIG_B43LEGACY_PIO_MODE is not set
860CONFIG_HOSTAP=m
861CONFIG_HOSTAP_FIRMWARE=y
862CONFIG_HOSTAP_FIRMWARE_NVRAM=y
863CONFIG_HOSTAP_PLX=m
864CONFIG_HOSTAP_PCI=m
609CONFIG_IPW2100=m 865CONFIG_IPW2100=m
610CONFIG_IPW2100_MONITOR=y 866CONFIG_IPW2100_MONITOR=y
611CONFIG_IPW2100_DEBUG=y 867CONFIG_IPW2100_DEBUG=y
@@ -615,38 +871,57 @@ CONFIG_IPW2200_RADIOTAP=y
615CONFIG_IPW2200_PROMISCUOUS=y 871CONFIG_IPW2200_PROMISCUOUS=y
616CONFIG_IPW2200_QOS=y 872CONFIG_IPW2200_QOS=y
617CONFIG_IPW2200_DEBUG=y 873CONFIG_IPW2200_DEBUG=y
874CONFIG_LIBIPW=m
875# CONFIG_LIBIPW_DEBUG is not set
876CONFIG_IWLWIFI=m
877CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y
878# CONFIG_IWLWIFI_DEBUG is not set
879CONFIG_IWLAGN=m
880CONFIG_IWL4965=y
881CONFIG_IWL5000=y
882CONFIG_IWL3945=m
883CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y
618CONFIG_LIBERTAS=m 884CONFIG_LIBERTAS=m
619# CONFIG_LIBERTAS_DEBUG is not set 885# CONFIG_LIBERTAS_DEBUG is not set
620CONFIG_HERMES=m 886CONFIG_HERMES=m
887# CONFIG_HERMES_CACHE_FW_ON_INIT is not set
621CONFIG_PLX_HERMES=m 888CONFIG_PLX_HERMES=m
622CONFIG_TMD_HERMES=m 889CONFIG_TMD_HERMES=m
623CONFIG_NORTEL_HERMES=m 890CONFIG_NORTEL_HERMES=m
624CONFIG_PCI_HERMES=m 891CONFIG_PCI_HERMES=m
625CONFIG_ATMEL=m 892CONFIG_P54_COMMON=m
626CONFIG_PCI_ATMEL=m 893CONFIG_P54_PCI=m
627CONFIG_PRISM54=m 894CONFIG_P54_LEDS=y
628CONFIG_HOSTAP=m 895CONFIG_RT2X00=m
629CONFIG_HOSTAP_FIRMWARE=y 896CONFIG_RT2400PCI=m
630CONFIG_HOSTAP_FIRMWARE_NVRAM=y 897CONFIG_RT2500PCI=m
631CONFIG_HOSTAP_PLX=m 898CONFIG_RT61PCI=m
632CONFIG_HOSTAP_PCI=m 899CONFIG_RT2800PCI_PCI=m
633CONFIG_BCM43XX=m 900CONFIG_RT2800PCI=m
634CONFIG_BCM43XX_DEBUG=y 901CONFIG_RT2800_LIB=m
635CONFIG_BCM43XX_DMA=y 902CONFIG_RT2X00_LIB_PCI=m
636CONFIG_BCM43XX_PIO=y 903CONFIG_RT2X00_LIB=m
637CONFIG_BCM43XX_DMA_AND_PIO_MODE=y 904CONFIG_RT2X00_LIB_HT=y
638# CONFIG_BCM43XX_DMA_MODE is not set 905CONFIG_RT2X00_LIB_FIRMWARE=y
639# CONFIG_BCM43XX_PIO_MODE is not set 906CONFIG_RT2X00_LIB_CRYPTO=y
907CONFIG_RT2X00_LIB_LEDS=y
908# CONFIG_RT2X00_DEBUG is not set
909CONFIG_WL12XX=m
910CONFIG_WL1251=m
911
912#
913# Enable WiMAX (Networking options) to see the WiMAX drivers
914#
640# CONFIG_WAN is not set 915# CONFIG_WAN is not set
641# CONFIG_FDDI is not set 916# CONFIG_FDDI is not set
642# CONFIG_HIPPI is not set 917# CONFIG_HIPPI is not set
643# CONFIG_PPP is not set 918# CONFIG_PPP is not set
644# CONFIG_SLIP is not set 919# CONFIG_SLIP is not set
645# CONFIG_NET_FC is not set 920# CONFIG_NET_FC is not set
646# CONFIG_SHAPER is not set
647# CONFIG_NETCONSOLE is not set 921# CONFIG_NETCONSOLE is not set
648# CONFIG_NETPOLL is not set 922# CONFIG_NETPOLL is not set
649# CONFIG_NET_POLL_CONTROLLER is not set 923# CONFIG_NET_POLL_CONTROLLER is not set
924# CONFIG_VMXNET3 is not set
650# CONFIG_ISDN is not set 925# CONFIG_ISDN is not set
651# CONFIG_PHONE is not set 926# CONFIG_PHONE is not set
652 927
@@ -664,13 +939,16 @@ CONFIG_SERIO_SERPORT=y
664# CONFIG_SERIO_PCIPS2 is not set 939# CONFIG_SERIO_PCIPS2 is not set
665CONFIG_SERIO_LIBPS2=m 940CONFIG_SERIO_LIBPS2=m
666CONFIG_SERIO_RAW=m 941CONFIG_SERIO_RAW=m
942CONFIG_SERIO_ALTERA_PS2=m
667# CONFIG_GAMEPORT is not set 943# CONFIG_GAMEPORT is not set
668 944
669# 945#
670# Character devices 946# Character devices
671# 947#
672# CONFIG_VT is not set 948# CONFIG_VT is not set
949CONFIG_DEVKMEM=y
673# CONFIG_SERIAL_NONSTANDARD is not set 950# CONFIG_SERIAL_NONSTANDARD is not set
951CONFIG_NOZOMI=m
674 952
675# 953#
676# Serial drivers 954# Serial drivers
@@ -693,95 +971,258 @@ CONFIG_SERIAL_CORE=y
693CONFIG_SERIAL_CORE_CONSOLE=y 971CONFIG_SERIAL_CORE_CONSOLE=y
694# CONFIG_SERIAL_JSM is not set 972# CONFIG_SERIAL_JSM is not set
695CONFIG_UNIX98_PTYS=y 973CONFIG_UNIX98_PTYS=y
974CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
696CONFIG_LEGACY_PTYS=y 975CONFIG_LEGACY_PTYS=y
697CONFIG_LEGACY_PTY_COUNT=256 976CONFIG_LEGACY_PTY_COUNT=256
698# CONFIG_IPMI_HANDLER is not set 977# CONFIG_IPMI_HANDLER is not set
699# CONFIG_WATCHDOG is not set
700CONFIG_HW_RANDOM=m 978CONFIG_HW_RANDOM=m
701# CONFIG_RTC is not set 979CONFIG_HW_RANDOM_TIMERIOMEM=m
702# CONFIG_R3964 is not set 980# CONFIG_R3964 is not set
703# CONFIG_APPLICOM is not set 981# CONFIG_APPLICOM is not set
704# CONFIG_DRM is not set
705# CONFIG_RAW_DRIVER is not set 982# CONFIG_RAW_DRIVER is not set
706# CONFIG_TCG_TPM is not set 983# CONFIG_TCG_TPM is not set
707CONFIG_DEVPORT=y 984CONFIG_DEVPORT=y
708# CONFIG_I2C is not set 985CONFIG_I2C=m
986CONFIG_I2C_BOARDINFO=y
987CONFIG_I2C_COMPAT=y
988CONFIG_I2C_CHARDEV=m
989CONFIG_I2C_HELPER_AUTO=y
990CONFIG_I2C_ALGOBIT=m
991CONFIG_I2C_ALGOPCA=m
992
993#
994# I2C Hardware Bus support
995#
996
997#
998# PC SMBus host controller drivers
999#
1000CONFIG_I2C_ALI1535=m
1001CONFIG_I2C_ALI1563=m
1002CONFIG_I2C_ALI15X3=m
1003CONFIG_I2C_AMD756=m
1004CONFIG_I2C_AMD8111=m
1005CONFIG_I2C_I801=m
1006CONFIG_I2C_ISCH=m
1007CONFIG_I2C_PIIX4=m
1008CONFIG_I2C_NFORCE2=m
1009CONFIG_I2C_SIS5595=m
1010CONFIG_I2C_SIS630=m
1011CONFIG_I2C_SIS96X=m
1012CONFIG_I2C_VIA=m
1013CONFIG_I2C_VIAPRO=m
709 1014
710# 1015#
711# SPI support 1016# I2C system bus drivers (mostly embedded / system-on-chip)
712# 1017#
1018CONFIG_I2C_OCORES=m
1019CONFIG_I2C_SIMTEC=m
1020
1021#
1022# External I2C/SMBus adapter drivers
1023#
1024CONFIG_I2C_PARPORT_LIGHT=m
1025CONFIG_I2C_TAOS_EVM=m
1026
1027#
1028# Other I2C/SMBus bus drivers
1029#
1030CONFIG_I2C_PCA_PLATFORM=m
1031CONFIG_I2C_STUB=m
1032
1033#
1034# Miscellaneous I2C Chip support
1035#
1036CONFIG_SENSORS_TSL2550=m
1037# CONFIG_I2C_DEBUG_CORE is not set
1038# CONFIG_I2C_DEBUG_ALGO is not set
1039# CONFIG_I2C_DEBUG_BUS is not set
1040# CONFIG_I2C_DEBUG_CHIP is not set
713# CONFIG_SPI is not set 1041# CONFIG_SPI is not set
714# CONFIG_SPI_MASTER is not set 1042
1043#
1044# PPS support
1045#
1046CONFIG_PPS=m
1047# CONFIG_PPS_DEBUG is not set
715# CONFIG_W1 is not set 1048# CONFIG_W1 is not set
716# CONFIG_POWER_SUPPLY is not set 1049# CONFIG_POWER_SUPPLY is not set
717# CONFIG_HWMON is not set 1050# CONFIG_HWMON is not set
1051CONFIG_THERMAL=m
1052# CONFIG_WATCHDOG is not set
1053CONFIG_SSB_POSSIBLE=y
718 1054
719# 1055#
720# Multifunction device drivers 1056# Sonics Silicon Backplane
721# 1057#
722# CONFIG_MFD_SM501 is not set 1058CONFIG_SSB=m
1059CONFIG_SSB_SPROM=y
1060CONFIG_SSB_PCIHOST_POSSIBLE=y
1061CONFIG_SSB_PCIHOST=y
1062CONFIG_SSB_B43_PCI_BRIDGE=y
1063# CONFIG_SSB_SILENT is not set
1064# CONFIG_SSB_DEBUG is not set
1065CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
1066CONFIG_SSB_DRIVER_PCICORE=y
1067# CONFIG_SSB_DRIVER_MIPS is not set
723 1068
724# 1069#
725# Multimedia devices 1070# Multifunction device drivers
726# 1071#
727# CONFIG_VIDEO_DEV is not set 1072# CONFIG_MFD_CORE is not set
728# CONFIG_DVB_CORE is not set 1073# CONFIG_MFD_SM501 is not set
729# CONFIG_DAB is not set 1074# CONFIG_HTC_PASIC3 is not set
1075# CONFIG_MFD_TMIO is not set
1076# CONFIG_MFD_WM8400 is not set
1077CONFIG_MFD_WM8350=m
1078CONFIG_MFD_WM8350_I2C=m
1079CONFIG_MFD_PCF50633=m
1080CONFIG_PCF50633_ADC=m
1081CONFIG_PCF50633_GPIO=m
1082CONFIG_AB3100_CORE=m
1083CONFIG_AB3100_OTP=m
1084# CONFIG_REGULATOR is not set
1085# CONFIG_MEDIA_SUPPORT is not set
730 1086
731# 1087#
732# Graphics support 1088# Graphics support
733# 1089#
734# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 1090# CONFIG_VGA_ARB is not set
735 1091# CONFIG_DRM is not set
736#
737# Display device support
738#
739# CONFIG_DISPLAY_SUPPORT is not set
740# CONFIG_VGASTATE is not set 1092# CONFIG_VGASTATE is not set
741# CONFIG_VIDEO_OUTPUT_CONTROL is not set 1093# CONFIG_VIDEO_OUTPUT_CONTROL is not set
742# CONFIG_FB is not set 1094# CONFIG_FB is not set
1095# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
743 1096
744# 1097#
745# Sound 1098# Display device support
746# 1099#
1100# CONFIG_DISPLAY_SUPPORT is not set
747# CONFIG_SOUND is not set 1101# CONFIG_SOUND is not set
748CONFIG_USB_SUPPORT=y 1102CONFIG_USB_SUPPORT=y
749CONFIG_USB_ARCH_HAS_HCD=y 1103CONFIG_USB_ARCH_HAS_HCD=y
750CONFIG_USB_ARCH_HAS_OHCI=y 1104CONFIG_USB_ARCH_HAS_OHCI=y
751CONFIG_USB_ARCH_HAS_EHCI=y 1105CONFIG_USB_ARCH_HAS_EHCI=y
752# CONFIG_USB is not set 1106# CONFIG_USB is not set
1107# CONFIG_USB_OTG_WHITELIST is not set
1108# CONFIG_USB_OTG_BLACKLIST_HUB is not set
753 1109
754# 1110#
755# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' 1111# Enable Host or Gadget support to see Inventra options
756# 1112#
757 1113
758# 1114#
759# USB Gadget Support 1115# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
760# 1116#
761# CONFIG_USB_GADGET is not set 1117# CONFIG_USB_GADGET is not set
1118
1119#
1120# OTG and related infrastructure
1121#
1122# CONFIG_UWB is not set
762# CONFIG_MMC is not set 1123# CONFIG_MMC is not set
763# CONFIG_NEW_LEDS is not set 1124# CONFIG_MEMSTICK is not set
764# CONFIG_INFINIBAND is not set 1125CONFIG_NEW_LEDS=y
765# CONFIG_RTC_CLASS is not set 1126CONFIG_LEDS_CLASS=m
1127
1128#
1129# LED drivers
1130#
1131CONFIG_LEDS_LP3944=m
1132CONFIG_LEDS_PCA955X=m
1133CONFIG_LEDS_WM8350=m
1134CONFIG_LEDS_BD2802=m
1135
1136#
1137# LED Triggers
1138#
1139CONFIG_LEDS_TRIGGERS=y
1140CONFIG_LEDS_TRIGGER_TIMER=m
1141CONFIG_LEDS_TRIGGER_HEARTBEAT=m
1142CONFIG_LEDS_TRIGGER_BACKLIGHT=m
1143CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
766 1144
767# 1145#
768# DMA Engine support 1146# iptables trigger is under Netfilter config (LED target)
769# 1147#
770# CONFIG_DMA_ENGINE is not set 1148# CONFIG_ACCESSIBILITY is not set
1149# CONFIG_INFINIBAND is not set
1150CONFIG_RTC_LIB=y
1151CONFIG_RTC_CLASS=y
1152CONFIG_RTC_HCTOSYS=y
1153CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1154# CONFIG_RTC_DEBUG is not set
771 1155
772# 1156#
773# DMA Clients 1157# RTC interfaces
774# 1158#
1159CONFIG_RTC_INTF_SYSFS=y
1160CONFIG_RTC_INTF_PROC=y
1161CONFIG_RTC_INTF_DEV=y
1162# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1163# CONFIG_RTC_DRV_TEST is not set
775 1164
776# 1165#
777# DMA Devices 1166# I2C RTC drivers
1167#
1168# CONFIG_RTC_DRV_DS1307 is not set
1169# CONFIG_RTC_DRV_DS1374 is not set
1170# CONFIG_RTC_DRV_DS1672 is not set
1171# CONFIG_RTC_DRV_MAX6900 is not set
1172# CONFIG_RTC_DRV_RS5C372 is not set
1173# CONFIG_RTC_DRV_ISL1208 is not set
1174# CONFIG_RTC_DRV_X1205 is not set
1175# CONFIG_RTC_DRV_PCF8563 is not set
1176# CONFIG_RTC_DRV_PCF8583 is not set
1177# CONFIG_RTC_DRV_M41T80 is not set
1178# CONFIG_RTC_DRV_BQ32K is not set
1179# CONFIG_RTC_DRV_S35390A is not set
1180# CONFIG_RTC_DRV_FM3130 is not set
1181# CONFIG_RTC_DRV_RX8581 is not set
1182# CONFIG_RTC_DRV_RX8025 is not set
1183
1184#
1185# SPI RTC drivers
1186#
1187
778# 1188#
1189# Platform RTC drivers
1190#
1191# CONFIG_RTC_DRV_CMOS is not set
1192# CONFIG_RTC_DRV_DS1286 is not set
1193# CONFIG_RTC_DRV_DS1511 is not set
1194# CONFIG_RTC_DRV_DS1553 is not set
1195# CONFIG_RTC_DRV_DS1742 is not set
1196# CONFIG_RTC_DRV_STK17TA8 is not set
1197# CONFIG_RTC_DRV_M48T86 is not set
1198CONFIG_RTC_DRV_M48T35=y
1199# CONFIG_RTC_DRV_M48T59 is not set
1200# CONFIG_RTC_DRV_MSM6242 is not set
1201# CONFIG_RTC_DRV_BQ4802 is not set
1202# CONFIG_RTC_DRV_RP5C01 is not set
1203# CONFIG_RTC_DRV_V3020 is not set
1204# CONFIG_RTC_DRV_WM8350 is not set
1205# CONFIG_RTC_DRV_PCF50633 is not set
1206CONFIG_RTC_DRV_AB3100=m
779 1207
780# 1208#
781# Userspace I/O 1209# on-CPU RTC drivers
782# 1210#
1211# CONFIG_DMADEVICES is not set
1212# CONFIG_AUXDISPLAY is not set
783CONFIG_UIO=y 1213CONFIG_UIO=y
784# CONFIG_UIO_CIF is not set 1214# CONFIG_UIO_CIF is not set
1215# CONFIG_UIO_PDRV is not set
1216# CONFIG_UIO_PDRV_GENIRQ is not set
1217CONFIG_UIO_SMX=m
1218CONFIG_UIO_AEC=m
1219CONFIG_UIO_SERCOS3=m
1220CONFIG_UIO_PCI_GENERIC=m
1221
1222#
1223# TI VLYNQ
1224#
1225# CONFIG_STAGING is not set
785 1226
786# 1227#
787# File systems 1228# File systems
@@ -792,36 +1233,58 @@ CONFIG_EXT2_FS_POSIX_ACL=y
792CONFIG_EXT2_FS_SECURITY=y 1233CONFIG_EXT2_FS_SECURITY=y
793# CONFIG_EXT2_FS_XIP is not set 1234# CONFIG_EXT2_FS_XIP is not set
794CONFIG_EXT3_FS=y 1235CONFIG_EXT3_FS=y
1236# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
795CONFIG_EXT3_FS_XATTR=y 1237CONFIG_EXT3_FS_XATTR=y
796CONFIG_EXT3_FS_POSIX_ACL=y 1238CONFIG_EXT3_FS_POSIX_ACL=y
797CONFIG_EXT3_FS_SECURITY=y 1239CONFIG_EXT3_FS_SECURITY=y
798# CONFIG_EXT4DEV_FS is not set 1240CONFIG_EXT4_FS=y
1241CONFIG_EXT4_FS_XATTR=y
1242CONFIG_EXT4_FS_POSIX_ACL=y
1243CONFIG_EXT4_FS_SECURITY=y
1244# CONFIG_EXT4_DEBUG is not set
799CONFIG_JBD=y 1245CONFIG_JBD=y
800CONFIG_JBD_DEBUG=y 1246CONFIG_JBD2=y
801CONFIG_FS_MBCACHE=y 1247CONFIG_FS_MBCACHE=y
802# CONFIG_REISERFS_FS is not set 1248# CONFIG_REISERFS_FS is not set
803# CONFIG_JFS_FS is not set 1249# CONFIG_JFS_FS is not set
804CONFIG_FS_POSIX_ACL=y 1250CONFIG_FS_POSIX_ACL=y
805CONFIG_XFS_FS=m 1251CONFIG_XFS_FS=m
806CONFIG_XFS_QUOTA=y 1252CONFIG_XFS_QUOTA=y
807CONFIG_XFS_SECURITY=y
808CONFIG_XFS_POSIX_ACL=y 1253CONFIG_XFS_POSIX_ACL=y
809# CONFIG_XFS_RT is not set 1254# CONFIG_XFS_RT is not set
1255# CONFIG_XFS_DEBUG is not set
810# CONFIG_GFS2_FS is not set 1256# CONFIG_GFS2_FS is not set
811# CONFIG_OCFS2_FS is not set 1257# CONFIG_OCFS2_FS is not set
812# CONFIG_MINIX_FS is not set 1258CONFIG_BTRFS_FS=m
813# CONFIG_ROMFS_FS is not set 1259CONFIG_BTRFS_FS_POSIX_ACL=y
1260# CONFIG_NILFS2_FS is not set
1261CONFIG_FILE_LOCKING=y
1262CONFIG_FSNOTIFY=y
1263CONFIG_DNOTIFY=y
814CONFIG_INOTIFY=y 1264CONFIG_INOTIFY=y
815CONFIG_INOTIFY_USER=y 1265CONFIG_INOTIFY_USER=y
816# CONFIG_QUOTA is not set 1266# CONFIG_QUOTA is not set
1267CONFIG_QUOTA_NETLINK_INTERFACE=y
817CONFIG_QUOTACTL=y 1268CONFIG_QUOTACTL=y
818CONFIG_DNOTIFY=y
819CONFIG_AUTOFS_FS=m 1269CONFIG_AUTOFS_FS=m
820# CONFIG_AUTOFS4_FS is not set 1270# CONFIG_AUTOFS4_FS is not set
821CONFIG_FUSE_FS=m 1271CONFIG_FUSE_FS=m
1272CONFIG_CUSE=m
822CONFIG_GENERIC_ACL=y 1273CONFIG_GENERIC_ACL=y
823 1274
824# 1275#
1276# Caches
1277#
1278CONFIG_FSCACHE=m
1279CONFIG_FSCACHE_STATS=y
1280# CONFIG_FSCACHE_HISTOGRAM is not set
1281# CONFIG_FSCACHE_DEBUG is not set
1282# CONFIG_FSCACHE_OBJECT_LIST is not set
1283CONFIG_CACHEFILES=m
1284# CONFIG_CACHEFILES_DEBUG is not set
1285# CONFIG_CACHEFILES_HISTOGRAM is not set
1286
1287#
825# CD-ROM/DVD Filesystems 1288# CD-ROM/DVD Filesystems
826# 1289#
827# CONFIG_ISO9660_FS is not set 1290# CONFIG_ISO9660_FS is not set
@@ -840,16 +1303,13 @@ CONFIG_GENERIC_ACL=y
840CONFIG_PROC_FS=y 1303CONFIG_PROC_FS=y
841CONFIG_PROC_KCORE=y 1304CONFIG_PROC_KCORE=y
842CONFIG_PROC_SYSCTL=y 1305CONFIG_PROC_SYSCTL=y
1306CONFIG_PROC_PAGE_MONITOR=y
843CONFIG_SYSFS=y 1307CONFIG_SYSFS=y
844CONFIG_TMPFS=y 1308CONFIG_TMPFS=y
845CONFIG_TMPFS_POSIX_ACL=y 1309CONFIG_TMPFS_POSIX_ACL=y
846# CONFIG_HUGETLB_PAGE is not set 1310# CONFIG_HUGETLB_PAGE is not set
847CONFIG_RAMFS=y
848CONFIG_CONFIGFS_FS=m 1311CONFIG_CONFIGFS_FS=m
849 1312CONFIG_MISC_FILESYSTEMS=y
850#
851# Miscellaneous filesystems
852#
853# CONFIG_ADFS_FS is not set 1313# CONFIG_ADFS_FS is not set
854# CONFIG_AFFS_FS is not set 1314# CONFIG_AFFS_FS is not set
855# CONFIG_ECRYPT_FS is not set 1315# CONFIG_ECRYPT_FS is not set
@@ -859,28 +1319,32 @@ CONFIG_CONFIGFS_FS=m
859# CONFIG_BFS_FS is not set 1319# CONFIG_BFS_FS is not set
860# CONFIG_EFS_FS is not set 1320# CONFIG_EFS_FS is not set
861# CONFIG_CRAMFS is not set 1321# CONFIG_CRAMFS is not set
1322CONFIG_SQUASHFS=m
1323# CONFIG_SQUASHFS_EMBEDDED is not set
1324CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
862# CONFIG_VXFS_FS is not set 1325# CONFIG_VXFS_FS is not set
1326# CONFIG_MINIX_FS is not set
1327CONFIG_OMFS_FS=m
863# CONFIG_HPFS_FS is not set 1328# CONFIG_HPFS_FS is not set
864# CONFIG_QNX4FS_FS is not set 1329# CONFIG_QNX4FS_FS is not set
1330# CONFIG_ROMFS_FS is not set
865# CONFIG_SYSV_FS is not set 1331# CONFIG_SYSV_FS is not set
866# CONFIG_UFS_FS is not set 1332# CONFIG_UFS_FS is not set
867 1333CONFIG_EXOFS_FS=m
868# 1334# CONFIG_EXOFS_DEBUG is not set
869# Network File Systems 1335CONFIG_NETWORK_FILESYSTEMS=y
870#
871CONFIG_NFS_FS=y 1336CONFIG_NFS_FS=y
872CONFIG_NFS_V3=y 1337CONFIG_NFS_V3=y
873# CONFIG_NFS_V3_ACL is not set 1338# CONFIG_NFS_V3_ACL is not set
874# CONFIG_NFS_V4 is not set 1339# CONFIG_NFS_V4 is not set
875# CONFIG_NFS_DIRECTIO is not set
876# CONFIG_NFSD is not set
877# CONFIG_ROOT_NFS is not set 1340# CONFIG_ROOT_NFS is not set
1341# CONFIG_NFSD is not set
878CONFIG_LOCKD=y 1342CONFIG_LOCKD=y
879CONFIG_LOCKD_V4=y 1343CONFIG_LOCKD_V4=y
1344CONFIG_EXPORTFS=m
880CONFIG_NFS_COMMON=y 1345CONFIG_NFS_COMMON=y
881CONFIG_SUNRPC=y 1346CONFIG_SUNRPC=y
882CONFIG_SUNRPC_GSS=y 1347CONFIG_SUNRPC_GSS=y
883# CONFIG_SUNRPC_BIND34 is not set
884CONFIG_RPCSEC_GSS_KRB5=y 1348CONFIG_RPCSEC_GSS_KRB5=y
885# CONFIG_RPCSEC_GSS_SPKM3 is not set 1349# CONFIG_RPCSEC_GSS_SPKM3 is not set
886# CONFIG_SMB_FS is not set 1350# CONFIG_SMB_FS is not set
@@ -910,35 +1374,37 @@ CONFIG_SGI_PARTITION=y
910# CONFIG_KARMA_PARTITION is not set 1374# CONFIG_KARMA_PARTITION is not set
911# CONFIG_EFI_PARTITION is not set 1375# CONFIG_EFI_PARTITION is not set
912# CONFIG_SYSV68_PARTITION is not set 1376# CONFIG_SYSV68_PARTITION is not set
913
914#
915# Native Language Support
916#
917# CONFIG_NLS is not set 1377# CONFIG_NLS is not set
918
919#
920# Distributed Lock Manager
921#
922CONFIG_DLM=m 1378CONFIG_DLM=m
923# CONFIG_DLM_DEBUG is not set 1379# CONFIG_DLM_DEBUG is not set
924 1380
925# 1381#
926# Profiling support
927#
928# CONFIG_PROFILING is not set
929
930#
931# Kernel hacking 1382# Kernel hacking
932# 1383#
933CONFIG_TRACE_IRQFLAGS_SUPPORT=y 1384CONFIG_TRACE_IRQFLAGS_SUPPORT=y
934# CONFIG_PRINTK_TIME is not set 1385# CONFIG_PRINTK_TIME is not set
1386CONFIG_ENABLE_WARN_DEPRECATED=y
935CONFIG_ENABLE_MUST_CHECK=y 1387CONFIG_ENABLE_MUST_CHECK=y
1388CONFIG_FRAME_WARN=2048
936# CONFIG_MAGIC_SYSRQ is not set 1389# CONFIG_MAGIC_SYSRQ is not set
1390# CONFIG_STRIP_ASM_SYMS is not set
937# CONFIG_UNUSED_SYMBOLS is not set 1391# CONFIG_UNUSED_SYMBOLS is not set
938# CONFIG_DEBUG_FS is not set 1392# CONFIG_DEBUG_FS is not set
939# CONFIG_HEADERS_CHECK is not set 1393# CONFIG_HEADERS_CHECK is not set
940# CONFIG_DEBUG_KERNEL is not set 1394# CONFIG_DEBUG_KERNEL is not set
941CONFIG_CROSSCOMPILE=y 1395# CONFIG_DEBUG_MEMORY_INIT is not set
1396# CONFIG_RCU_CPU_STALL_DETECTOR is not set
1397# CONFIG_SYSCTL_SYSCALL_CHECK is not set
1398CONFIG_HAVE_FUNCTION_TRACER=y
1399CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
1400CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
1401CONFIG_HAVE_DYNAMIC_FTRACE=y
1402CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
1403CONFIG_TRACING_SUPPORT=y
1404# CONFIG_FTRACE is not set
1405# CONFIG_SAMPLES is not set
1406CONFIG_HAVE_ARCH_KGDB=y
1407CONFIG_EARLY_PRINTK=y
942# CONFIG_CMDLINE_BOOL is not set 1408# CONFIG_CMDLINE_BOOL is not set
943 1409
944# 1410#
@@ -947,65 +1413,140 @@ CONFIG_CROSSCOMPILE=y
947CONFIG_KEYS=y 1413CONFIG_KEYS=y
948CONFIG_KEYS_DEBUG_PROC_KEYS=y 1414CONFIG_KEYS_DEBUG_PROC_KEYS=y
949# CONFIG_SECURITY is not set 1415# CONFIG_SECURITY is not set
950CONFIG_XOR_BLOCKS=m 1416CONFIG_SECURITYFS=y
951CONFIG_ASYNC_CORE=m 1417# CONFIG_DEFAULT_SECURITY_SELINUX is not set
952CONFIG_ASYNC_MEMCPY=m 1418# CONFIG_DEFAULT_SECURITY_SMACK is not set
953CONFIG_ASYNC_XOR=m 1419# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1420CONFIG_DEFAULT_SECURITY_DAC=y
1421CONFIG_DEFAULT_SECURITY=""
1422CONFIG_XOR_BLOCKS=y
1423CONFIG_ASYNC_CORE=y
1424CONFIG_ASYNC_MEMCPY=y
1425CONFIG_ASYNC_XOR=y
1426CONFIG_ASYNC_PQ=y
1427CONFIG_ASYNC_RAID6_RECOV=y
954CONFIG_CRYPTO=y 1428CONFIG_CRYPTO=y
1429
1430#
1431# Crypto core or helper
1432#
1433CONFIG_CRYPTO_FIPS=y
955CONFIG_CRYPTO_ALGAPI=y 1434CONFIG_CRYPTO_ALGAPI=y
956CONFIG_CRYPTO_ABLKCIPHER=m 1435CONFIG_CRYPTO_ALGAPI2=y
1436CONFIG_CRYPTO_AEAD=m
1437CONFIG_CRYPTO_AEAD2=y
957CONFIG_CRYPTO_BLKCIPHER=y 1438CONFIG_CRYPTO_BLKCIPHER=y
1439CONFIG_CRYPTO_BLKCIPHER2=y
958CONFIG_CRYPTO_HASH=y 1440CONFIG_CRYPTO_HASH=y
1441CONFIG_CRYPTO_HASH2=y
1442CONFIG_CRYPTO_RNG=m
1443CONFIG_CRYPTO_RNG2=y
1444CONFIG_CRYPTO_PCOMP=y
959CONFIG_CRYPTO_MANAGER=y 1445CONFIG_CRYPTO_MANAGER=y
1446CONFIG_CRYPTO_MANAGER2=y
1447CONFIG_CRYPTO_GF128MUL=m
1448CONFIG_CRYPTO_NULL=m
1449CONFIG_CRYPTO_WORKQUEUE=y
1450CONFIG_CRYPTO_CRYPTD=m
1451CONFIG_CRYPTO_AUTHENC=m
1452# CONFIG_CRYPTO_TEST is not set
1453
1454#
1455# Authenticated Encryption with Associated Data
1456#
1457CONFIG_CRYPTO_CCM=m
1458CONFIG_CRYPTO_GCM=m
1459CONFIG_CRYPTO_SEQIV=m
1460
1461#
1462# Block modes
1463#
1464CONFIG_CRYPTO_CBC=y
1465CONFIG_CRYPTO_CTR=m
1466CONFIG_CRYPTO_CTS=m
1467CONFIG_CRYPTO_ECB=m
1468CONFIG_CRYPTO_LRW=m
1469CONFIG_CRYPTO_PCBC=m
1470CONFIG_CRYPTO_XTS=m
1471
1472#
1473# Hash modes
1474#
960CONFIG_CRYPTO_HMAC=y 1475CONFIG_CRYPTO_HMAC=y
961CONFIG_CRYPTO_XCBC=m 1476CONFIG_CRYPTO_XCBC=m
962CONFIG_CRYPTO_NULL=m 1477CONFIG_CRYPTO_VMAC=m
1478
1479#
1480# Digest
1481#
1482CONFIG_CRYPTO_CRC32C=m
1483CONFIG_CRYPTO_GHASH=m
963CONFIG_CRYPTO_MD4=m 1484CONFIG_CRYPTO_MD4=m
964CONFIG_CRYPTO_MD5=y 1485CONFIG_CRYPTO_MD5=y
1486CONFIG_CRYPTO_MICHAEL_MIC=m
1487CONFIG_CRYPTO_RMD128=m
1488CONFIG_CRYPTO_RMD160=m
1489CONFIG_CRYPTO_RMD256=m
1490CONFIG_CRYPTO_RMD320=m
965CONFIG_CRYPTO_SHA1=m 1491CONFIG_CRYPTO_SHA1=m
966CONFIG_CRYPTO_SHA256=m 1492CONFIG_CRYPTO_SHA256=m
967CONFIG_CRYPTO_SHA512=m 1493CONFIG_CRYPTO_SHA512=m
968CONFIG_CRYPTO_WP512=m
969CONFIG_CRYPTO_TGR192=m 1494CONFIG_CRYPTO_TGR192=m
970CONFIG_CRYPTO_GF128MUL=m 1495CONFIG_CRYPTO_WP512=m
971CONFIG_CRYPTO_ECB=m 1496
972CONFIG_CRYPTO_CBC=y 1497#
973CONFIG_CRYPTO_PCBC=m 1498# Ciphers
974CONFIG_CRYPTO_LRW=m 1499#
975CONFIG_CRYPTO_CRYPTD=m
976CONFIG_CRYPTO_DES=y
977CONFIG_CRYPTO_FCRYPT=m
978CONFIG_CRYPTO_BLOWFISH=m
979CONFIG_CRYPTO_TWOFISH=m
980CONFIG_CRYPTO_TWOFISH_COMMON=m
981CONFIG_CRYPTO_SERPENT=m
982CONFIG_CRYPTO_AES=m 1500CONFIG_CRYPTO_AES=m
1501CONFIG_CRYPTO_ANUBIS=m
1502CONFIG_CRYPTO_ARC4=m
1503CONFIG_CRYPTO_BLOWFISH=m
1504CONFIG_CRYPTO_CAMELLIA=m
983CONFIG_CRYPTO_CAST5=m 1505CONFIG_CRYPTO_CAST5=m
984CONFIG_CRYPTO_CAST6=m 1506CONFIG_CRYPTO_CAST6=m
985CONFIG_CRYPTO_TEA=m 1507CONFIG_CRYPTO_DES=y
986CONFIG_CRYPTO_ARC4=m 1508CONFIG_CRYPTO_FCRYPT=m
987CONFIG_CRYPTO_KHAZAD=m 1509CONFIG_CRYPTO_KHAZAD=m
988CONFIG_CRYPTO_ANUBIS=m 1510CONFIG_CRYPTO_SALSA20=m
1511CONFIG_CRYPTO_SEED=m
1512CONFIG_CRYPTO_SERPENT=m
1513CONFIG_CRYPTO_TEA=m
1514CONFIG_CRYPTO_TWOFISH=m
1515CONFIG_CRYPTO_TWOFISH_COMMON=m
1516
1517#
1518# Compression
1519#
989CONFIG_CRYPTO_DEFLATE=m 1520CONFIG_CRYPTO_DEFLATE=m
990CONFIG_CRYPTO_MICHAEL_MIC=m 1521CONFIG_CRYPTO_ZLIB=m
991CONFIG_CRYPTO_CRC32C=m 1522CONFIG_CRYPTO_LZO=m
992CONFIG_CRYPTO_CAMELLIA=m 1523
993# CONFIG_CRYPTO_TEST is not set 1524#
1525# Random Number Generation
1526#
1527CONFIG_CRYPTO_ANSI_CPRNG=m
994CONFIG_CRYPTO_HW=y 1528CONFIG_CRYPTO_HW=y
1529CONFIG_CRYPTO_DEV_HIFN_795X=m
1530# CONFIG_CRYPTO_DEV_HIFN_795X_RNG is not set
1531# CONFIG_BINARY_PRINTF is not set
995 1532
996# 1533#
997# Library routines 1534# Library routines
998# 1535#
999CONFIG_BITREVERSE=y 1536CONFIG_BITREVERSE=y
1537CONFIG_GENERIC_FIND_LAST_BIT=y
1000CONFIG_CRC_CCITT=m 1538CONFIG_CRC_CCITT=m
1001# CONFIG_CRC16 is not set 1539CONFIG_CRC16=y
1002# CONFIG_CRC_ITU_T is not set 1540CONFIG_CRC_T10DIF=m
1541CONFIG_CRC_ITU_T=m
1003CONFIG_CRC32=y 1542CONFIG_CRC32=y
1004# CONFIG_CRC7 is not set 1543CONFIG_CRC7=m
1005CONFIG_LIBCRC32C=m 1544CONFIG_LIBCRC32C=m
1006CONFIG_ZLIB_INFLATE=m 1545CONFIG_ZLIB_INFLATE=m
1007CONFIG_ZLIB_DEFLATE=m 1546CONFIG_ZLIB_DEFLATE=m
1008CONFIG_PLIST=y 1547CONFIG_LZO_COMPRESS=m
1548CONFIG_LZO_DECOMPRESS=m
1009CONFIG_HAS_IOMEM=y 1549CONFIG_HAS_IOMEM=y
1010CONFIG_HAS_IOPORT=y 1550CONFIG_HAS_IOPORT=y
1011CONFIG_HAS_DMA=y 1551CONFIG_HAS_DMA=y
1552CONFIG_NLATTR=y
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 9c187a64649b..758ad426c57f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void)
287static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) 287static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
288{ 288{
289#ifdef __NEED_VMBITS_PROBE 289#ifdef __NEED_VMBITS_PROBE
290 write_c0_entryhi(0x3ffffffffffff000ULL); 290 write_c0_entryhi(0x3fffffffffffe000ULL);
291 back_to_back_c0_hazard(); 291 back_to_back_c0_hazard();
292 c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL); 292 c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
293#endif 293#endif
294} 294}
295 295
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 338dfe8ed002..31b204b26ba0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void)
1501 cp0_perfcount_irq = -1; 1501 cp0_perfcount_irq = -1;
1502 } else { 1502 } else {
1503 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; 1503 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
1504 cp0_compare_irq_shift = cp0_compare_irq;
1504 cp0_perfcount_irq = -1; 1505 cp0_perfcount_irq = -1;
1505 } 1506 }
1506 1507
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 94e05e5733c1..e06f1af760a7 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
174 * Probe Octeon's caches 174 * Probe Octeon's caches
175 * 175 *
176 */ 176 */
177static void __devinit probe_octeon(void) 177static void __cpuinit probe_octeon(void)
178{ 178{
179 unsigned long icache_size; 179 unsigned long icache_size;
180 unsigned long dcache_size; 180 unsigned long dcache_size;
@@ -235,7 +235,7 @@ static void __devinit probe_octeon(void)
235 * Setup the Octeon cache flush routines 235 * Setup the Octeon cache flush routines
236 * 236 *
237 */ 237 */
238void __devinit octeon_cache_init(void) 238void __cpuinit octeon_cache_init(void)
239{ 239{
240 extern unsigned long ebase; 240 extern unsigned long ebase;
241 extern char except_vec2_octeon; 241 extern char except_vec2_octeon;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 102b2dfa542a..e716cafc346d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -155,7 +155,7 @@ static inline void setup_protection_map(void)
155 protection_map[15] = PAGE_SHARED; 155 protection_map[15] = PAGE_SHARED;
156} 156}
157 157
158void __devinit cpu_cache_init(void) 158void __cpuinit cpu_cache_init(void)
159{ 159{
160 if (cpu_has_3k_cache) { 160 if (cpu_has_3k_cache) {
161 extern void __weak r3k_cache_init(void); 161 extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index e274fda329f4..127d732474bf 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <linux/highmem.h> 2#include <linux/highmem.h>
3#include <linux/sched.h>
3#include <linux/smp.h> 4#include <linux/smp.h>
4#include <asm/fixmap.h> 5#include <asm/fixmap.h>
5#include <asm/tlbflush.h> 6#include <asm/tlbflush.h>
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 46f00691f448..31e2583ec622 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void)
404 if (!rm200_pic_master) 404 if (!rm200_pic_master)
405 return; 405 return;
406 rm200_pic_slave = ioremap_nocache(0x160000a0, 4); 406 rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
407 if (!rm200_pic_master) { 407 if (!rm200_pic_slave) {
408 iounmap(rm200_pic_master); 408 iounmap(rm200_pic_master);
409 return; 409 return;
410 } 410 }
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 524d9352f17e..f388dc68f605 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,7 +18,6 @@ config PARISC
18 select BUG 18 select BUG
19 select HAVE_PERF_EVENTS 19 select HAVE_PERF_EVENTS
20 select GENERIC_ATOMIC64 if !64BIT 20 select GENERIC_ATOMIC64 if !64BIT
21 select HAVE_ARCH_TRACEHOOK
22 help 21 help
23 The PA-RISC microprocessor is designed by Hewlett-Packard and used 22 The PA-RISC microprocessor is designed by Hewlett-Packard and used
24 in many of their workstations & servers (HP9000 700 and 800 series, 23 in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index f7064abc3bb6..9e74bfe071dc 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -18,7 +18,6 @@
18 18
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/system.h> 20#include <asm/system.h>
21#include <asm/cache.h> /* for L1_CACHE_BYTES */
22#include <asm/superio.h> 21#include <asm/superio.h>
23 22
24#define DEBUG_RESOURCES 0 23#define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
123 } else { 122 } else {
124 printk(KERN_WARNING "pci_bios != NULL but init() is!\n"); 123 printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
125 } 124 }
125
126 /* Set the CLS for PCI as early as possible. */
127 pci_cache_line_size = pci_dfl_cache_line_size;
128
126 return 0; 129 return 0;
127} 130}
128 131
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
171 ** upper byte is PCI_LATENCY_TIMER. 174 ** upper byte is PCI_LATENCY_TIMER.
172 */ 175 */
173 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, 176 pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
174 (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32))); 177 (0x80 << 8) | pci_cache_line_size);
175} 178}
176 179
177 180
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index fb37ac52e46c..35c827e94e31 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
468 recalc_sigpending(); 468 recalc_sigpending();
469 spin_unlock_irq(&current->sighand->siglock); 469 spin_unlock_irq(&current->sighand->siglock);
470 470
471 tracehook_signal_handler(sig, info, ka, regs, 0); 471 tracehook_signal_handler(sig, info, ka, regs,
472 test_thread_flag(TIF_SINGLESTEP) ||
473 test_thread_flag(TIF_BLOCKSTEP));
472 474
473 return 1; 475 return 1;
474} 476}
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361f..1ec06576f619 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
63 if (huge) { 63 if (huge) {
64#ifdef CONFIG_HUGETLB_PAGE 64#ifdef CONFIG_HUGETLB_PAGE
65 psize = get_slice_psize(mm, addr); 65 psize = get_slice_psize(mm, addr);
66 /* Mask the address for the correct page size */
67 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
66#else 68#else
67 BUG(); 69 BUG();
68 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ 70 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
69#endif 71#endif
70 } else 72 } else {
71 psize = pte_pagesize_index(mm, addr, pte); 73 psize = pte_pagesize_index(mm, addr, pte);
74 /* Mask the address for the standard page size. If we
75 * have a 64k page kernel, but the hardware does not
76 * support 64k pages, this might be different from the
77 * hardware page size encoded in the slice table. */
78 addr &= PAGE_MASK;
79 }
72 80
73 /* Mask the address for the correct page size */
74 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
75 81
76 /* Build full vaddr */ 82 /* Build full vaddr */
77 if (!is_kernel_addr(addr)) { 83 if (!is_kernel_addr(addr)) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8c445b..cc29c0f5300d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
338 } 338 }
339 339
340 mpic = mpic_alloc(np, r.start, 340 mpic = mpic_alloc(np, r.start,
341 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 341 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
342 MPIC_BROKEN_FRR_NIRQS,
342 0, 256, " OpenPIC "); 343 0, 256, " OpenPIC ");
343 BUG_ON(mpic == NULL); 344 BUG_ON(mpic == NULL);
344 of_node_put(np); 345 of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 04160a4cc699..a15f582300d8 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
46 __iomem u32 *bptr_vaddr; 46 __iomem u32 *bptr_vaddr;
47 struct device_node *np; 47 struct device_node *np;
48 int n = 0; 48 int n = 0;
49 int ioremappable;
49 50
50 WARN_ON (nr < 0 || nr >= NR_CPUS); 51 WARN_ON (nr < 0 || nr >= NR_CPUS);
51 52
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
59 return; 60 return;
60 } 61 }
61 62
63 /*
64 * A secondary core could be in a spinloop in the bootpage
65 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
66 * The bootpage and highmem can be accessed via ioremap(), but
67 * we need to directly access the spinloop if its in lowmem.
68 */
69 ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
70
62 /* Map the spin table */ 71 /* Map the spin table */
63 bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY); 72 if (ioremappable)
73 bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
74 else
75 bptr_vaddr = phys_to_virt(*cpu_rel_addr);
64 76
65 local_irq_save(flags); 77 local_irq_save(flags);
66 78
67 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); 79 out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
68 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); 80 out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
69 81
82 if (!ioremappable)
83 flush_dcache_range((ulong)bptr_vaddr,
84 (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
85
70 /* Wait a bit for the CPU to ack. */ 86 /* Wait a bit for the CPU to ack. */
71 while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) 87 while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
72 mdelay(1); 88 mdelay(1);
73 89
74 local_irq_restore(flags); 90 local_irq_restore(flags);
75 91
76 iounmap(bptr_vaddr); 92 if (ioremappable)
93 iounmap(bptr_vaddr);
77 94
78 pr_debug("waited %d msecs for CPU #%d.\n", n, nr); 95 pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
79} 96}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1ee66db003be..f5f79196721c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
784{ 784{
785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
786 786
787 BUG_ON(os_cppr->index != 0); 787 /*
788 * we only really want to set the priority when there's
789 * just one cppr value on the stack
790 */
791 WARN_ON(os_cppr->index != 0);
788 792
789 os_cppr->stack[os_cppr->index] = cppr; 793 os_cppr->stack[0] = cppr;
790 794
791 if (firmware_has_feature(FW_FEATURE_LPAR)) 795 if (firmware_has_feature(FW_FEATURE_LPAR))
792 lpar_cppr_info(cppr); 796 lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
821 825
822void xics_teardown_cpu(void) 826void xics_teardown_cpu(void)
823{ 827{
828 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
824 int cpu = smp_processor_id(); 829 int cpu = smp_processor_id();
825 830
831 /*
832 * we have to reset the cppr index to 0 because we're
833 * not going to return from the IPI
834 */
835 os_cppr->index = 0;
826 xics_set_cpu_priority(0); 836 xics_set_cpu_priority(0);
827 837
828 /* Clear any pending IPI request */ 838 /* Clear any pending IPI request */
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 341aff2687a5..cd128b07beda 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -288,46 +288,30 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
288 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 288 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
289 sb->s_magic = HYPFS_MAGIC; 289 sb->s_magic = HYPFS_MAGIC;
290 sb->s_op = &hypfs_s_ops; 290 sb->s_op = &hypfs_s_ops;
291 if (hypfs_parse_options(data, sb)) { 291 if (hypfs_parse_options(data, sb))
292 rc = -EINVAL; 292 return -EINVAL;
293 goto err_alloc;
294 }
295 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755); 293 root_inode = hypfs_make_inode(sb, S_IFDIR | 0755);
296 if (!root_inode) { 294 if (!root_inode)
297 rc = -ENOMEM; 295 return -ENOMEM;
298 goto err_alloc;
299 }
300 root_inode->i_op = &simple_dir_inode_operations; 296 root_inode->i_op = &simple_dir_inode_operations;
301 root_inode->i_fop = &simple_dir_operations; 297 root_inode->i_fop = &simple_dir_operations;
302 root_dentry = d_alloc_root(root_inode); 298 sb->s_root = root_dentry = d_alloc_root(root_inode);
303 if (!root_dentry) { 299 if (!root_dentry) {
304 iput(root_inode); 300 iput(root_inode);
305 rc = -ENOMEM; 301 return -ENOMEM;
306 goto err_alloc;
307 } 302 }
308 if (MACHINE_IS_VM) 303 if (MACHINE_IS_VM)
309 rc = hypfs_vm_create_files(sb, root_dentry); 304 rc = hypfs_vm_create_files(sb, root_dentry);
310 else 305 else
311 rc = hypfs_diag_create_files(sb, root_dentry); 306 rc = hypfs_diag_create_files(sb, root_dentry);
312 if (rc) 307 if (rc)
313 goto err_tree; 308 return rc;
314 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 309 sbi->update_file = hypfs_create_update_file(sb, root_dentry);
315 if (IS_ERR(sbi->update_file)) { 310 if (IS_ERR(sbi->update_file))
316 rc = PTR_ERR(sbi->update_file); 311 return PTR_ERR(sbi->update_file);
317 goto err_tree;
318 }
319 hypfs_update_update(sb); 312 hypfs_update_update(sb);
320 sb->s_root = root_dentry;
321 pr_info("Hypervisor filesystem mounted\n"); 313 pr_info("Hypervisor filesystem mounted\n");
322 return 0; 314 return 0;
323
324err_tree:
325 hypfs_delete_tree(root_dentry);
326 d_genocide(root_dentry);
327 dput(root_dentry);
328err_alloc:
329 kfree(sbi);
330 return rc;
331} 315}
332 316
333static int hypfs_get_super(struct file_system_type *fst, int flags, 317static int hypfs_get_super(struct file_system_type *fst, int flags,
@@ -340,12 +324,12 @@ static void hypfs_kill_super(struct super_block *sb)
340{ 324{
341 struct hypfs_sb_info *sb_info = sb->s_fs_info; 325 struct hypfs_sb_info *sb_info = sb->s_fs_info;
342 326
343 if (sb->s_root) { 327 if (sb->s_root)
344 hypfs_delete_tree(sb->s_root); 328 hypfs_delete_tree(sb->s_root);
329 if (sb_info->update_file)
345 hypfs_remove(sb_info->update_file); 330 hypfs_remove(sb_info->update_file);
346 kfree(sb->s_fs_info); 331 kfree(sb->s_fs_info);
347 sb->s_fs_info = NULL; 332 sb->s_fs_info = NULL;
348 }
349 kill_litter_super(sb); 333 kill_litter_super(sb);
350} 334}
351 335
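
The hypfs_fill_super() rewrite drops the err_tree/err_alloc labels: sb->s_root is published as soon as the root dentry exists, so every later failure can simply return and let hypfs_kill_super() tear down whatever was built (note the matching change that makes the kill path tolerate a missing update file). A rough sketch of that partial-construction, single-teardown pattern in plain C, with made-up fs_state/fs_build/fs_teardown names rather than the VFS API:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the pieces hypfs_fill_super() builds up. */
    struct fs_state {
        char *root;           /* think: root dentry      */
        char *update_file;    /* think: sbi->update_file */
    };

    /* Single teardown, safe to call on any partially built state. */
    static void fs_teardown(struct fs_state *s)
    {
        free(s->update_file);
        free(s->root);
        s->update_file = NULL;
        s->root = NULL;
    }

    /* Fail at any step and just return; the caller's teardown handles it. */
    static int fs_build(struct fs_state *s, int fail_step)
    {
        s->root = malloc(16);
        if (!s->root || fail_step == 1)
            return -1;

        s->update_file = malloc(16);
        if (!s->update_file || fail_step == 2)
            return -1;

        return 0;
    }

    int main(void)
    {
        struct fs_state s = { 0 };

        if (fs_build(&s, 2) < 0)
            printf("build failed, cleaning up whatever exists\n");
        fs_teardown(&s);
        return 0;
    }
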
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f2ef4b619ce1..c25dfac7dd76 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -293,12 +293,12 @@ struct _lowcore
293 __u64 clock_comparator; /* 0x02d0 */ 293 __u64 clock_comparator; /* 0x02d0 */
294 __u32 machine_flags; /* 0x02d8 */ 294 __u32 machine_flags; /* 0x02d8 */
295 __u32 ftrace_func; /* 0x02dc */ 295 __u32 ftrace_func; /* 0x02dc */
296 __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ 296 __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
297 297
298 /* Interrupt response block */ 298 /* Interrupt response block */
299 __u8 irb[64]; /* 0x0300 */ 299 __u8 irb[64]; /* 0x0300 */
300 300
301 __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ 301 __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
302 302
303 /* 303 /*
304 * 0xe00 contains the address of the IPL Parameter Information 304 * 0xe00 contains the address of the IPL Parameter Information
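
The lowcore fix renames the two padding arrays so the offset encoded in each name (0x02e0 and 0x0340) matches where the pad really starts, keeping the fixed 0x0300/0x0e00 landmarks in place. A compile-time offsetof assertion is the usual way to keep such a layout honest; the sketch below uses a small made-up structure, not the real struct _lowcore (inside the kernel this would typically be a BUILD_BUG_ON).

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy fixed-offset layout in the spirit of struct _lowcore. */
    struct fixed_layout {
        uint64_t clock_comparator;              /* 0x000 */
        uint32_t machine_flags;                 /* 0x008 */
        uint32_t ftrace_func;                   /* 0x00c */
        uint8_t  pad_0x010[0x040 - 0x010];      /* 0x010 */
        uint8_t  irb[64];                       /* 0x040 */
        uint8_t  pad_0x080[0x100 - 0x080];      /* 0x080 */
    };

    /* Catch a mis-sized pad at compile time rather than at run time. */
    _Static_assert(offsetof(struct fixed_layout, irb) == 0x040,
                   "irb must start at 0x040");
    _Static_assert(sizeof(struct fixed_layout) == 0x100,
                   "layout must end at 0x100");

    int main(void)
    {
        printf("irb offset: %#zx\n", offsetof(struct fixed_layout, irb));
        return 0;
    }
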
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3f7e2a22c7c2..f6a389c996cb 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
132 mov #1, r5 132 mov #1, r5
133 133
134call_handle_tlbmiss: 134call_handle_tlbmiss:
135 setup_frame_reg
136 mov.l 1f, r0 135 mov.l 1f, r0
137 mov r5, r8 136 mov r5, r8
138 mov.l @r0, r6 137 mov.l @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
365 mov.l @k2, k2 ! read out vector and keep in k2 364 mov.l @k2, k2 ! read out vector and keep in k2
366 365
367handle_exception_special: 366handle_exception_special:
367 setup_frame_reg
368
368 ! Setup return address and jump to exception handler 369 ! Setup return address and jump to exception handler
369 mov.l 7f, r9 ! fetch return address 370 mov.l 7f, r9 ! fetch return address
370 stc r2_bank, r0 ! k2 (vector) 371 stc r2_bank, r0 ! k2 (vector)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 88d28ec3780a..e51168064e56 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
540 mempool_free(frame, dwarf_frame_pool); 540 mempool_free(frame, dwarf_frame_pool);
541} 541}
542 542
543extern void ret_from_irq(void);
544
543/** 545/**
544 * dwarf_unwind_stack - unwind the stack 546 * dwarf_unwind_stack - unwind the stack
545 * 547 *
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
678 addr = frame->cfa + reg->addr; 680 addr = frame->cfa + reg->addr;
679 frame->return_addr = __raw_readl(addr); 681 frame->return_addr = __raw_readl(addr);
680 682
683 /*
684 * Ah, the joys of unwinding through interrupts.
685 *
686 * Interrupts are tricky - the DWARF info needs to be _really_
687 * accurate and unfortunately I'm seeing a lot of bogus DWARF
688 * info. For example, I've seen interrupts occur in epilogues
689 * just after the frame pointer (r14) had been restored. The
690 * problem was that the DWARF info claimed that the CFA could be
691 * reached by using the value of the frame pointer before it was
692 * restored.
693 *
694 * So until the compiler can be trusted to produce reliable
695 * DWARF info when it really matters, let's stop unwinding once
696 * we've calculated the function that was interrupted.
697 */
698 if (prev && prev->pc == (unsigned long)ret_from_irq)
699 frame->return_addr = 0;
700
681 return frame; 701 return frame;
682 702
683bail: 703bail:
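
By zeroing return_addr on the frame reached from ret_from_irq (that is, the function that was interrupted), the unwinder gives its callers an unambiguous stop marker instead of letting them wander through unreliable DWARF data past the interrupt boundary. The sketch below shows a walk loop that honours such a zero terminator, using a toy frame record rather than struct dwarf_frame:

    #include <stdio.h>

    /* Toy frame record; the real code builds these from DWARF CFI. */
    struct toy_frame {
        unsigned long pc;
        unsigned long return_addr;   /* 0 means "stop unwinding here" */
    };

    static void walk(const struct toy_frame *frames, int n)
    {
        for (int i = 0; i < n; i++) {
            printf("  #%d pc=%#lx\n", i, frames[i].pc);
            if (!frames[i].return_addr) {
                printf("  (return address cleared, stopping)\n");
                break;
            }
        }
    }

    int main(void)
    {
        /* Pretend frame 1 was the function interrupted by an IRQ. */
        struct toy_frame trace[] = {
            { 0x8c0010a0UL, 0x8c002000UL },
            { 0x8c002000UL, 0 },              /* cleared by the unwinder   */
            { 0xdeadbeefUL, 0xdeadbeefUL },   /* bogus data beyond the IRQ */
        };

        walk(trace, 3);
        return 0;
    }
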
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f0abd58c3a69..2b15ae60c3a0 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -70,8 +70,14 @@ ret_from_exception:
70 CFI_STARTPROC simple 70 CFI_STARTPROC simple
71 CFI_DEF_CFA r14, 0 71 CFI_DEF_CFA r14, 0
72 CFI_REL_OFFSET 17, 64 72 CFI_REL_OFFSET 17, 64
73 CFI_REL_OFFSET 15, 0 73 CFI_REL_OFFSET 15, 60
74 CFI_REL_OFFSET 14, 56 74 CFI_REL_OFFSET 14, 56
75 CFI_REL_OFFSET 13, 52
76 CFI_REL_OFFSET 12, 48
77 CFI_REL_OFFSET 11, 44
78 CFI_REL_OFFSET 10, 40
79 CFI_REL_OFFSET 9, 36
80 CFI_REL_OFFSET 8, 32
75 preempt_stop() 81 preempt_stop()
76ENTRY(ret_from_irq) 82ENTRY(ret_from_irq)
77 ! 83 !
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 873ebdc4f98e..b063eb8b18e3 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -133,6 +133,8 @@ void user_enable_single_step(struct task_struct *child)
133 struct pt_regs *regs = child->thread.uregs; 133 struct pt_regs *regs = child->thread.uregs;
134 134
135 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */ 135 regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
136
137 set_tsk_thread_flag(child, TIF_SINGLESTEP);
136} 138}
137 139
138void user_disable_single_step(struct task_struct *child) 140void user_disable_single_step(struct task_struct *child)
@@ -140,6 +142,8 @@ void user_disable_single_step(struct task_struct *child)
140 struct pt_regs *regs = child->thread.uregs; 142 struct pt_regs *regs = child->thread.uregs;
141 143
142 regs->sr &= ~SR_SSTEP; 144 regs->sr &= ~SR_SSTEP;
145
146 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
143} 147}
144 148
145static int genregs_get(struct task_struct *target, 149static int genregs_get(struct task_struct *target,
@@ -454,6 +458,8 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
454 458
455asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) 459asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
456{ 460{
461 int step;
462
457 if (unlikely(current->audit_context)) 463 if (unlikely(current->audit_context))
458 audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]), 464 audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
459 regs->regs[9]); 465 regs->regs[9]);
@@ -461,8 +467,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
461 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 467 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
462 trace_sys_exit(regs, regs->regs[9]); 468 trace_sys_exit(regs, regs->regs[9]);
463 469
464 if (test_thread_flag(TIF_SYSCALL_TRACE)) 470 step = test_thread_flag(TIF_SINGLESTEP);
465 tracehook_report_syscall_exit(regs, 0); 471 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
472 tracehook_report_syscall_exit(regs, step);
466} 473}
467 474
468/* Called with interrupts disabled */ 475/* Called with interrupts disabled */
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index ce76dbdef294..580e97d46ca5 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -118,7 +118,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
118 * clear the TS_RESTORE_SIGMASK flag. 118 * clear the TS_RESTORE_SIGMASK flag.
119 */ 119 */
120 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 120 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
121 tracehook_signal_handler(signr, &info, &ka, regs, 0); 121
122 tracehook_signal_handler(signr, &info, &ka, regs,
123 test_thread_flag(TIF_SINGLESTEP));
122 return 1; 124 return 1;
123 } 125 }
124 } 126 }
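
Both sh64 hunks hang single-step state off TIF_SINGLESTEP: ptrace sets and clears the flag, syscall exit reports a completed step even when plain syscall tracing is off, and signal delivery passes the same flag to tracehook_signal_handler(). The sketch below reproduces only that flag bookkeeping with an ordinary flag word and invented helper names; the kernel uses set_tsk_thread_flag()/test_thread_flag() and the tracehook callbacks.

    #include <stdbool.h>
    #include <stdio.h>

    #define TIF_SINGLESTEP (1u << 4)   /* bit value chosen for illustration */

    static unsigned int thread_flags;

    static void enable_single_step(void)  { thread_flags |= TIF_SINGLESTEP; }
    static void disable_single_step(void) { thread_flags &= ~TIF_SINGLESTEP; }

    /* Mirrors do_syscall_trace_leave(): report a step even without TRACE. */
    static void syscall_exit(bool syscall_trace)
    {
        bool step = thread_flags & TIF_SINGLESTEP;

        if (step || syscall_trace)
            printf("report syscall exit (step=%d)\n", step);
    }

    int main(void)
    {
        enable_single_step();
        syscall_exit(false);    /* reported because of the pending step */
        disable_single_step();
        syscall_exit(false);    /* nothing to report                    */
        return 0;
    }
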
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
index 55db5eca08e2..39327d6a57eb 100644
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@ struct stat {
53 ino_t st_ino; 53 ino_t st_ino;
54 mode_t st_mode; 54 mode_t st_mode;
55 short st_nlink; 55 short st_nlink;
56 uid_t st_uid; 56 uid16_t st_uid;
57 gid_t st_gid; 57 gid16_t st_gid;
58 unsigned short st_rdev; 58 unsigned short st_rdev;
59 off_t st_size; 59 off_t st_size;
60 time_t st_atime; 60 time_t st_atime;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 4248d969272f..5247283d1c03 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
11{ 11{
12 unsigned long base = (unsigned long) tp; 12 unsigned long base = (unsigned long) tp;
13 13
14 /* Stack pointer must be 16-byte aligned. */
15 if (sp & (16UL - 1))
16 return false;
17
14 if (sp >= (base + sizeof(struct thread_info)) && 18 if (sp >= (base + sizeof(struct thread_info)) &&
15 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) 19 sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
16 return true; 20 return true;
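
The new test rejects any saved stack pointer whose low four bits are set before doing the range check, since valid sparc64 kernel frames are 16-byte aligned and a misaligned value cannot be a real frame. The check is a single mask, shown below as a tiny standalone helper (is_aligned() is a made-up name):

    #include <stdbool.h>
    #include <stdio.h>

    /* Power-of-two alignment test: the low bits must all be zero. */
    static bool is_aligned(unsigned long sp, unsigned long align)
    {
        return (sp & (align - 1UL)) == 0;
    }

    int main(void)
    {
        printf("%d\n", is_aligned(0x7ffff000UL, 16));   /* 1: valid candidate */
        printf("%d\n", is_aligned(0x7ffff008UL, 16));   /* 0: rejected early  */
        return 0;
    }
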
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 4c26eb59e742..53a58b349849 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
105 105
106static int of_bus_ambapp_match(struct device_node *np) 106static int of_bus_ambapp_match(struct device_node *np)
107{ 107{
108 return !strcmp(np->name, "ambapp"); 108 return !strcmp(np->type, "ambapp");
109} 109}
110 110
111static void of_bus_ambapp_count_cells(struct device_node *child, 111static void of_bus_ambapp_count_cells(struct device_node *child,
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 539e83f8e087..592b03d85167 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
247 struct pci_bus *bus, int devfn) 247 struct pci_bus *bus, int devfn)
248{ 248{
249 struct dev_archdata *sd; 249 struct dev_archdata *sd;
250 struct pci_slot *slot;
250 struct of_device *op; 251 struct of_device *op;
251 struct pci_dev *dev; 252 struct pci_dev *dev;
252 const char *type; 253 const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
286 dev->dev.bus = &pci_bus_type; 287 dev->dev.bus = &pci_bus_type;
287 dev->devfn = devfn; 288 dev->devfn = devfn;
288 dev->multifunction = 0; /* maybe a lie? */ 289 dev->multifunction = 0; /* maybe a lie? */
290 set_pcie_port_type(dev);
291
292 list_for_each_entry(slot, &dev->bus->slots, list)
293 if (PCI_SLOT(dev->devfn) == slot->number)
294 dev->slot = slot;
289 295
290 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); 296 dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
291 dev->device = of_getintprop_default(node, "device-id", 0xffff); 297 dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
322 328
323 dev->current_state = 4; /* unknown power state */ 329 dev->current_state = 4; /* unknown power state */
324 dev->error_state = pci_channel_io_normal; 330 dev->error_state = pci_channel_io_normal;
331 dev->dma_mask = 0xffffffff;
325 332
326 if (!strcmp(node->name, "pci")) { 333 if (!strcmp(node->name, "pci")) {
327 /* a PCI-PCI bridge */ 334 /* a PCI-PCI bridge */
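
The added loop matches dev->slot by comparing PCI_SLOT(dev->devfn) against each slot registered on the bus. devfn packs the device and function numbers into one byte, so the comparison boils down to a shift and a mask; the demo below uses the same definitions the kernel macros expand to:

    #include <stdio.h>

    /* Same definitions as the kernel's PCI_SLOT()/PCI_FUNC(). */
    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    int main(void)
    {
        unsigned int devfn = (0x1c << 3) | 0x2;   /* device 0x1c, function 2 */

        printf("devfn %#x -> slot %#x, function %u\n",
               devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
        /* A struct pci_slot whose ->number equals 0x1c would match here. */
        return 0;
    }
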
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 2830b415e214..c49865b30719 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
526 * Set some valid stack frames to give to the child. 526 * Set some valid stack frames to give to the child.
527 */ 527 */
528 childstack = (struct sparc_stackf __user *) 528 childstack = (struct sparc_stackf __user *)
529 (sp & ~0x7UL); 529 (sp & ~0xfUL);
530 parentstack = (struct sparc_stackf __user *) 530 parentstack = (struct sparc_stackf __user *)
531 regs->u_regs[UREG_FP]; 531 regs->u_regs[UREG_FP];
532 532
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index c3f1cce0e95e..cb70476bd8f5 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
398 } else 398 } else
399 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); 399 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
400 400
401 /* Now 8-byte align the stack as this is mandatory in the 401 /* Now align the stack as this is mandatory in the Sparc ABI
402 * Sparc ABI due to how register windows work. This hides 402 * due to how register windows work. This hides the
403 * the restriction from thread libraries etc. -DaveM 403 * restriction from thread libraries etc.
404 */ 404 */
405 csp &= ~7UL; 405 csp &= ~15UL;
406 406
407 distance = fp - psp; 407 distance = fp - psp;
408 rval = (csp - distance); 408 rval = (csp - distance);
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ba5b09ad6666..ea22cd373c64 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
120}; 120};
121 121
122/* Align macros */ 122/* Align macros */
123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) 123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) 124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
125 125
126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
127{ 127{
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
420 sp = current->sas_ss_sp + current->sas_ss_size; 420 sp = current->sas_ss_sp + current->sas_ss_size;
421 } 421 }
422 422
423 sp -= framesize;
424
423 /* Always align the stack frame. This handles two cases. First, 425 /* Always align the stack frame. This handles two cases. First,
424 * sigaltstack need not be mindful of platform specific stack 426 * sigaltstack need not be mindful of platform specific stack
425 * alignment. Second, if we took this signal because the stack 427 * alignment. Second, if we took this signal because the stack
426 * is not aligned properly, we'd like to take the signal cleanly 428 * is not aligned properly, we'd like to take the signal cleanly
427 * and report that. 429 * and report that.
428 */ 430 */
429 sp &= ~7UL; 431 sp &= ~15UL;
430 432
431 return (void __user *)(sp - framesize); 433 return (void __user *) sp;
432} 434}
433 435
434static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) 436static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 7ce1a1005b1d..9882df92ba0a 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
267 sp = current->sas_ss_sp + current->sas_ss_size; 267 sp = current->sas_ss_sp + current->sas_ss_size;
268 } 268 }
269 269
270 sp -= framesize;
271
270 /* Always align the stack frame. This handles two cases. First, 272 /* Always align the stack frame. This handles two cases. First,
271 * sigaltstack need not be mindful of platform specific stack 273 * sigaltstack need not be mindful of platform specific stack
272 * alignment. Second, if we took this signal because the stack 274 * alignment. Second, if we took this signal because the stack
273 * is not aligned properly, we'd like to take the signal cleanly 275 * is not aligned properly, we'd like to take the signal cleanly
274 * and report that. 276 * and report that.
275 */ 277 */
276 sp &= ~7UL; 278 sp &= ~15UL;
277 279
278 return (void __user *)(sp - framesize); 280 return (void __user *) sp;
279} 281}
280 282
281static inline int 283static inline int
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 647afbda7ae1..9fa48c30037e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -353,7 +353,7 @@ segv:
353/* Checks if the fp is valid */ 353/* Checks if the fp is valid */
354static int invalid_frame_pointer(void __user *fp, int fplen) 354static int invalid_frame_pointer(void __user *fp, int fplen)
355{ 355{
356 if (((unsigned long) fp) & 7) 356 if (((unsigned long) fp) & 15)
357 return 1; 357 return 1;
358 return 0; 358 return 0;
359} 359}
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
396 sp = current->sas_ss_sp + current->sas_ss_size; 396 sp = current->sas_ss_sp + current->sas_ss_size;
397 } 397 }
398 398
399 sp -= framesize;
400
399 /* Always align the stack frame. This handles two cases. First, 401 /* Always align the stack frame. This handles two cases. First,
400 * sigaltstack need not be mindful of platform specific stack 402 * sigaltstack need not be mindful of platform specific stack
401 * alignment. Second, if we took this signal because the stack 403 * alignment. Second, if we took this signal because the stack
402 * is not aligned properly, we'd like to take the signal cleanly 404 * is not aligned properly, we'd like to take the signal cleanly
403 * and report that. 405 * and report that.
404 */ 406 */
405 sp &= ~7UL; 407 sp &= ~15UL;
406 408
407 return (void __user *)(sp - framesize); 409 return (void __user *) sp;
408} 410}
409 411
410static inline void 412static inline void
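
All of the sparc hunks above tighten stack alignment from 8 to 16 bytes, and the three get_sigframe() copies additionally subtract the frame size before masking rather than after. Masking first can leave the final frame misaligned whenever framesize itself is not a multiple of the alignment; the small program below shows the difference with one concrete value (0x1cc):

    #include <stdio.h>

    #define ALIGN_MASK 15UL   /* new sparc requirement: 16-byte frames */

    /* Old order: align the stack top, then carve out the frame. */
    static unsigned long old_get_sigframe(unsigned long sp, unsigned long framesize)
    {
        sp &= ~7UL;           /* old 8-byte mask, applied too early */
        return sp - framesize;
    }

    /* New order: carve out the frame first, then align its start. */
    static unsigned long new_get_sigframe(unsigned long sp, unsigned long framesize)
    {
        sp -= framesize;
        sp &= ~ALIGN_MASK;
        return sp;
    }

    int main(void)
    {
        unsigned long sp = 0x7fff0000UL;
        unsigned long framesize = 0x1cc;   /* not a multiple of 16 */

        printf("old: %#lx (low bits %#lx)\n",
               old_get_sigframe(sp, framesize),
               old_get_sigframe(sp, framesize) & ALIGN_MASK);
        printf("new: %#lx (low bits %#lx)\n",
               new_get_sigframe(sp, framesize),
               new_get_sigframe(sp, framesize) & ALIGN_MASK);
        return 0;
    }
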
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 8c91d9b29a2f..db15d123f054 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -191,10 +191,12 @@ tsb_dtlb_load:
191 191
192tsb_itlb_load: 192tsb_itlb_load:
193 /* Executable bit must be set. */ 193 /* Executable bit must be set. */
194661: andcc %g5, _PAGE_EXEC_4U, %g0 194661: sethi %hi(_PAGE_EXEC_4U), %g4
195 .section .sun4v_1insn_patch, "ax" 195 andcc %g5, %g4, %g0
196 .section .sun4v_2insn_patch, "ax"
196 .word 661b 197 .word 661b
197 andcc %g5, _PAGE_EXEC_4V, %g0 198 andcc %g5, _PAGE_EXEC_4V, %g0
199 nop
198 .previous 200 .previous
199 201
200 be,pn %xcc, tsb_do_fault 202 be,pn %xcc, tsb_do_fault
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 3b3c36601a7b..de317d0c3294 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -140,7 +140,7 @@ void mconsole_proc(struct mc_request *req)
140 goto out; 140 goto out;
141 } 141 }
142 142
143 err = may_open(&nd.path, MAY_READ, FMODE_READ); 143 err = may_open(&nd.path, MAY_READ, O_RDONLY);
144 if (result) { 144 if (result) {
145 mconsole_reply(req, "Failed to open file", 1, 0); 145 mconsole_reply(req, "Failed to open file", 1, 0);
146 path_put(&nd.path); 146 path_put(&nd.path);
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1994d3f58443..f2ad2163109d 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -170,10 +170,7 @@ static inline void elf_common_init(struct thread_struct *t,
170} 170}
171 171
172#define ELF_PLAT_INIT(_r, load_addr) \ 172#define ELF_PLAT_INIT(_r, load_addr) \
173do { \ 173 elf_common_init(&current->thread, _r, 0)
174 elf_common_init(&current->thread, _r, 0); \
175 clear_thread_flag(TIF_IA32); \
176} while (0)
177 174
178#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \ 175#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
179 elf_common_init(&current->thread, regs, __USER_DS) 176 elf_common_init(&current->thread, regs, __USER_DS)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fc801bab1b3b..b753ea59703a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -450,6 +450,8 @@ struct thread_struct {
450 struct perf_event *ptrace_bps[HBP_NUM]; 450 struct perf_event *ptrace_bps[HBP_NUM];
451 /* Debug status used for traps, single steps, etc... */ 451 /* Debug status used for traps, single steps, etc... */
452 unsigned long debugreg6; 452 unsigned long debugreg6;
453 /* Keep track of the exact dr7 value set by the user */
454 unsigned long ptrace_dr7;
453 /* Fault info: */ 455 /* Fault info: */
454 unsigned long cr2; 456 unsigned long cr2;
455 unsigned long trap_no; 457 unsigned long trap_no;
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index ecb544e65382..e04740f7a0bb 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -11,9 +11,9 @@
11#include <linux/irqflags.h> 11#include <linux/irqflags.h>
12 12
13/* entries in ARCH_DLINFO: */ 13/* entries in ARCH_DLINFO: */
14#ifdef CONFIG_IA32_EMULATION 14#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
15# define AT_VECTOR_SIZE_ARCH 2 15# define AT_VECTOR_SIZE_ARCH 2
16#else 16#else /* else it's non-compat x86-64 */
17# define AT_VECTOR_SIZE_ARCH 1 17# define AT_VECTOR_SIZE_ARCH 1
18#endif 18#endif
19 19
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 036d28adf59d..af1c5833ff23 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void)
1185 if (!error) { 1185 if (!error) {
1186 acpi_lapic = 1; 1186 acpi_lapic = 1;
1187 1187
1188#ifdef CONFIG_X86_BIGSMP
1189 generic_bigsmp_probe();
1190#endif
1191 /* 1188 /*
1192 * Parse MADT IO-APIC entries 1189 * Parse MADT IO-APIC entries
1193 */ 1190 */
@@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void)
1197 acpi_ioapic = 1; 1194 acpi_ioapic = 1;
1198 1195
1199 smp_found_config = 1; 1196 smp_found_config = 1;
1200 if (apic->setup_apic_routing)
1201 apic->setup_apic_routing();
1202 } 1197 }
1203 } 1198 }
1204 if (error == -EINVAL) { 1199 if (error == -EINVAL) {
@@ -1349,14 +1344,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
1349 }, 1344 },
1350 { 1345 {
1351 .callback = force_acpi_ht, 1346 .callback = force_acpi_ht,
1352 .ident = "ASUS P2B-DS",
1353 .matches = {
1354 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1355 DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
1356 },
1357 },
1358 {
1359 .callback = force_acpi_ht,
1360 .ident = "ASUS CUR-DLS", 1347 .ident = "ASUS CUR-DLS",
1361 .matches = { 1348 .matches = {
1362 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 1349 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3987e4408f75..dfca210f6a10 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
1641#endif 1641#endif
1642 1642
1643 enable_IR_x2apic(); 1643 enable_IR_x2apic();
1644#ifdef CONFIG_X86_64
1645 default_setup_apic_routing(); 1644 default_setup_apic_routing();
1646#endif
1647 1645
1648 verify_local_APIC(); 1646 verify_local_APIC();
1649 connect_bsp_APIC(); 1647 connect_bsp_APIC();
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
1891 if (apicid > max_physical_apicid) 1889 if (apicid > max_physical_apicid)
1892 max_physical_apicid = apicid; 1890 max_physical_apicid = apicid;
1893 1891
1894#ifdef CONFIG_X86_32
1895 if (num_processors > 8) {
1896 switch (boot_cpu_data.x86_vendor) {
1897 case X86_VENDOR_INTEL:
1898 if (!APIC_XAPIC(version)) {
1899 def_to_bigsmp = 0;
1900 break;
1901 }
1902 /* If P4 and above fall through */
1903 case X86_VENDOR_AMD:
1904 def_to_bigsmp = 1;
1905 }
1906 }
1907#endif
1908
1909#if defined(CONFIG_SMP) || defined(CONFIG_X86_64) 1892#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
1910 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid; 1893 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
1911 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid; 1894 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1a6559f6768c..99d2fe016084 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
52} 52}
53late_initcall(print_ipi_mode); 53late_initcall(print_ipi_mode);
54 54
55void default_setup_apic_routing(void) 55void __init default_setup_apic_routing(void)
56{
57 int version = apic_version[boot_cpu_physical_apicid];
58
59 if (num_possible_cpus() > 8) {
60 switch (boot_cpu_data.x86_vendor) {
61 case X86_VENDOR_INTEL:
62 if (!APIC_XAPIC(version)) {
63 def_to_bigsmp = 0;
64 break;
65 }
66 /* If P4 and above fall through */
67 case X86_VENDOR_AMD:
68 def_to_bigsmp = 1;
69 }
70 }
71
72#ifdef CONFIG_X86_BIGSMP
73 generic_bigsmp_probe();
74#endif
75
76 if (apic->setup_apic_routing)
77 apic->setup_apic_routing();
78}
79
80static void setup_apic_flat_routing(void)
56{ 81{
57#ifdef CONFIG_X86_IO_APIC 82#ifdef CONFIG_X86_IO_APIC
58 printk(KERN_INFO 83 printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
103 .init_apic_ldr = default_init_apic_ldr, 128 .init_apic_ldr = default_init_apic_ldr,
104 129
105 .ioapic_phys_id_map = default_ioapic_phys_id_map, 130 .ioapic_phys_id_map = default_ioapic_phys_id_map,
106 .setup_apic_routing = default_setup_apic_routing, 131 .setup_apic_routing = setup_apic_flat_routing,
107 .multi_timer_check = NULL, 132 .multi_timer_check = NULL,
108 .apicid_to_node = default_apicid_to_node, 133 .apicid_to_node = default_apicid_to_node,
109 .cpu_to_logical_apicid = default_cpu_to_logical_apicid, 134 .cpu_to_logical_apicid = default_cpu_to_logical_apicid,
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 450fe2064a14..83e9be4778e2 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void)
67 } 67 }
68#endif 68#endif
69 69
70 if (apic == &apic_flat && num_processors > 8) 70 if (apic == &apic_flat && num_possible_cpus() > 8)
71 apic = &apic_physflat; 71 apic = &apic_physflat;
72 72
73 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); 73 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
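
Taken together, the x86 hunks move the 32-bit bigsmp decision out of the MADT and MP-table parsers and into default_setup_apic_routing(), keyed off num_possible_cpus() so the answer no longer depends on how many CPUs happen to have been enumerated when the check runs. The sketch below captures only the simplified shape of that decision with stand-in vendor and xAPIC flags; it glosses over generic_bigsmp_probe() and the per-driver setup_apic_routing hook.

    #include <stdbool.h>
    #include <stdio.h>

    enum vendor { VENDOR_INTEL, VENDOR_AMD };

    /*
     * Simplified stand-in for the moved logic: with more than 8 possible
     * CPUs, default to bigsmp unless an old (pre-xAPIC) Intel local APIC
     * rules it out.  "xapic" stands in for APIC_XAPIC(version).
     */
    static bool want_bigsmp(unsigned int possible_cpus, enum vendor v, bool xapic)
    {
        if (possible_cpus <= 8)
            return false;
        if (v == VENDOR_INTEL && !xapic)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", want_bigsmp(16, VENDOR_INTEL, true));    /* 1 */
        printf("%d\n", want_bigsmp(16, VENDOR_INTEL, false));   /* 0 */
        printf("%d\n", want_bigsmp(4, VENDOR_AMD, false));      /* 0 */
        return 0;
    }
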
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0..6e44519960c8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
1356 1356
1357 kfree(data->powernow_table); 1357 kfree(data->powernow_table);
1358 kfree(data); 1358 kfree(data);
1359 per_cpu(powernow_data, pol->cpu) = NULL;
1359 1360
1360 return 0; 1361 return 0;
1361} 1362}
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
1375 int err; 1376 int err;
1376 1377
1377 if (!data) 1378 if (!data)
1378 return -EINVAL; 1379 return 0;
1379 1380
1380 smp_call_function_single(cpu, query_values_on_cpu, &err, true); 1381 smp_call_function_single(cpu, query_values_on_cpu, &err, true);
1381 if (err) 1382 if (err)
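
powernowk8_get() returns unsigned int, so the old "return -EINVAL" reached cpufreq as a huge bogus frequency rather than an error; returning 0 is the conventional "frequency unknown" answer, and clearing per_cpu(powernow_data) on exit keeps the getter from touching freed data. The short demo below shows what a negative errno looks like after passing through an unsigned return type:

    #include <errno.h>
    #include <stdio.h>

    /* What the old error path effectively handed to callers. */
    static unsigned int broken_get(void)
    {
        return -EINVAL;    /* silently becomes a huge value */
    }

    static unsigned int fixed_get(void)
    {
        return 0;          /* 0 kHz means "frequency unknown" */
    }

    int main(void)
    {
        printf("old error path reports %u kHz\n", broken_get());
        printf("new error path reports %u kHz\n", fixed_get());
        return 0;
    }
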
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 05d5fec64a94..bb6006e3e295 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
212 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); 212 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
213} 213}
214 214
215/*
216 * Store a breakpoint's encoded address, length, and type.
217 */
218static int arch_store_info(struct perf_event *bp)
219{
220 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
221 /*
222 * For kernel-addresses, either the address or symbol name can be
223 * specified.
224 */
225 if (info->name)
226 info->address = (unsigned long)
227 kallsyms_lookup_name(info->name);
228 if (info->address)
229 return 0;
230
231 return -EINVAL;
232}
233
234int arch_bp_generic_fields(int x86_len, int x86_type, 215int arch_bp_generic_fields(int x86_len, int x86_type,
235 int *gen_len, int *gen_type) 216 int *gen_len, int *gen_type)
236{ 217{
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
362 return ret; 343 return ret;
363 } 344 }
364 345
365 ret = arch_store_info(bp); 346 /*
366 347 * For kernel-addresses, either the address or symbol name can be
367 if (ret < 0) 348 * specified.
368 return ret; 349 */
350 if (info->name)
351 info->address = (unsigned long)
352 kallsyms_lookup_name(info->name);
369 /* 353 /*
370 * Check that the low-order bits of the address are appropriate 354 * Check that the low-order bits of the address are appropriate
371 * for the alignment implied by len. 355 * for the alignment implied by len.
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 40b54ceb68b5..a2c1edd2d3ac 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
359 x86_init.mpparse.mpc_record(1); 359 x86_init.mpparse.mpc_record(1);
360 } 360 }
361 361
362#ifdef CONFIG_X86_BIGSMP
363 generic_bigsmp_probe();
364#endif
365
366 if (apic->setup_apic_routing)
367 apic->setup_apic_routing();
368
369 if (!num_processors) 362 if (!num_processors)
370 printk(KERN_ERR "MPTABLE: no processors registered!\n"); 363 printk(KERN_ERR "MPTABLE: no processors registered!\n");
371 return num_processors; 364 return num_processors;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 41a26a82470a..126f0b493d04 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -527,6 +527,7 @@ void set_personality_ia32(void)
527 527
528 /* Make sure to be in 32bit mode */ 528 /* Make sure to be in 32bit mode */
529 set_thread_flag(TIF_IA32); 529 set_thread_flag(TIF_IA32);
530 current->personality |= force_personality32;
530 531
531 /* Prepare the first "return" to user space */ 532 /* Prepare the first "return" to user space */
532 current_thread_info()->status |= TS_COMPAT; 533 current_thread_info()->status |= TS_COMPAT;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 017d937639fe..0c1033d61e59 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
702 } else if (n == 6) { 702 } else if (n == 6) {
703 val = thread->debugreg6; 703 val = thread->debugreg6;
704 } else if (n == 7) { 704 } else if (n == 7) {
705 val = ptrace_get_dr7(thread->ptrace_bps); 705 val = thread->ptrace_dr7;
706 } 706 }
707 return val; 707 return val;
708} 708}
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
778 return rc; 778 return rc;
779 } 779 }
780 /* All that's left is DR7 */ 780 /* All that's left is DR7 */
781 if (n == 7) 781 if (n == 7) {
782 rc = ptrace_write_dr7(tsk, val); 782 rc = ptrace_write_dr7(tsk, val);
783 if (!rc)
784 thread->ptrace_dr7 = val;
785 }
783 786
784ret_path: 787ret_path:
785 return rc; 788 return rc;
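
With the new thread_struct::ptrace_dr7 field added earlier in this series, ptrace keeps a software copy of the last DR7 value the debugger successfully wrote, so PTRACE_PEEKUSER on DR7 no longer has to reconstruct the register from the installed breakpoints. The sketch below shows that cache-on-successful-write pattern with invented names (hw_write_dr7, debug_state):

    #include <stdio.h>

    struct debug_state {
        unsigned long ptrace_dr7;   /* cached, exactly what the user wrote */
    };

    /* Stand-in for ptrace_write_dr7(); pretend odd values are rejected. */
    static int hw_write_dr7(unsigned long val)
    {
        return (val & 1UL) ? -22 : 0;   /* -EINVAL on "bad" input */
    }

    static int set_dr7(struct debug_state *dbg, unsigned long val)
    {
        int rc = hw_write_dr7(val);

        if (!rc)                        /* only cache values the write accepted */
            dbg->ptrace_dr7 = val;
        return rc;
    }

    static unsigned long get_dr7(const struct debug_state *dbg)
    {
        return dbg->ptrace_dr7;         /* what the n == 7 read path now returns */
    }

    int main(void)
    {
        struct debug_state dbg = { 0 };

        set_dr7(&dbg, 0x000d0402UL);    /* accepted and cached     */
        set_dr7(&dbg, 0x1UL);           /* rejected, cache untouched */
        printf("DR7 as seen by the debugger: %#lx\n", get_dr7(&dbg));
        return 0;
    }
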
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 678d0b8c26f3..b4e870cbdc60 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1083 set_cpu_sibling_map(0); 1083 set_cpu_sibling_map(0);
1084 1084
1085 enable_IR_x2apic(); 1085 enable_IR_x2apic();
1086#ifdef CONFIG_X86_64
1087 default_setup_apic_routing(); 1086 default_setup_apic_routing();
1088#endif
1089 1087
1090 if (smp_sanity_check(max_cpus) < 0) { 1088 if (smp_sanity_check(max_cpus) < 0) {
1091 printk(KERN_INFO "SMP disabled\n"); 1089 printk(KERN_INFO "SMP disabled\n");
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba49472a..15578f180e59 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
467 return -EOPNOTSUPP; 467 return -EOPNOTSUPP;
468 468
469 addr &= KVM_PIT_CHANNEL_MASK; 469 addr &= KVM_PIT_CHANNEL_MASK;
470 if (addr == 3)
471 return 0;
472
470 s = &pit_state->channels[addr]; 473 s = &pit_state->channels[addr];
471 474
472 mutex_lock(&pit_state->lock); 475 mutex_lock(&pit_state->lock);
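
kvm_pit_state has only three channels while the guest can address a fourth port (the 8254 read-back/control register), so indexing channels[addr] with addr == 3 walked off the end of the array; the added guard simply reports nothing to read. At its core this is an ordinary bounds check, sketched here with a toy array:

    #include <stdio.h>

    #define PIT_CHANNELS 3

    static int channels[PIT_CHANNELS] = { 11, 22, 33 };

    /* Returns 0 bytes read for the control port, like the patched handler. */
    static int pit_read(unsigned int addr, int *out)
    {
        addr &= 3;          /* mirror of the channel-mask step */
        if (addr == 3)
            return 0;       /* no data channel behind port 3 */
        *out = channels[addr];
        return 1;
    }

    int main(void)
    {
        int v;

        for (unsigned int a = 0; a < 4; a++) {
            if (pit_read(a, &v))
                printf("port %u -> %d\n", a, v);
            else
                printf("port %u -> ignored\n", a);
        }
        return 0;
    }
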
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ddcad452add..a1e1bc9d412d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
670{ 670{
671 static int version; 671 static int version;
672 struct pvclock_wall_clock wc; 672 struct pvclock_wall_clock wc;
673 struct timespec now, sys, boot; 673 struct timespec boot;
674 674
675 if (!wall_clock) 675 if (!wall_clock)
676 return; 676 return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
685 * wall clock specified here. guest system time equals host 685 * wall clock specified here. guest system time equals host
686 * system time for us, thus we must fill in host boot time here. 686 * system time for us, thus we must fill in host boot time here.
687 */ 687 */
688 now = current_kernel_time(); 688 getboottime(&boot);
689 ktime_get_ts(&sys);
690 boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
691 689
692 wc.sec = boot.tv_sec; 690 wc.sec = boot.tv_sec;
693 wc.nsec = boot.tv_nsec; 691 wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
762 local_irq_save(flags); 760 local_irq_save(flags);
763 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); 761 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
764 ktime_get_ts(&ts); 762 ktime_get_ts(&ts);
763 monotonic_to_bootbased(&ts);
765 local_irq_restore(flags); 764 local_irq_restore(flags);
766 765
767 /* With all the info we got, fill in the values */ 766 /* With all the info we got, fill in the values */
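
The wall-clock hunk swaps the open-coded "realtime now minus monotonic now" subtraction for getboottime(), and biases the per-vcpu timestamp with monotonic_to_bootbased() so both values sit on the same base. The underlying arithmetic can be reproduced from userspace as shown below; this is only an approximation, since CLOCK_MONOTONIC does not advance across suspend and the two reads are not taken atomically, which is part of what the kernel helpers get right.

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec real, mono;

        clock_gettime(CLOCK_REALTIME, &real);
        clock_gettime(CLOCK_MONOTONIC, &mono);

        /* Approximate boot epoch: wall-clock "now" minus time since boot. */
        time_t boot_sec = real.tv_sec - mono.tv_sec;
        long boot_nsec = real.tv_nsec - mono.tv_nsec;

        if (boot_nsec < 0) {            /* borrow from the seconds field */
            boot_sec -= 1;
            boot_nsec += 1000000000L;
        }
        printf("approximate boot time: %lld.%09ld\n",
               (long long)boot_sec, boot_nsec);
        return 0;
    }
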
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 71da1bca13cb..738e6593799d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
18#else 18#else
19 /* 19 /*
20 * With get_user_pages_fast, we walk down the pagetables without taking 20 * With get_user_pages_fast, we walk down the pagetables without taking
21 * any locks. For this we would like to load the pointers atoimcally, 21 * any locks. For this we would like to load the pointers atomically,
22 * but that is not possible (without expensive cmpxchg8b) on PAE. What 22 * but that is not possible (without expensive cmpxchg8b) on PAE. What
23 * we do have is the guarantee that a pte will only either go from not 23 * we do have is the guarantee that a pte will only either go from not
24 * present to present, or present to not present or both -- it will not 24 * present to present, or present to not present or both -- it will not
diff --git a/block/blk-core.c b/block/blk-core.c
index 718897e6d37f..d1a9a0a64f95 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1147 */ 1147 */
1148static inline bool queue_should_plug(struct request_queue *q) 1148static inline bool queue_should_plug(struct request_queue *q)
1149{ 1149{
1150 return !(blk_queue_nonrot(q) && blk_queue_queuing(q)); 1150 return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
1151} 1151}
1152 1152
1153static int __make_request(struct request_queue *q, struct bio *bio) 1153static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
1859 * and to it is freed is accounted as io that is in progress at 1859 * and to it is freed is accounted as io that is in progress at
1860 * the driver side. 1860 * the driver side.
1861 */ 1861 */
1862 if (blk_account_rq(rq)) { 1862 if (blk_account_rq(rq))
1863 q->in_flight[rq_is_sync(rq)]++; 1863 q->in_flight[rq_is_sync(rq)]++;
1864 /*
1865 * Mark this device as supporting hardware queuing, if
1866 * we have more IOs in flight than 4.
1867 */
1868 if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
1869 set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
1870 }
1871} 1864}
1872 1865
1873/** 1866/**
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 17b768d0d42f..023f4e69a337 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
42 */ 42 */
43#define CFQ_MIN_TT (2) 43#define CFQ_MIN_TT (2)
44 44
45/*
46 * Allow merged cfqqs to perform this amount of seeky I/O before
47 * deciding to break the queues up again.
48 */
49#define CFQQ_COOP_TOUT (HZ)
50
51#define CFQ_SLICE_SCALE (5) 45#define CFQ_SLICE_SCALE (5)
52#define CFQ_HW_QUEUE_MIN (5) 46#define CFQ_HW_QUEUE_MIN (5)
53#define CFQ_SERVICE_SHIFT 12 47#define CFQ_SERVICE_SHIFT 12
54 48
49#define CFQQ_SEEK_THR 8 * 1024
50#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
51
55#define RQ_CIC(rq) \ 52#define RQ_CIC(rq) \
56 ((struct cfq_io_context *) (rq)->elevator_private) 53 ((struct cfq_io_context *) (rq)->elevator_private)
57#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2) 54#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
137 u64 seek_total; 134 u64 seek_total;
138 sector_t seek_mean; 135 sector_t seek_mean;
139 sector_t last_request_pos; 136 sector_t last_request_pos;
140 unsigned long seeky_start;
141 137
142 pid_t pid; 138 pid_t pid;
143 139
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
314 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 310 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
315 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 311 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
316 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ 312 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
313 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
317 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ 314 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
318 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ 315 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
319}; 316};
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
342CFQ_CFQQ_FNS(slice_new); 339CFQ_CFQQ_FNS(slice_new);
343CFQ_CFQQ_FNS(sync); 340CFQ_CFQQ_FNS(sync);
344CFQ_CFQQ_FNS(coop); 341CFQ_CFQQ_FNS(coop);
342CFQ_CFQQ_FNS(split_coop);
345CFQ_CFQQ_FNS(deep); 343CFQ_CFQQ_FNS(deep);
346CFQ_CFQQ_FNS(wait_busy); 344CFQ_CFQQ_FNS(wait_busy);
347#undef CFQ_CFQQ_FNS 345#undef CFQ_CFQQ_FNS
@@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1566 cfq_clear_cfqq_wait_busy(cfqq); 1564 cfq_clear_cfqq_wait_busy(cfqq);
1567 1565
1568 /* 1566 /*
1567 * If this cfqq is shared between multiple processes, check to
1568 * make sure that those processes are still issuing I/Os within
1569 * the mean seek distance. If not, it may be time to break the
1570 * queues apart again.
1571 */
1572 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1573 cfq_mark_cfqq_split_coop(cfqq);
1574
1575 /*
1569 * store what was left of this slice, if the queue idled/timed out 1576 * store what was left of this slice, if the queue idled/timed out
1570 */ 1577 */
1571 if (timed_out && !cfq_cfqq_slice_new(cfqq)) { 1578 if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1663 return cfqd->last_position - blk_rq_pos(rq); 1670 return cfqd->last_position - blk_rq_pos(rq);
1664} 1671}
1665 1672
1666#define CFQQ_SEEK_THR 8 * 1024
1667#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
1668
1669static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1673static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1670 struct request *rq, bool for_preempt) 1674 struct request *rq, bool for_preempt)
1671{ 1675{
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3000 total = cfqq->seek_total + (cfqq->seek_samples/2); 3004 total = cfqq->seek_total + (cfqq->seek_samples/2);
3001 do_div(total, cfqq->seek_samples); 3005 do_div(total, cfqq->seek_samples);
3002 cfqq->seek_mean = (sector_t)total; 3006 cfqq->seek_mean = (sector_t)total;
3003
3004 /*
3005 * If this cfqq is shared between multiple processes, check to
3006 * make sure that those processes are still issuing I/Os within
3007 * the mean seek distance. If not, it may be time to break the
3008 * queues apart again.
3009 */
3010 if (cfq_cfqq_coop(cfqq)) {
3011 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
3012 cfqq->seeky_start = jiffies;
3013 else if (!CFQQ_SEEKY(cfqq))
3014 cfqq->seeky_start = 0;
3015 }
3016} 3007}
3017 3008
3018/* 3009/*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3453 return cic_to_cfqq(cic, 1); 3444 return cic_to_cfqq(cic, 1);
3454} 3445}
3455 3446
3456static int should_split_cfqq(struct cfq_queue *cfqq)
3457{
3458 if (cfqq->seeky_start &&
3459 time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
3460 return 1;
3461 return 0;
3462}
3463
3464/* 3447/*
3465 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this 3448 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3466 * was the last process referring to said cfqq. 3449 * was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@ static struct cfq_queue *
3469split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) 3452split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3470{ 3453{
3471 if (cfqq_process_refs(cfqq) == 1) { 3454 if (cfqq_process_refs(cfqq) == 1) {
3472 cfqq->seeky_start = 0;
3473 cfqq->pid = current->pid; 3455 cfqq->pid = current->pid;
3474 cfq_clear_cfqq_coop(cfqq); 3456 cfq_clear_cfqq_coop(cfqq);
3457 cfq_clear_cfqq_split_coop(cfqq);
3475 return cfqq; 3458 return cfqq;
3476 } 3459 }
3477 3460
@@ -3510,7 +3493,7 @@ new_queue:
3510 /* 3493 /*
3511 * If the queue was seeky for too long, break it apart. 3494 * If the queue was seeky for too long, break it apart.
3512 */ 3495 */
3513 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) { 3496 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3514 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); 3497 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3515 cfqq = split_cfqq(cic, cfqq); 3498 cfqq = split_cfqq(cic, cfqq);
3516 if (!cfqq) 3499 if (!cfqq)
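
The cfq change retires the seeky_start timestamp in favour of a single split_coop flag: a shared (coop) queue that still looks seeky when its slice expires is marked, and the mark is consumed on the next request setup, where the queue is actually broken apart. The sketch below walks through that mark-then-consume flow with a toy structure; it is simplified, since the real split also checks the queue's process reference count.

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_cfqq {
        bool coop;          /* queue is shared between processes */
        bool split_coop;    /* shared queue should be broken up  */
        bool seeky;         /* current I/O pattern looks seeky   */
    };

    /* Mirrors __cfq_slice_expired(): mark, do not act yet. */
    static void slice_expired(struct toy_cfqq *q)
    {
        if (q->coop && q->seeky)
            q->split_coop = true;
    }

    /* Mirrors the request-setup path: act on the mark, then clear it. */
    static void select_queue(struct toy_cfqq *q)
    {
        if (q->coop && q->split_coop) {
            printf("breaking apart cfqq\n");
            q->coop = false;
            q->split_coop = false;
        }
    }

    int main(void)
    {
        struct toy_cfqq q = { .coop = true, .split_coop = false, .seeky = true };

        slice_expired(&q);    /* seeky while shared: mark for split   */
        select_queue(&q);     /* split happens on the next request    */
        return 0;
    }
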
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c1315c47..b2586f57e1f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
935 struct platform_device *dd; 935 struct platform_device *dd;
936 936
937 id = dock_station_count; 937 id = dock_station_count;
938 memset(&ds, 0, sizeof(ds));
938 dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds)); 939 dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
939 if (IS_ERR(dd)) 940 if (IS_ERR(dd))
940 return PTR_ERR(dd); 941 return PTR_ERR(dd);
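
dock_add() hands the on-stack struct ds to platform_device_register_data(), which copies it byte for byte, so anything not explicitly initialised (padding included) previously leaked stack contents into the registered platform data; the added memset() makes the copy deterministic. A tiny illustration of the safe pattern (zero first, then set only the intended fields), with made-up names:

    #include <stdio.h>
    #include <string.h>

    struct dock_data {
        int id;
        int flags;    /* would be stack garbage without the memset */
    };

    /* Stand-in consumer that copies the caller's struct, the way
     * platform_device_register_data() copies &ds. */
    static struct dock_data registered;

    static void register_data(const struct dock_data *d)
    {
        memcpy(&registered, d, sizeof(registered));
    }

    int main(void)
    {
        struct dock_data ds;

        memset(&ds, 0, sizeof(ds));   /* the added line in dock_add() */
        ds.id = 7;                    /* only the fields we mean to set */

        register_data(&ds);
        printf("id=%d flags=%d\n", registered.id, registered.flags);
        return 0;
    }
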
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7c0441f63b39..cc978a8c00b7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
110 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 110 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
111 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, 111 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
112 (void *)2}, 112 (void *)2},
113 { set_max_cstate, "Pavilion zv5000", {
114 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
115 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
116 (void *)1},
117 { set_max_cstate, "Asus L8400B", {
118 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
119 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
120 (void *)1},
113 {}, 121 {},
114}; 122};
115 123
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
872 return(acpi_idle_enter_c1(dev, state)); 880 return(acpi_idle_enter_c1(dev, state));
873 881
874 local_irq_disable(); 882 local_irq_disable();
875 current_thread_info()->status &= ~TS_POLLING; 883 if (cx->entry_method != ACPI_CSTATE_FFH) {
876 /* 884 current_thread_info()->status &= ~TS_POLLING;
877 * TS_POLLING-cleared state must be visible before we test 885 /*
878 * NEED_RESCHED: 886 * TS_POLLING-cleared state must be visible before we test
879 */ 887 * NEED_RESCHED:
880 smp_mb(); 888 */
889 smp_mb();
890 }
881 891
882 if (unlikely(need_resched())) { 892 if (unlikely(need_resched())) {
883 current_thread_info()->status |= TS_POLLING; 893 current_thread_info()->status |= TS_POLLING;
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
957 } 967 }
958 968
959 local_irq_disable(); 969 local_irq_disable();
960 current_thread_info()->status &= ~TS_POLLING; 970 if (cx->entry_method != ACPI_CSTATE_FFH) {
961 /* 971 current_thread_info()->status &= ~TS_POLLING;
962 * TS_POLLING-cleared state must be visible before we test 972 /*
963 * NEED_RESCHED: 973 * TS_POLLING-cleared state must be visible before we test
964 */ 974 * NEED_RESCHED:
965 smp_mb(); 975 */
976 smp_mb();
977 }
966 978
967 if (unlikely(need_resched())) { 979 if (unlikely(need_resched())) {
968 current_thread_info()->status |= TS_POLLING; 980 current_thread_info()->status |= TS_POLLING;
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7247819dbd80..e306ba9aa34e 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
125 return status; 125 return status;
126} 126}
127 127
128static int early_pdc_done;
129
128void acpi_processor_set_pdc(acpi_handle handle) 130void acpi_processor_set_pdc(acpi_handle handle)
129{ 131{
130 struct acpi_object_list *obj_list; 132 struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
132 if (arch_has_acpi_pdc() == false) 134 if (arch_has_acpi_pdc() == false)
133 return; 135 return;
134 136
137 if (early_pdc_done)
138 return;
139
135 obj_list = acpi_processor_alloc_pdc(); 140 obj_list = acpi_processor_alloc_pdc();
136 if (!obj_list) 141 if (!obj_list)
137 return; 142 return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
151 return 0; 156 return 0;
152} 157}
153 158
159static int param_early_pdc_optin(char *s)
160{
161 early_pdc_optin = 1;
162 return 1;
163}
164__setup("acpi_early_pdc_eval", param_early_pdc_optin);
165
154static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { 166static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
155 { 167 {
156 set_early_pdc_optin, "HP Envy", { 168 set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
192 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 204 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
193 ACPI_UINT32_MAX, 205 ACPI_UINT32_MAX,
194 early_init_pdc, NULL, NULL, NULL); 206 early_init_pdc, NULL, NULL, NULL);
207
208 early_pdc_done = 1;
195} 209}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2cabadcc4d8c..a959f6a07508 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
413 if (result) 413 if (result)
414 goto update_bios; 414 goto update_bios;
415 415
416 return 0; 416 /* We need to call _PPC once when cpufreq starts */
417 if (ignore_ppc != 1)
418 result = acpi_processor_get_platform_limit(pr);
419
420 return result;
417 421
418 /* 422 /*
419 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that 423 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f6226085d..3e009674f333 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
1336 1336
1337 if (child) 1337 if (child)
1338 *child = device; 1338 *child = device;
1339 return 0; 1339
1340 if (device)
1341 return 0;
1342 else
1343 return -ENODEV;
1340} 1344}
1341 1345
1346/*
1347 * acpi_bus_add and acpi_bus_start
1348 *
1349 * scan a given ACPI tree and (probably recently hot-plugged)
1350 * create and add or starts found devices.
1351 *
1352 * If no devices were found -ENODEV is returned which does not
1353 * mean that this is a real error, there just have been no suitable
1354 * ACPI objects in the table trunk from which the kernel could create
1355 * a device and add/start an appropriate driver.
1356 */
1357
1342int 1358int
1343acpi_bus_add(struct acpi_device **child, 1359acpi_bus_add(struct acpi_device **child,
1344 struct acpi_device *parent, acpi_handle handle, int type) 1360 struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
1348 memset(&ops, 0, sizeof(ops)); 1364 memset(&ops, 0, sizeof(ops));
1349 ops.acpi_op_add = 1; 1365 ops.acpi_op_add = 1;
1350 1366
1351 acpi_bus_scan(handle, &ops, child); 1367 return acpi_bus_scan(handle, &ops, child);
1352 return 0;
1353} 1368}
1354EXPORT_SYMBOL(acpi_bus_add); 1369EXPORT_SYMBOL(acpi_bus_add);
1355 1370
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
1357{ 1372{
1358 struct acpi_bus_ops ops; 1373 struct acpi_bus_ops ops;
1359 1374
1375 if (!device)
1376 return -EINVAL;
1377
1360 memset(&ops, 0, sizeof(ops)); 1378 memset(&ops, 0, sizeof(ops));
1361 ops.acpi_op_start = 1; 1379 ops.acpi_op_start = 1;
1362 1380
1363 acpi_bus_scan(device->handle, &ops, NULL); 1381 return acpi_bus_scan(device->handle, &ops, NULL);
1364 return 0;
1365} 1382}
1366EXPORT_SYMBOL(acpi_bus_start); 1383EXPORT_SYMBOL(acpi_bus_start);
1367 1384
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7c450..8a0ed2800e63 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
213 unsigned long table_end; 213 unsigned long table_end;
214 acpi_size tbl_size; 214 acpi_size tbl_size;
215 215
216 if (acpi_disabled) 216 if (acpi_disabled && !acpi_ht)
217 return -ENODEV; 217 return -ENODEV;
218 218
219 if (!handler) 219 if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
280 struct acpi_table_header *table = NULL; 280 struct acpi_table_header *table = NULL;
281 acpi_size tbl_size; 281 acpi_size tbl_size;
282 282
283 if (acpi_disabled) 283 if (acpi_disabled && !acpi_ht)
284 return -ENODEV; 284 return -ENODEV;
285 285
286 if (!handler) 286 if (!handler)
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..6e2c3b064f53 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
59 else 59 else
60 pr_debug("class '%s' does not have a release() function, " 60 pr_debug("class '%s' does not have a release() function, "
61 "be careful\n", class->name); 61 "be careful\n", class->name);
62
63 kfree(cp);
62} 64}
63 65
64static struct sysfs_ops class_sysfs_ops = { 66static struct sysfs_ops class_sysfs_ops = {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594860d3..9291614ac6b7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
337 if (*pos > h->highest_lun) 337 if (*pos > h->highest_lun)
338 return 0; 338 return 0;
339 339
340 if (drv == NULL) /* it's possible for h->drv[] to have holes. */
341 return 0;
342
340 if (drv->heads == 0) 343 if (drv->heads == 0)
341 return 0; 344 return 0;
342 345
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
808 808
809exit: 809exit:
810 sdio_release_host(card->func); 810 sdio_release_host(card->func);
811 kfree(tmpbuf);
811 812
812 return ret; 813 return ret;
813} 814}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 34cf04e21795..fd50ead59c79 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
767 767
768static int __init agp_amd64_mod_init(void) 768static int __init agp_amd64_mod_init(void)
769{ 769{
770#ifndef MODULE
770 if (gart_iommu_aperture) 771 if (gart_iommu_aperture)
771 return agp_bridges_found ? 0 : -ENODEV; 772 return agp_bridges_found ? 0 : -ENODEV;
772 773#endif
773 return agp_amd64_init(); 774 return agp_amd64_init();
774} 775}
775 776
776static void __exit agp_amd64_cleanup(void) 777static void __exit agp_amd64_cleanup(void)
777{ 778{
779#ifndef MODULE
778 if (gart_iommu_aperture) 780 if (gart_iommu_aperture)
779 return; 781 return;
782#endif
780 if (aperture_resource) 783 if (aperture_resource)
781 release_resource(aperture_resource); 784 release_resource(aperture_resource);
782 pci_unregister_driver(&agp_amd64_pci_driver); 785 pci_unregister_driver(&agp_amd64_pci_driver);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
39struct tpm_inf_dev { 39struct tpm_inf_dev {
40 int iotype; 40 int iotype;
41 41
42 void __iomem *mem_base; /* MMIO ioremap'd addr */ 42 void __iomem *mem_base; /* MMIO ioremap'd addr */
43 unsigned long map_base; /* phys MMIO base */ 43 unsigned long map_base; /* phys MMIO base */
44 unsigned long map_size; /* MMIO region size */ 44 unsigned long map_size; /* MMIO region size */
45 unsigned int index_off; /* index register offset */ 45 unsigned int index_off; /* index register offset */
46 46
47 unsigned int data_regs; /* Data registers */ 47 unsigned int data_regs; /* Data registers */
48 unsigned int data_size; 48 unsigned int data_size;
49 49
50 unsigned int config_port; /* IO Port config index reg */ 50 unsigned int config_port; /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
         .miscdev = {.fops = &inf_ops,},
 };

-static const struct pnp_device_id tpm_pnp_tbl[] = {
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
         /* Infineon TPMs */
         {"IFX0101", 0},
         {"IFX0102", 0},
         {"", 0}
 };

-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);

 static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
                                        const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
         if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
             !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {

                 tpm_dev.iotype = TPM_INF_IO_PORT;

                 tpm_dev.config_port = pnp_port_start(dev, 0);
                 tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
                         goto err_last;
                 }
         } else if (pnp_mem_valid(dev, 0) &&
                    !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {

                 tpm_dev.iotype = TPM_INF_IO_MEM;

                 tpm_dev.map_base = pnp_mem_start(dev, 0);
                 tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
                  "product id 0x%02x%02x"
                  "%s\n",
                  tpm_dev.iotype == TPM_INF_IO_PORT ?
                  tpm_dev.config_port :
                  tpm_dev.map_base + tpm_dev.index_off,
                  tpm_dev.iotype == TPM_INF_IO_PORT ?
                  tpm_dev.data_regs :
                  tpm_dev.map_base + tpm_dev.data_regs,
                  version[0], version[1],
                  vendorid[0], vendorid[1],
                  productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
                         iounmap(tpm_dev.mem_base);
                         release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
                 }
+                tpm_dev_vendor_release(chip);
                 tpm_remove_hardware(chip->dev);
         }
 }

+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+        struct tpm_chip *chip = pnp_get_drvdata(dev);
+        int rc;
+        if (chip) {
+                u8 savestate[] = {
+                        0, 193,         /* TPM_TAG_RQU_COMMAND */
+                        0, 0, 0, 10,    /* blob length (in bytes) */
+                        0, 0, 0, 152    /* TPM_ORD_SaveState */
+                };
+                dev_info(&dev->dev, "saving TPM state\n");
+                rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+                if (rc < 0) {
+                        dev_err(&dev->dev, "error while saving TPM state\n");
+                        return rc;
+                }
+        }
+        return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+        /* Re-configure TPM after suspending */
+        tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+        tpm_config_out(IOLIMH, TPM_INF_ADDR);
+        tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+        tpm_config_out(IOLIML, TPM_INF_ADDR);
+        tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+        /* activate register */
+        tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+        tpm_config_out(0x01, TPM_INF_DATA);
+        tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+        /* disable RESET, LP and IRQC */
+        tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+        return tpm_pm_resume(&dev->dev);
+}
+
 static struct pnp_driver tpm_inf_pnp_driver = {
         .name = "tpm_inf_pnp",
-        .driver = {
-                .owner = THIS_MODULE,
-                .suspend = tpm_pm_suspend,
-                .resume = tpm_pm_resume,
-        },
-        .id_table = tpm_pnp_tbl,
+        .id_table = tpm_inf_pnp_tbl,
         .probe = tpm_inf_pnp_probe,
-        .remove = __devexit_p(tpm_inf_pnp_remove),
+        .suspend = tpm_inf_pnp_suspend,
+        .resume = tpm_inf_pnp_resume,
+        .remove = __devexit_p(tpm_inf_pnp_remove)
 };

 static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);

 MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
 MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
 MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 27d20fac19d1..b314a999aabe 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -21,7 +21,7 @@

 #define DRV_NAME "cs5535-clockevt"

-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
 module_param_named(irq, timer_irq, int, 0644);
 MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");

diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                         (dbs_tuners_ins.up_threshold -
                          dbs_tuners_ins.down_differential);

+                if (freq_next < policy->min)
+                        freq_next = policy->min;
+
                 if (!dbs_tuners_ins.powersave_bias) {
                         __cpufreq_driver_target(policy, freq_next,
                                         CPUFREQ_RELATION_L);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5f2ee0f8e2c..64a937262a40 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
         cohd_fin->pending_irqs--;
         cohc->completed = cohd_fin->desc.cookie;

-        BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
         if (cohc->nbr_active_done == 0)
                 return;

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8bb..e7a3230fb7d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
                 chan->dev->chan = NULL;
                 mutex_unlock(&dma_list_mutex);
                 device_unregister(&chan->dev->device);
+                free_percpu(chan->local);
         }
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8b905161fbf4..948d563941c9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:

         if (iterations > 0)
                 while (!kthread_should_stop()) {
-                        DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+                        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
                         interruptible_sleep_on(&wait_dmatest_exit);
                 }

diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5f7a500e18d0..5cc37afe2bc1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
         if (is_ioat_active(status) || is_ioat_idle(status))
                 ioat_suspend(chan);
         while (is_ioat_active(status) || is_ioat_idle(status)) {
-                if (end && time_after(jiffies, end)) {
+                if (tmo && time_after(jiffies, end)) {
                         err = -ETIMEDOUT;
                         break;
                 }
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389e..e80bae1673fa 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
  * @buffer_n:   buffer number to update.
  *              0 or 1 are the only valid values.
  * @phyaddr:    buffer physical address.
- * @return:     Returns 0 on success or negative error code on failure. This
- *              function will fail if the buffer is set to ready.
  */
 /* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
                                       int buffer_n, dma_addr_t phyaddr)
 {
         enum ipu_channel channel = ichan->dma_chan.chan_id;
         uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
         }

         spin_unlock_irqrestore(&ipu_data.lock, flags);
-
-        return 0;
 }

813/* Called under spin_lock_irqsave(&ichan->lock) */ 809/* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
 {
         unsigned int chan_id = ichan->dma_chan.chan_id;
         struct device *dev = &ichan->dma_chan.dev->device;
-        int ret;

         if (async_tx_test_ack(&desc->txd))
                 return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
          * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
          * doing it again shouldn't hurt either.
          */
-        ret = ipu_update_channel_buffer(ichan, buf_idx,
-                                        sg_dma_address(sg));
-
-        if (ret < 0) {
-                dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
-                        sg, chan_id, buf_idx);
-                return ret;
-        }
+        ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));

         ipu_select_buffer(chan_id, buf_idx);
         dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)

         if (likely(sgnew) &&
             ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
-                callback = desc->txd.callback;
-                callback_param = desc->txd.callback_param;
+                callback = descnew->txd.callback;
+                callback_param = descnew->txd.callback_param;
                 spin_unlock(&ichan->lock);
-                callback(callback_param);
+                if (callback)
+                        callback(callback_param);
                 spin_lock(&ichan->lock);
         }

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 000dc67b85b7..3391e6739d06 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
  * the memory system completely. A command line option allows to force-enable
  * hardware ECC later in amd64_enable_ecc_error_reporting().
  */
-static const char *ecc_warning =
-        "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
-        " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
-        " Also, use of the override can cause unknown side effects.\n";
+static const char *ecc_msg =
+        "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+        " Either enable ECC checking or force module loading by setting "
+        "'ecc_enable_override'.\n"
+        " (Note that use of the override may cause unknown side effects.)\n";

 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)

         ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
         if (!ecc_enabled)
-                amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
+                amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
                              "is currently disabled, set F3x%x[22] (%s).\n",
                              K8_NBCFG, pci_name(pvt->misc_f3_ctl));
         else
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)

         nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
         if (!nb_mce_en)
-                amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
+                amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
                              "0x%08x[4] on node %d to enable.\n",
                              MSR_IA32_MCG_CTL, pvt->mc_node_id);

         if (!ecc_enabled || !nb_mce_en) {
                 if (!ecc_enable_override) {
-                        amd64_printk(KERN_WARNING, "%s", ecc_warning);
+                        amd64_printk(KERN_NOTICE, "%s", ecc_msg);
                         return -ENODEV;
                 }
                 ecc_enable_override = 0;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402af97b..ecd5928d7110 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
                 end <<= (24 - PAGE_SHIFT);
                 end |= (1 << (24 - PAGE_SHIFT)) - 1;

-                csrow->first_page = start >> PAGE_SHIFT;
-                csrow->last_page = end >> PAGE_SHIFT;
+                csrow->first_page = start;
+                csrow->last_page = end;
                 csrow->nr_pages = end + 1 - start;
                 csrow->grain = 8;
                 csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,

         mpc85xx_init_csrows(mci);

-#ifdef CONFIG_EDAC_DEBUG
-        edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
         /* store the original error disable bits */
         orig_ddr_err_disable =
             in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index cbaf420c36c5..2d3dc7ded0a9 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,

 static struct kmem_cache *fwnet_packet_task_cache;

+static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
+{
+        dev_kfree_skb_any(ptask->skb);
+        kmem_cache_free(fwnet_packet_task_cache, ptask);
+}
+
 static int fwnet_send_packet(struct fwnet_packet_task *ptask);

 static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
 {
-        struct fwnet_device *dev;
+        struct fwnet_device *dev = ptask->dev;
         unsigned long flags;
-
-        dev = ptask->dev;
+        bool free;

         spin_lock_irqsave(&dev->lock, flags);
-        list_del(&ptask->pt_link);
-        spin_unlock_irqrestore(&dev->lock, flags);

-        ptask->outstanding_pkts--; /* FIXME access inside lock */
+        ptask->outstanding_pkts--;
+
+        /* Check whether we or the networking TX soft-IRQ is last user. */
+        free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
+
+        if (ptask->outstanding_pkts == 0)
+                list_del(&ptask->pt_link);
+
+        spin_unlock_irqrestore(&dev->lock, flags);

         if (ptask->outstanding_pkts > 0) {
                 u16 dg_size;
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
                         ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
                 }
                 fwnet_send_packet(ptask);
-        } else {
-                dev_kfree_skb_any(ptask->skb);
-                kmem_cache_free(fwnet_packet_task_cache, ptask);
         }
+
+        if (free)
+                fwnet_free_ptask(ptask);
 }

 static void fwnet_write_complete(struct fw_card *card, int rcode,
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
         unsigned tx_len;
         struct rfc2734_header *bufhdr;
         unsigned long flags;
+        bool free;

         dev = ptask->dev;
         tx_len = ptask->max_payload;
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
                                 generation, SCODE_100, 0ULL, ptask->skb->data,
                                 tx_len + 8, fwnet_write_complete, ptask);

-                /* FIXME race? */
                 spin_lock_irqsave(&dev->lock, flags);
-                list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
+                /* If the AT tasklet already ran, we may be last user. */
+                free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+                if (!free)
+                        list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
                 spin_unlock_irqrestore(&dev->lock, flags);

-                return 0;
+                goto out;
         }

         fw_send_request(dev->card, &ptask->transaction,
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
                         ptask->generation, ptask->speed, ptask->fifo_addr,
                         ptask->skb->data, tx_len, fwnet_write_complete, ptask);

-        /* FIXME race? */
         spin_lock_irqsave(&dev->lock, flags);
-        list_add_tail(&ptask->pt_link, &dev->sent_list);
+
+        /* If the AT tasklet already ran, we may be last user. */
+        free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+        if (!free)
+                list_add_tail(&ptask->pt_link, &dev->sent_list);
+
         spin_unlock_irqrestore(&dev->lock, flags);

         dev->netdev->trans_start = jiffies;
+ out:
+        if (free)
+                fwnet_free_ptask(ptask);

         return 0;
 }
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
         spin_unlock_irqrestore(&dev->lock, flags);

         ptask->max_payload = max_payload;
+        INIT_LIST_HEAD(&ptask->pt_link);
+
         fwnet_send_packet(ptask);

         return NETDEV_TX_OK;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2345d4103fe6..43ebf337b131 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
         u32 payload_index, payload_end_index, next_page_index;
         int page, end_page, i, length, offset;

-        /*
-         * FIXME: Cycle lost behavior should be configurable: lose
-         * packet, retransmit or terminate..
-         */
-
         p = packet;
         payload_index = payload;

@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
         if (!p->skip) {
                 d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                 d[0].req_count = cpu_to_le16(8);
+                /*
+                 * Link the skip address to this descriptor itself. This causes
+                 * a context to skip a cycle whenever lost cycles or FIFO
+                 * overruns occur, without dropping the data. The application
+                 * should then decide whether this is an error condition or not.
+                 * FIXME: Make the context's cycle-lost behaviour configurable?
+                 */
+                d[0].branch_address = cpu_to_le32(d_bus | z);

                 header = (__le32 *) &d[1];
                 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05592f3..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
         return mode;
 }

+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+                            struct detailed_pixel_timing *pt)
+{
+        int i;
+        static const struct {
+                int w, h;
+        } cea_interlaced[] = {
+                { 1920, 1080 },
+                {  720,  480 },
+                { 1440,  480 },
+                { 2880,  480 },
+                {  720,  576 },
+                { 1440,  576 },
+                { 2880,  576 },
+        };
+        static const int n_sizes =
+                sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+        if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+                return;
+
+        for (i = 0; i < n_sizes; i++) {
+                if ((mode->hdisplay == cea_interlaced[i].w) &&
+                    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+                        mode->vdisplay *= 2;
+                        mode->vsync_start *= 2;
+                        mode->vsync_end *= 2;
+                        mode->vtotal *= 2;
+                        mode->vtotal |= 1;
+                }
+        }
+
+        mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,

         drm_mode_set_name(mode);

-        if (pt->misc & DRM_EDID_PT_INTERLACED)
-                mode->flags |= DRM_MODE_FLAG_INTERLACE;
+        drm_mode_do_interlace_quirk(mode, pt);

         if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
                 pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                         wasted += alignment - tmp;
                 }

-                if (entry->size >= size + wasted) {
+                if (entry->size >= size + wasted &&
+                    (entry->start + wasted + size) <= end) {
                         if (!best_match)
                                 return entry;
                         if (entry->size < best_size) {
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
         if (cmdbuf->num_cliprects) {
                 cliprects = kcalloc(cmdbuf->num_cliprects,
                                     sizeof(struct drm_clip_rect), GFP_KERNEL);
-                if (cliprects == NULL)
+                if (cliprects == NULL) {
+                        ret = -ENOMEM;
                         goto fail_batch_free;
+                }

                 ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                      cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ecac882e1d54..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -174,26 +174,20 @@ const static struct pci_device_id pciidlist[] = {
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif

-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;

-        if (!dev || !dev_priv) {
-                DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-                DRM_ERROR("DRM not initialized, aborting suspend.\n");
-                return -ENODEV;
-        }
-
-        if (state.event == PM_EVENT_PRETHAW)
-                return 0;
-
         pci_save_state(dev->pdev);

         /* If KMS is active, we do the leavevt stuff here */
         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-                if (i915_gem_idle(dev))
+                int error = i915_gem_idle(dev);
+                if (error) {
                         dev_err(&dev->pdev->dev,
-                                "GEM idle failed, resume may fail\n");
+                                "GEM idle failed, resume might fail\n");
+                        return error;
+                }
                 drm_irq_uninstall(dev);
         }

@@ -201,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)

         intel_opregion_free(dev, 1);

+        /* Modeset on resume, not lid events */
+        dev_priv->modeset_on_lid = 0;
+
+        return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+        int error;
+
+        if (!dev || !dev->dev_private) {
+                DRM_ERROR("dev: %p\n", dev);
+                DRM_ERROR("DRM not initialized, aborting suspend.\n");
+                return -ENODEV;
+        }
+
+        if (state.event == PM_EVENT_PRETHAW)
+                return 0;
+
+        error = i915_drm_freeze(dev);
+        if (error)
+                return error;
+
         if (state.event == PM_EVENT_SUSPEND) {
                 /* Shut down the device */
                 pci_disable_device(dev->pdev);
                 pci_set_power_state(dev->pdev, PCI_D3hot);
         }

-        /* Modeset on resume, not lid events */
-        dev_priv->modeset_on_lid = 0;
-
         return 0;
 }

-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-        int ret = 0;
-
-        if (pci_enable_device(dev->pdev))
-                return -1;
-        pci_set_master(dev->pdev);
+        int error = 0;

         i915_restore_state(dev);

@@ -231,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
                 mutex_lock(&dev->struct_mutex);
                 dev_priv->mm.suspended = 0;

-                ret = i915_gem_init_ringbuffer(dev);
-                if (ret != 0)
-                        ret = -1;
+                error = i915_gem_init_ringbuffer(dev);
                 mutex_unlock(&dev->struct_mutex);

                 drm_irq_install(dev);
-        }
-        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
                 /* Resume the modeset for every activated CRTC */
                 drm_helper_resume_force_mode(dev);
         }

         dev_priv->modeset_on_lid = 0;

-        return ret;
+        return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+        if (pci_enable_device(dev->pdev))
+                return -EIO;
+
+        pci_set_master(dev->pdev);
+
+        return i915_drm_thaw(dev);
 }

 /**
@@ -386,57 +403,62 @@ i915_pci_remove(struct pci_dev *pdev)
         drm_put_dev(dev);
 }

-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-        struct drm_device *dev = pci_get_drvdata(pdev);
-
-        return i915_suspend(dev, state);
-}
-
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
-        struct drm_device *dev = pci_get_drvdata(pdev);
-
-        return i915_resume(dev);
-}
-
-static int
-i915_pm_suspend(struct device *dev)
-{
-        return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
-}
-
-static int
-i915_pm_resume(struct device *dev)
-{
-        return i915_pci_resume(to_pci_dev(dev));
-}
-
-static int
-i915_pm_freeze(struct device *dev)
-{
-        return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
-}
-
-static int
-i915_pm_thaw(struct device *dev)
-{
-        /* thaw during hibernate, do nothing! */
-        return 0;
-}
-
-static int
-i915_pm_poweroff(struct device *dev)
-{
-        return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
-}
-
-static int
-i915_pm_restore(struct device *dev)
-{
-        return i915_pci_resume(to_pci_dev(dev));
+static int i915_pm_suspend(struct device *dev)
+{
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+        int error;
+
+        if (!drm_dev || !drm_dev->dev_private) {
+                dev_err(dev, "DRM not initialized, aborting suspend.\n");
+                return -ENODEV;
+        }
+
+        error = i915_drm_freeze(drm_dev);
+        if (error)
+                return error;
+
+        pci_disable_device(pdev);
+        pci_set_power_state(pdev, PCI_D3hot);
+
+        return 0;
+}
+
+static int i915_pm_resume(struct device *dev)
+{
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+        return i915_resume(drm_dev);
+}
+
+static int i915_pm_freeze(struct device *dev)
+{
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+        if (!drm_dev || !drm_dev->dev_private) {
+                dev_err(dev, "DRM not initialized, aborting suspend.\n");
+                return -ENODEV;
+        }
+
+        return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+        return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+        struct pci_dev *pdev = to_pci_dev(dev);
+        struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+        return i915_drm_freeze(drm_dev);
 }

 const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +467,7 @@ const struct dev_pm_ops i915_pm_ops = {
         .freeze = i915_pm_freeze,
         .thaw = i915_pm_thaw,
         .poweroff = i915_pm_poweroff,
-        .restore = i915_pm_restore,
+        .restore = i915_pm_resume,
 };

 static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aaf934d96f21..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
         struct list_head flushing_list;

         /**
+         * List of objects currently pending a GPU write flush.
+         *
+         * All elements on this list will belong to either the
+         * active_list or flushing_list, last_rendering_seqno can
+         * be used to differentiate between the two elements.
+         */
+        struct list_head gpu_write_list;
+
+        /**
          * LRU list of objects which are not in the ringbuffer and
          * are ready to unbind, but are still in the GTT.
          *
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {

         /** This object's place on the active/flushing/inactive lists */
         struct list_head list;
+        /** This object's place on GPU write list */
+        struct list_head gpu_write_list;

         /** This object's place on the fenced object LRU */
         struct list_head fence_list;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b4c8c0230689..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
         else
                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

+        BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
         obj_priv->last_rendering_seqno = 0;
         if (obj_priv->active) {
                 obj_priv->active = 0;
@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                 struct drm_i915_gem_object *obj_priv, *next;

                 list_for_each_entry_safe(obj_priv, next,
-                                         &dev_priv->mm.flushing_list, list) {
+                                         &dev_priv->mm.gpu_write_list,
+                                         gpu_write_list) {
                         struct drm_gem_object *obj = obj_priv->obj;

                         if ((obj->write_domain & flush_domains) ==
@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
                                 uint32_t old_write_domain = obj->write_domain;

                                 obj->write_domain = 0;
+                                list_del_init(&obj_priv->gpu_write_list);
                                 i915_gem_object_move_to_active(obj, seqno);

                                 trace_i915_gem_object_change_domain(obj,
@@ -2084,8 +2088,8 @@ static int
 i915_gem_evict_everything(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        uint32_t seqno;
         int ret;
+        uint32_t seqno;
         bool lists_empty;

         spin_lock(&dev_priv->mm.active_list_lock);
@@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
         if (ret)
                 return ret;

+        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
         ret = i915_gem_evict_from_inactive_list(dev);
         if (ret)
                 return ret;
@@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
                 old_write_domain = obj->write_domain;
                 i915_gem_flush(dev, 0, obj->write_domain);
                 seqno = i915_add_request(dev, NULL, obj->write_domain);
-                obj->write_domain = 0;
+                BUG_ON(obj->write_domain);
                 i915_gem_object_move_to_active(obj, seqno);

                 trace_i915_gem_object_change_domain(obj,
@@ -3682,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         if (args->num_cliprects != 0) {
                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
                                     GFP_KERNEL);
-                if (cliprects == NULL)
+                if (cliprects == NULL) {
+                        ret = -ENOMEM;
                         goto pre_mutex_err;
+                }

                 ret = copy_from_user(cliprects,
                                      (struct drm_clip_rect __user *)
@@ -3850,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         i915_gem_flush(dev,
                                        dev->invalidate_domains,
                                        dev->flush_domains);
-                        if (dev->flush_domains)
+                        if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
                                 (void)i915_add_request(dev, file_priv,
                                                        dev->flush_domains);
                 }

         for (i = 0; i < args->buffer_count; i++) {
                 struct drm_gem_object *obj = object_list[i];
+                struct drm_i915_gem_object *obj_priv = obj->driver_private;
                 uint32_t old_write_domain = obj->write_domain;

                 obj->write_domain = obj->pending_write_domain;
+                if (obj->write_domain)
+                        list_move_tail(&obj_priv->gpu_write_list,
+                                       &dev_priv->mm.gpu_write_list);
+                else
+                        list_del_init(&obj_priv->gpu_write_list);
+
                 trace_i915_gem_object_change_domain(obj,
                                                     obj->read_domains,
                                                     old_write_domain);
@@ -4370,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
         obj_priv->obj = obj;
         obj_priv->fence_reg = I915_FENCE_REG_NONE;
         INIT_LIST_HEAD(&obj_priv->list);
+        INIT_LIST_HEAD(&obj_priv->gpu_write_list);
         INIT_LIST_HEAD(&obj_priv->fence_list);
         obj_priv->madv = I915_MADV_WILLNEED;

@@ -4821,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
         spin_lock_init(&dev_priv->mm.active_list_lock);
         INIT_LIST_HEAD(&dev_priv->mm.active_list);
         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+        INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
         INIT_LIST_HEAD(&dev_priv->mm.request_list);
         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 50ddf4a95c5e..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,21 +309,21 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
         if (de_iir & DE_GSE)
                 ironlake_opregion_gse_intr(dev);

-        if (de_iir & DE_PLANEA_FLIP_DONE)
+        if (de_iir & DE_PLANEA_FLIP_DONE) {
                 intel_prepare_page_flip(dev, 0);
+                intel_finish_page_flip(dev, 0);
+        }

-        if (de_iir & DE_PLANEB_FLIP_DONE)
+        if (de_iir & DE_PLANEB_FLIP_DONE) {
                 intel_prepare_page_flip(dev, 1);
+                intel_finish_page_flip(dev, 1);
+        }

-        if (de_iir & DE_PIPEA_VBLANK) {
+        if (de_iir & DE_PIPEA_VBLANK)
                 drm_handle_vblank(dev, 0);
-                intel_finish_page_flip(dev, 0);
-        }

-        if (de_iir & DE_PIPEB_VBLANK) {
+        if (de_iir & DE_PIPEB_VBLANK)
                 drm_handle_vblank(dev, 1);
-                intel_finish_page_flip(dev, 1);
-        }

         /* check event from PCH */
         if ((de_iir & DE_PCH_EVENT) &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
 #define   FBC_CTL_PERIODIC      (1<<30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
 #define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_C3_IDLE           (1<<13)
 #define   FBC_CTL_STRIDE_SHIFT  (5)
 #define   FBC_CTL_FENCENO       (1<<0)
 #define FBC_COMMAND             0x0320c
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 12775df1bbfd..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
 #define IRONLAKE_DOT_MAX        350000
 #define IRONLAKE_VCO_MIN        1760000
 #define IRONLAKE_VCO_MAX        3510000
-#define IRONLAKE_N_MIN          1
-#define IRONLAKE_N_MAX          6
-#define IRONLAKE_M_MIN          79
-#define IRONLAKE_M_MAX          127
 #define IRONLAKE_M1_MIN         12
 #define IRONLAKE_M1_MAX         22
 #define IRONLAKE_M2_MIN         5
 #define IRONLAKE_M2_MAX         9
-#define IRONLAKE_P_SDVO_DAC_MIN 5
-#define IRONLAKE_P_SDVO_DAC_MAX 80
-#define IRONLAKE_P_LVDS_MIN     28
-#define IRONLAKE_P_LVDS_MAX     112
-#define IRONLAKE_P1_MIN         1
-#define IRONLAKE_P1_MAX         8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW   14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST   7  /* double channel */
 #define IRONLAKE_P2_DOT_LIMIT   225000 /* 225Mhz */

-#define IRONLAKE_P_DISPLAY_PORT_MIN     10
-#define IRONLAKE_P_DISPLAY_PORT_MAX     20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST   10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW   10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT  0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN    1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX    2
+/* We have parameter ranges for different type of outputs. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN      1
+#define IRONLAKE_DAC_N_MAX      5
+#define IRONLAKE_DAC_M_MIN      79
+#define IRONLAKE_DAC_M_MAX      127
+#define IRONLAKE_DAC_P_MIN      5
+#define IRONLAKE_DAC_P_MAX      80
+#define IRONLAKE_DAC_P1_MIN     1
+#define IRONLAKE_DAC_P1_MAX     8
+#define IRONLAKE_DAC_P2_SLOW    10
+#define IRONLAKE_DAC_P2_FAST    5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN   1
+#define IRONLAKE_LVDS_S_N_MAX   3
+#define IRONLAKE_LVDS_S_M_MIN   79
+#define IRONLAKE_LVDS_S_M_MAX   118
+#define IRONLAKE_LVDS_S_P_MIN   28
+#define IRONLAKE_LVDS_S_P_MAX   112
+#define IRONLAKE_LVDS_S_P1_MIN  2
+#define IRONLAKE_LVDS_S_P1_MAX  8
+#define IRONLAKE_LVDS_S_P2_SLOW 14
+#define IRONLAKE_LVDS_S_P2_FAST 14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN   1
+#define IRONLAKE_LVDS_D_N_MAX   3
+#define IRONLAKE_LVDS_D_M_MIN   79
+#define IRONLAKE_LVDS_D_M_MAX   127
+#define IRONLAKE_LVDS_D_P_MIN   14
+#define IRONLAKE_LVDS_D_P_MAX   56
+#define IRONLAKE_LVDS_D_P1_MIN  2
+#define IRONLAKE_LVDS_D_P1_MAX  8
+#define IRONLAKE_LVDS_D_P2_SLOW 7
+#define IRONLAKE_LVDS_D_P2_FAST 7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN       1
+#define IRONLAKE_LVDS_S_SSC_N_MAX       2
+#define IRONLAKE_LVDS_S_SSC_M_MIN       79
+#define IRONLAKE_LVDS_S_SSC_M_MAX       126
+#define IRONLAKE_LVDS_S_SSC_P_MIN       28
+#define IRONLAKE_LVDS_S_SSC_P_MAX       112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN      2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX      8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW     14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST     14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN       1
+#define IRONLAKE_LVDS_D_SSC_N_MAX       3
+#define IRONLAKE_LVDS_D_SSC_M_MIN       79
+#define IRONLAKE_LVDS_D_SSC_M_MAX       126
+#define IRONLAKE_LVDS_D_SSC_P_MIN       14
+#define IRONLAKE_LVDS_D_SSC_P_MAX       42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN      2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX      6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW     7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST     7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN       1
+#define IRONLAKE_DP_N_MAX       2
+#define IRONLAKE_DP_M_MIN       81
+#define IRONLAKE_DP_M_MAX       90
+#define IRONLAKE_DP_P_MIN       10
+#define IRONLAKE_DP_P_MAX       20
+#define IRONLAKE_DP_P2_FAST     10
+#define IRONLAKE_DP_P2_SLOW     10
+#define IRONLAKE_DP_P2_LIMIT    0
+#define IRONLAKE_DP_P1_MIN      1
+#define IRONLAKE_DP_P1_MAX      2

 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
         .find_pll = intel_find_best_PLL,
 };

-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
         .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
         .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-        .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-        .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+        .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+        .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
         .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
         .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-        .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
-        .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+        .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+        .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
         .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-                .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
-                .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+                .p2_slow = IRONLAKE_DAC_P2_SLOW,
+                .p2_fast = IRONLAKE_DAC_P2_FAST },
         .find_pll = intel_g4x_find_best_PLL,
 };

-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
         .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
         .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
-        .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
-        .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+        .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+        .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
         .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
         .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
-        .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
-        .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+        .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+        .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
         .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
-                .p2_slow = IRONLAKE_P2_LVDS_SLOW,
-                .p2_fast = IRONLAKE_P2_LVDS_FAST },
+                .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+        .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+        .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+        .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+        .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+        .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
+        .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+        .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+        .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+        .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
+        .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+                .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+        .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+        .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+        .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+        .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+        .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+        .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+        .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+        .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+        .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+        .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+                .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+        .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+        .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+        .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+        .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+        .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+        .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+        .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+        .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+        .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+        .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+                .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+                .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
         .find_pll = intel_g4x_find_best_PLL,
 };

@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
509 .max = IRONLAKE_DOT_MAX }, 607 .max = IRONLAKE_DOT_MAX },
510 .vco = { .min = IRONLAKE_VCO_MIN, 608 .vco = { .min = IRONLAKE_VCO_MIN,
511 .max = IRONLAKE_VCO_MAX}, 609 .max = IRONLAKE_VCO_MAX},
512 .n = { .min = IRONLAKE_N_MIN, 610 .n = { .min = IRONLAKE_DP_N_MIN,
513 .max = IRONLAKE_N_MAX }, 611 .max = IRONLAKE_DP_N_MAX },
514 .m = { .min = IRONLAKE_M_MIN, 612 .m = { .min = IRONLAKE_DP_M_MIN,
515 .max = IRONLAKE_M_MAX }, 613 .max = IRONLAKE_DP_M_MAX },
516 .m1 = { .min = IRONLAKE_M1_MIN, 614 .m1 = { .min = IRONLAKE_M1_MIN,
517 .max = IRONLAKE_M1_MAX }, 615 .max = IRONLAKE_M1_MAX },
518 .m2 = { .min = IRONLAKE_M2_MIN, 616 .m2 = { .min = IRONLAKE_M2_MIN,
519 .max = IRONLAKE_M2_MAX }, 617 .max = IRONLAKE_M2_MAX },
520 .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, 618 .p = { .min = IRONLAKE_DP_P_MIN,
521 .max = IRONLAKE_P_DISPLAY_PORT_MAX }, 619 .max = IRONLAKE_DP_P_MAX },
522 .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, 620 .p1 = { .min = IRONLAKE_DP_P1_MIN,
523 .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, 621 .max = IRONLAKE_DP_P1_MAX},
524 .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, 622 .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
525 .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, 623 .p2_slow = IRONLAKE_DP_P2_SLOW,
526 .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, 624 .p2_fast = IRONLAKE_DP_P2_FAST },
527 .find_pll = intel_find_pll_ironlake_dp, 625 .find_pll = intel_find_pll_ironlake_dp,
528}; 626};
529 627
530static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 628static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
531{ 629{
630 struct drm_device *dev = crtc->dev;
631 struct drm_i915_private *dev_priv = dev->dev_private;
532 const intel_limit_t *limit; 632 const intel_limit_t *limit;
533 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 633 int refclk = 120;
534 limit = &intel_limits_ironlake_lvds; 634
535 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 635 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
636 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
637 refclk = 100;
638
639 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
640 LVDS_CLKB_POWER_UP) {
641 /* LVDS dual channel */
642 if (refclk == 100)
643 limit = &intel_limits_ironlake_dual_lvds_100m;
644 else
645 limit = &intel_limits_ironlake_dual_lvds;
646 } else {
647 if (refclk == 100)
648 limit = &intel_limits_ironlake_single_lvds_100m;
649 else
650 limit = &intel_limits_ironlake_single_lvds;
651 }
652 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
536 HAS_eDP) 653 HAS_eDP)
537 limit = &intel_limits_ironlake_display_port; 654 limit = &intel_limits_ironlake_display_port;
538 else 655 else
539 limit = &intel_limits_ironlake_sdvo; 656 limit = &intel_limits_ironlake_dac;
540 657
541 return limit; 658 return limit;
542} 659}
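The hunk above replaces the single Ironlake LVDS limit table with four variants selected by reference clock (120 MHz, or 100 MHz when spread-spectrum clocking is enabled) and by single versus dual channel. A minimal standalone sketch of that selection logic follows; the types, table names and the is_dual_channel flag are illustrative stand-ins (the driver reads PCH_LVDS for the channel mode), not the i915 API.

    #include <stdbool.h>

    struct limit { const char *name; };

    static const struct limit single_lvds      = { "single" };
    static const struct limit single_lvds_100m = { "single, 100 MHz SSC" };
    static const struct limit dual_lvds        = { "dual" };
    static const struct limit dual_lvds_100m   = { "dual, 100 MHz SSC" };

    /* Pick a PLL limit table from the reference clock and channel count,
     * mirroring the structure of the hunk above. */
    const struct limit *pick_lvds_limit(bool use_ssc, int ssc_freq_mhz,
                                        bool is_dual_channel)
    {
        int refclk = 120;

        if (use_ssc && ssc_freq_mhz == 100)
            refclk = 100;

        if (is_dual_channel)
            return refclk == 100 ? &dual_lvds_100m : &dual_lvds;
        return refclk == 100 ? &single_lvds_100m : &single_lvds;
    }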
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
914 1031
915 /* enable it... */ 1032 /* enable it... */
916 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1033 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1034 if (IS_I945GM(dev))
1035 fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
917 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1036 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
918 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1037 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
919 if (obj_priv->tiling_mode != I915_TILING_NONE) 1038 if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -3962,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3962struct intel_unpin_work { 4081struct intel_unpin_work {
3963 struct work_struct work; 4082 struct work_struct work;
3964 struct drm_device *dev; 4083 struct drm_device *dev;
3965 struct drm_gem_object *obj; 4084 struct drm_gem_object *old_fb_obj;
4085 struct drm_gem_object *pending_flip_obj;
3966 struct drm_pending_vblank_event *event; 4086 struct drm_pending_vblank_event *event;
3967 int pending; 4087 int pending;
3968}; 4088};
@@ -3973,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
3973 container_of(__work, struct intel_unpin_work, work); 4093 container_of(__work, struct intel_unpin_work, work);
3974 4094
3975 mutex_lock(&work->dev->struct_mutex); 4095 mutex_lock(&work->dev->struct_mutex);
3976 i915_gem_object_unpin(work->obj); 4096 i915_gem_object_unpin(work->old_fb_obj);
3977 drm_gem_object_unreference(work->obj); 4097 drm_gem_object_unreference(work->pending_flip_obj);
4098 drm_gem_object_unreference(work->old_fb_obj);
3978 mutex_unlock(&work->dev->struct_mutex); 4099 mutex_unlock(&work->dev->struct_mutex);
3979 kfree(work); 4100 kfree(work);
3980} 4101}
@@ -3998,7 +4119,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
3998 work = intel_crtc->unpin_work; 4119 work = intel_crtc->unpin_work;
3999 if (work == NULL || !work->pending) { 4120 if (work == NULL || !work->pending) {
4000 if (work && !work->pending) { 4121 if (work && !work->pending) {
4001 obj_priv = work->obj->driver_private; 4122 obj_priv = work->pending_flip_obj->driver_private;
4002 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", 4123 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4003 obj_priv, 4124 obj_priv,
4004 atomic_read(&obj_priv->pending_flip)); 4125 atomic_read(&obj_priv->pending_flip));
@@ -4023,7 +4144,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4023 4144
4024 spin_unlock_irqrestore(&dev->event_lock, flags); 4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4025 4146
4026 obj_priv = work->obj->driver_private; 4147 obj_priv = work->pending_flip_obj->driver_private;
4027 4148
4028 /* Initial scanout buffer will have a 0 pending flip count */ 4149 /* Initial scanout buffer will have a 0 pending flip count */
4029 if ((atomic_read(&obj_priv->pending_flip) == 0) || 4150 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4060,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4060 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4061 struct intel_unpin_work *work; 4182 struct intel_unpin_work *work;
4062 unsigned long flags; 4183 unsigned long flags;
4063 int ret; 4184 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
4185 int ret, pipesrc;
4064 RING_LOCALS; 4186 RING_LOCALS;
4065 4187
4066 work = kzalloc(sizeof *work, GFP_KERNEL); 4188 work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4072,7 +4194,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4072 work->event = event; 4194 work->event = event;
4073 work->dev = crtc->dev; 4195 work->dev = crtc->dev;
4074 intel_fb = to_intel_framebuffer(crtc->fb); 4196 intel_fb = to_intel_framebuffer(crtc->fb);
4075 work->obj = intel_fb->obj; 4197 work->old_fb_obj = intel_fb->obj;
4076 INIT_WORK(&work->work, intel_unpin_work_fn); 4198 INIT_WORK(&work->work, intel_unpin_work_fn);
4077 4199
4078 /* We borrow the event spin lock for protecting unpin_work */ 4200 /* We borrow the event spin lock for protecting unpin_work */
@@ -4100,14 +4222,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4100 return ret; 4222 return ret;
4101 } 4223 }
4102 4224
4103 /* Reference the old fb object for the scheduled work. */ 4225 /* Reference the objects for the scheduled work. */
4104 drm_gem_object_reference(work->obj); 4226 drm_gem_object_reference(work->old_fb_obj);
4227 drm_gem_object_reference(obj);
4105 4228
4106 crtc->fb = fb; 4229 crtc->fb = fb;
4107 i915_gem_object_flush_write_domain(obj); 4230 i915_gem_object_flush_write_domain(obj);
4108 drm_vblank_get(dev, intel_crtc->pipe); 4231 drm_vblank_get(dev, intel_crtc->pipe);
4109 obj_priv = obj->driver_private; 4232 obj_priv = obj->driver_private;
4110 atomic_inc(&obj_priv->pending_flip); 4233 atomic_inc(&obj_priv->pending_flip);
4234 work->pending_flip_obj = obj;
4111 4235
4112 BEGIN_LP_RING(4); 4236 BEGIN_LP_RING(4);
4113 OUT_RING(MI_DISPLAY_FLIP | 4237 OUT_RING(MI_DISPLAY_FLIP |
@@ -4115,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4115 OUT_RING(fb->pitch); 4239 OUT_RING(fb->pitch);
4116 if (IS_I965G(dev)) { 4240 if (IS_I965G(dev)) {
4117 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 4241 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4118 OUT_RING((fb->width << 16) | fb->height); 4242 pipesrc = I915_READ(pipesrc_reg);
4243 OUT_RING(pipesrc & 0x0fff0fff);
4119 } else { 4244 } else {
4120 OUT_RING(obj_priv->gtt_offset); 4245 OUT_RING(obj_priv->gtt_offset);
4121 OUT_RING(MI_NOOP); 4246 OUT_RING(MI_NOOP);
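The page-flip changes above split the single work->obj reference into old_fb_obj and pending_flip_obj, so the deferred work can unpin the outgoing framebuffer while still holding the incoming buffer until the flip completes. A compact model of that bookkeeping, with plain refcount helpers standing in for the GEM reference API:

    #include <stdlib.h>

    /* Plain refcount helpers stand in for drm_gem_object_(un)reference. */
    struct gem_obj { int refcount; };

    static void ref_get(struct gem_obj *o) { o->refcount++; }
    static void ref_put(struct gem_obj *o) { o->refcount--; }

    struct unpin_work {
        struct gem_obj *old_fb_obj;       /* buffer being flipped away from */
        struct gem_obj *pending_flip_obj; /* buffer being flipped to */
    };

    /* Take a reference on both objects before the flip is emitted. */
    struct unpin_work *queue_flip(struct gem_obj *old_fb, struct gem_obj *new_fb)
    {
        struct unpin_work *w = calloc(1, sizeof(*w));

        if (!w)
            return NULL;
        w->old_fb_obj = old_fb;
        w->pending_flip_obj = new_fb;
        ref_get(old_fb);
        ref_get(new_fb);
        return w;
    }

    /* Runs from the deferred work item once the flip has landed: release the
     * old framebuffer and drop the extra reference on the pending object. */
    void finish_flip(struct unpin_work *w)
    {
        ref_put(w->pending_flip_obj);
        ref_put(w->old_fb_obj);
        free(w);
    }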
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
148 148
149 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
150 150
151 ret = i915_gem_object_pin(fbo, PAGE_SIZE); 151 ret = i915_gem_object_pin(fbo, 64*1024);
152 if (ret) { 152 if (ret) {
153 DRM_ERROR("failed to pin fb: %d\n", ret); 153 DRM_ERROR("failed to pin fb: %d\n", ret);
154 goto out_unref; 154 goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b1d0acbae4e4..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), 636 DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
637 }, 637 },
638 }, 638 },
639 {
640 .ident = "Clevo M5x0N",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
643 DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
644 },
645 },
639 { } 646 { }
640}; 647};
641 648
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
90{ 90{
91 int result; 91 int result;
92 92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY, 93 if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
94 &result)) 94 &result))
95 return -ENODEV; 95 return -ENODEV;
96 96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); 97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98 98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */ 99 if (result) { /* Ensure that the external GPU is enabled */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
101 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
102 NULL);
103 } else { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, 104 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL); 105 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, 106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL); 107 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 } 108 }
109 109
110 return 0; 110 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d7f8d8b4a4b8..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1865 1865
1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private; 1866 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1867 1867
1868 if (dev_priv->card_type >= NV_50) 1868 if (dev_priv->card_type >= NV_40)
1869 return 1; 1869 return 1;
1870 1870
1871 /* 1871 /*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3765 */ 3765 */
3766 3766
3767 struct drm_nouveau_private *dev_priv = dev->dev_private; 3767 struct drm_nouveau_private *dev_priv = dev->dev_private;
3768 struct init_exec iexec = {true, false};
3769 struct nvbios *bios = &dev_priv->VBIOS; 3768 struct nvbios *bios = &dev_priv->VBIOS;
3770 uint8_t *table = &bios->data[bios->display.script_table_ptr]; 3769 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3771 uint8_t *otable = NULL; 3770 uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3845 } 3844 }
3846 } 3845 }
3847 3846
3848 bios->display.output = dcbent;
3849
3850 if (pxclk == 0) { 3847 if (pxclk == 0) {
3851 script = ROM16(otable[6]); 3848 script = ROM16(otable[6]);
3852 if (!script) { 3849 if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3855 } 3852 }
3856 3853
3857 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script); 3854 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
3858 parse_init_table(bios, script, &iexec); 3855 nouveau_bios_run_init_table(dev, script, dcbent);
3859 } else 3856 } else
3860 if (pxclk == -1) { 3857 if (pxclk == -1) {
3861 script = ROM16(otable[8]); 3858 script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3865 } 3862 }
3866 3863
3867 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script); 3864 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
3868 parse_init_table(bios, script, &iexec); 3865 nouveau_bios_run_init_table(dev, script, dcbent);
3869 } else 3866 } else
3870 if (pxclk == -2) { 3867 if (pxclk == -2) {
3871 if (table[4] >= 12) 3868 if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3878 } 3875 }
3879 3876
3880 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script); 3877 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
3881 parse_init_table(bios, script, &iexec); 3878 nouveau_bios_run_init_table(dev, script, dcbent);
3882 } else 3879 } else
3883 if (pxclk > 0) { 3880 if (pxclk > 0) {
3884 script = ROM16(otable[table[4] + i*6 + 2]); 3881 script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3890 } 3887 }
3891 3888
3892 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script); 3889 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
3893 parse_init_table(bios, script, &iexec); 3890 nouveau_bios_run_init_table(dev, script, dcbent);
3894 } else 3891 } else
3895 if (pxclk < 0) { 3892 if (pxclk < 0) {
3896 script = ROM16(otable[table[4] + i*6 + 4]); 3893 script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3902 } 3899 }
3903 3900
3904 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script); 3901 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
3905 parse_init_table(bios, script, &iexec); 3902 nouveau_bios_run_init_table(dev, script, dcbent);
3906 } 3903 }
3907 3904
3908 return 0; 3905 return 0;
@@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5865 struct nvbios *bios = &dev_priv->VBIOS; 5862 struct nvbios *bios = &dev_priv->VBIOS;
5866 struct init_exec iexec = { true, false }; 5863 struct init_exec iexec = { true, false };
5867 5864
5865 mutex_lock(&bios->lock);
5868 bios->display.output = dcbent; 5866 bios->display.output = dcbent;
5869 parse_init_table(bios, table, &iexec); 5867 parse_init_table(bios, table, &iexec);
5870 bios->display.output = NULL; 5868 bios->display.output = NULL;
5869 mutex_unlock(&bios->lock);
5871} 5870}
5872 5871
5873static bool NVInitVBIOS(struct drm_device *dev) 5872static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
5876 struct nvbios *bios = &dev_priv->VBIOS; 5875 struct nvbios *bios = &dev_priv->VBIOS;
5877 5876
5878 memset(bios, 0, sizeof(struct nvbios)); 5877 memset(bios, 0, sizeof(struct nvbios));
5878 mutex_init(&bios->lock);
5879 bios->dev = dev; 5879 bios->dev = dev;
5880 5880
5881 if (!NVShadowVBIOS(dev, bios->data)) 5881 if (!NVShadowVBIOS(dev, bios->data))
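The nouveau_bios changes route every display script through nouveau_bios_run_init_table() and wrap it in the new bios->lock, since bios->display.output is shared scratch state for the parser. A small sketch of that serialization, using a pthread mutex as a stand-in for the kernel's struct mutex and a stub in place of the real parser:

    #include <pthread.h>
    #include <stddef.h>

    struct bios_state {
        pthread_mutex_t lock;
        const void *display_output;        /* shared scratch used by the parser */
    };

    static struct bios_state bios = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .display_output = NULL,
    };

    static void parse_init_table_stub(struct bios_state *b, unsigned table)
    {
        (void)b; (void)table;              /* stands in for the real table parser */
    }

    void run_init_table(unsigned table, const void *dcbent)
    {
        /* One lock covers both the shared scratch pointer and the parse run. */
        pthread_mutex_lock(&bios.lock);
        bios.display_output = dcbent;
        parse_init_table_stub(&bios, table);
        bios.display_output = NULL;
        pthread_mutex_unlock(&bios.lock);
    }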
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
205 struct drm_device *dev; 205 struct drm_device *dev;
206 struct nouveau_bios_info pub; 206 struct nouveau_bios_info pub;
207 207
208 struct mutex lock;
209
208 uint8_t data[NV_PROM_SIZE]; 210 uint8_t data[NV_PROM_SIZE];
209 unsigned int length; 211 unsigned int length;
210 bool execute; 212 bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db0ed4c13f98..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
65 65
66 /* 66 /*
67 * Some of the tile_flags have a periodic structure of N*4096 bytes, 67 * Some of the tile_flags have a periodic structure of N*4096 bytes,
68 * align to that as well as the page size. Overallocate memory to 68 * align to that as well as the page size. Align the size to the
69 * avoid corruption of other buffer objects. 69 * appropriate boundaries. This does imply that sizes are rounded up
70 * 3-7 pages, so be aware of this and do not waste memory by allocating
71 * many small buffers.
70 */ 72 */
71 if (dev_priv->card_type == NV_50) { 73 if (dev_priv->card_type == NV_50) {
72 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; 74 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
77 case 0x2800: 79 case 0x2800:
78 case 0x4800: 80 case 0x4800:
79 case 0x7a00: 81 case 0x7a00:
80 *size = roundup(*size, block_size);
81 if (is_power_of_2(block_size)) { 82 if (is_power_of_2(block_size)) {
82 *size += 3 * block_size;
83 for (i = 1; i < 10; i++) { 83 for (i = 1; i < 10; i++) {
84 *align = 12 * i * block_size; 84 *align = 12 * i * block_size;
85 if (!(*align % 65536)) 85 if (!(*align % 65536))
86 break; 86 break;
87 } 87 }
88 } else { 88 } else {
89 *size += 6 * block_size;
90 for (i = 1; i < 10; i++) { 89 for (i = 1; i < 10; i++) {
91 *align = 8 * i * block_size; 90 *align = 8 * i * block_size;
92 if (!(*align % 65536)) 91 if (!(*align % 65536))
93 break; 92 break;
94 } 93 }
95 } 94 }
95 *size = roundup(*size, *align);
96 break; 96 break;
97 default: 97 default:
98 break; 98 break;
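The buffer-alignment hunk above stops padding the allocation by a few blocks and instead derives an alignment that is a multiple of both the tile block size and 64 KiB, then rounds the size up to it. A self-contained sketch of that calculation (names are illustrative, not the nouveau_bo API):

    #include <stdint.h>
    #include <stdbool.h>

    static bool is_pow2(uint32_t v) { return v && !(v & (v - 1)); }

    /* Pick an alignment that is a multiple of both the tile block size and
     * 64 KiB, then round the size up to it instead of over-allocating. */
    void fixup_align(uint32_t block_size, uint32_t *size, uint32_t *align)
    {
        uint32_t step = is_pow2(block_size) ? 12 : 8;
        int i;

        if (block_size == 0)
            return;                        /* nothing tile-specific to do */

        for (i = 1; i < 10; i++) {
            *align = step * i * block_size;
            if ((*align % 65536) == 0)
                break;
        }
        *size = (*size + *align - 1) / *align * *align;   /* roundup(size, align) */
    }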
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
278 /* Ensure the channel is no longer active on the GPU */ 278 /* Ensure the channel is no longer active on the GPU */
279 pfifo->reassign(dev, false); 279 pfifo->reassign(dev, false);
280 280
281 if (pgraph->channel(dev) == chan) { 281 pgraph->fifo_access(dev, false);
282 pgraph->fifo_access(dev, false); 282 if (pgraph->channel(dev) == chan)
283 pgraph->unload_context(dev); 283 pgraph->unload_context(dev);
284 pgraph->fifo_access(dev, true);
285 }
286 pgraph->destroy_context(chan); 284 pgraph->destroy_context(chan);
285 pgraph->fifo_access(dev, true);
287 286
288 if (pfifo->channel_id(dev) == chan->id) { 287 if (pfifo->channel_id(dev) == chan->id) {
289 pfifo->disable(dev); 288 pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e6d673f3a23..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
88{ 88{
89 struct nouveau_connector *nv_connector = 89 struct nouveau_connector *nv_connector =
90 nouveau_connector(drm_connector); 90 nouveau_connector(drm_connector);
91 struct drm_device *dev = nv_connector->base.dev; 91 struct drm_device *dev;
92
93 NV_DEBUG_KMS(dev, "\n");
94 92
95 if (!nv_connector) 93 if (!nv_connector)
96 return; 94 return;
97 95
96 dev = nv_connector->base.dev;
97 NV_DEBUG_KMS(dev, "\n");
98
98 kfree(nv_connector->edid); 99 kfree(nv_connector->edid);
99 drm_sysfs_connector_remove(drm_connector); 100 drm_sysfs_connector_remove(drm_connector);
100 drm_connector_cleanup(drm_connector); 101 drm_connector_cleanup(drm_connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index dd4937224220..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 break; 502 break;
503 } 503 }
504 504
505 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
506 ret = -EREMOTEIO;
507 goto out;
508 }
509
510 if (cmd & 1) { 505 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
511 for (i = 0; i < 4; i++) { 511 for (i = 0; i < 4; i++) {
512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); 512 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); 513 NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 343ab7f17ccc..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); 56module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
57 57
58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); 58MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
59int nouveau_vram_notify; 59int nouveau_vram_notify = 1;
60module_param_named(vram_notify, nouveau_vram_notify, int, 0400); 60module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
61 61
62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); 62MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
75int nouveau_ignorelid = 0; 75int nouveau_ignorelid = 0;
76module_param_named(ignorelid, nouveau_ignorelid, int, 0400); 76module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
77 77
78MODULE_PARM_DESC(noaccel, "Disable all acceleration");
79int nouveau_noaccel = 0;
80module_param_named(noaccel, nouveau_noaccel, int, 0400);
81
82MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85
78MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
79 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
80 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 6b9690418bc7..1c15ef37b71c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
583 uint64_t vm_end; 583 uint64_t vm_end;
584 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 584 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
585 int vm_vram_pt_nr; 585 int vm_vram_pt_nr;
586 uint64_t vram_sys_base;
586 587
587 /* the mtrr covering the FB */ 588 /* the mtrr covering the FB */
588 int fb_mtrr; 589 int fb_mtrr;
@@ -678,6 +679,8 @@ extern int nouveau_reg_debug;
678extern char *nouveau_vbios; 679extern char *nouveau_vbios;
679extern int nouveau_ctxfw; 680extern int nouveau_ctxfw;
680extern int nouveau_ignorelid; 681extern int nouveau_ignorelid;
682extern int nouveau_nofbaccel;
683extern int nouveau_noaccel;
681 684
682/* nouveau_state.c */ 685/* nouveau_state.c */
683extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); 686extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0b05c869e0e7..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
107 .fb_setcmap = drm_fb_helper_setcmap, 107 .fb_setcmap = drm_fb_helper_setcmap,
108}; 108};
109 109
110static struct fb_ops nv04_fbcon_ops = {
111 .owner = THIS_MODULE,
112 .fb_check_var = drm_fb_helper_check_var,
113 .fb_set_par = drm_fb_helper_set_par,
114 .fb_setcolreg = drm_fb_helper_setcolreg,
115 .fb_fillrect = nv04_fbcon_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122};
123
124static struct fb_ops nv50_fbcon_ops = {
125 .owner = THIS_MODULE,
126 .fb_check_var = drm_fb_helper_check_var,
127 .fb_set_par = drm_fb_helper_set_par,
128 .fb_setcolreg = drm_fb_helper_setcolreg,
129 .fb_fillrect = nv50_fbcon_fillrect,
130 .fb_copyarea = nv50_fbcon_copyarea,
131 .fb_imageblit = nv50_fbcon_imageblit,
132 .fb_sync = nouveau_fbcon_sync,
133 .fb_pan_display = drm_fb_helper_pan_display,
134 .fb_blank = drm_fb_helper_blank,
135 .fb_setcmap = drm_fb_helper_setcmap,
136};
137
110static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 138static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
111 u16 blue, int regno) 139 u16 blue, int regno)
112{ 140{
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
267 dev_priv->fbdev_info = info; 295 dev_priv->fbdev_info = info;
268 296
269 strcpy(info->fix.id, "nouveaufb"); 297 strcpy(info->fix.id, "nouveaufb");
270 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | 298 if (nouveau_nofbaccel)
271 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; 299 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
300 else
301 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
302 FBINFO_HWACCEL_FILLRECT |
303 FBINFO_HWACCEL_IMAGEBLIT;
272 info->fbops = &nouveau_fbcon_ops; 304 info->fbops = &nouveau_fbcon_ops;
273 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 305 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
274 dev_priv->vm_vram_base; 306 dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
316 par->nouveau_fb = nouveau_fb; 348 par->nouveau_fb = nouveau_fb;
317 par->dev = dev; 349 par->dev = dev;
318 350
319 if (dev_priv->channel) { 351 if (dev_priv->channel && !nouveau_nofbaccel) {
320 switch (dev_priv->card_type) { 352 switch (dev_priv->card_type) {
321 case NV_50: 353 case NV_50:
322 nv50_fbcon_accel_init(info); 354 nv50_fbcon_accel_init(info);
355 info->fbops = &nv50_fbcon_ops;
323 break; 356 break;
324 default: 357 default:
325 nv04_fbcon_accel_init(info); 358 nv04_fbcon_accel_init(info);
359 info->fbops = &nv04_fbcon_ops;
326 break; 360 break;
327 }; 361 };
328 } 362 }
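Instead of patching the fb_fillrect/fb_copyarea/fb_imageblit pointers after acceleration init, the fbcon changes above give each generation its own constant fb_ops table and pick one (or none, when nofbaccel is set) at create time. A toy version of that selection, with made-up types and card_type values:

    #include <stdio.h>

    struct fb_ops_sketch {
        void (*fillrect)(void);            /* one hook is enough for the sketch */
    };

    static void nv04_fill(void) { puts("nv04 fillrect"); }
    static void nv50_fill(void) { puts("nv50 fillrect"); }

    static const struct fb_ops_sketch nv04_ops = { .fillrect = nv04_fill };
    static const struct fb_ops_sketch nv50_ops = { .fillrect = nv50_fill };

    /* Chosen once at init; NULL means "use the unaccelerated helpers". */
    const struct fb_ops_sketch *select_fbcon_ops(int card_type, int nofbaccel)
    {
        if (nofbaccel)
            return NULL;
        return card_type >= 0x50 ? &nv50_ops : &nv04_ops;
    }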
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 462e0b87b4bd..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void); 40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev); 41void nouveau_fbcon_zfill(struct drm_device *dev);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
43int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
44int nv50_fbcon_accel_init(struct fb_info *info); 50int nv50_fbcon_accel_init(struct fb_info *info);
45 51
46void nouveau_fbcon_gpu_lockup(struct fb_info *info); 52void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6ac804b0c9f9..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
925 } 925 }
926 926
927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { 927 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
928 spin_lock(&nvbo->bo.lock);
928 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); 929 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
930 spin_unlock(&nvbo->bo.lock);
929 } else { 931 } else {
930 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); 932 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
931 if (ret == 0) 933 if (ret == 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
97 } 97 }
98 98
99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); 99 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
100 if (!pgraph->ctxprog) { 100 if (!pgraph->ctxvals) {
101 NV_ERROR(dev, "OOM copying ctxprog\n"); 101 NV_ERROR(dev, "OOM copying ctxvals\n");
102 release_firmware(fw); 102 release_firmware(fw);
103 nouveau_grctx_fini(dev); 103 nouveau_grctx_fini(dev);
104 return -ENOMEM; 104 return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 3b9bad66162a..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
211 get + 4); 211 get + 4);
212 } 212 }
213 213
214 if (status & NV_PFIFO_INTR_SEMAPHORE) {
215 uint32_t sem;
216
217 status &= ~NV_PFIFO_INTR_SEMAPHORE;
218 nv_wr32(dev, NV03_PFIFO_INTR_0,
219 NV_PFIFO_INTR_SEMAPHORE);
220
221 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
222 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
223
224 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
225 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
226 }
227
214 if (status) { 228 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", 229 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid); 230 status, chid);
@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
566static void 580static void
567nv50_pgraph_irq_handler(struct drm_device *dev) 581nv50_pgraph_irq_handler(struct drm_device *dev)
568{ 582{
569 uint32_t status, nsource; 583 uint32_t status;
570 584
571 status = nv_rd32(dev, NV03_PGRAPH_INTR); 585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
572 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
573 587
574 if (status & 0x00000001) { 588 if (status & 0x00000001) {
575 nouveau_pgraph_intr_notify(dev, nsource); 589 nouveau_pgraph_intr_notify(dev, nsource);
576 status &= ~0x00000001; 590 status &= ~0x00000001;
577 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
578 } 592 }
579 593
580 if (status & 0x00000010) { 594 if (status & 0x00000010) {
581 nouveau_pgraph_intr_error(dev, nsource | 595 nouveau_pgraph_intr_error(dev, nsource |
582 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
583 597
584 status &= ~0x00000010; 598 status &= ~0x00000010;
585 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
586 } 600 }
587 601
588 if (status & 0x00001000) { 602 if (status & 0x00001000) {
589 nv_wr32(dev, 0x400500, 0x00000000); 603 nv_wr32(dev, 0x400500, 0x00000000);
590 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); 604 nv_wr32(dev, NV03_PGRAPH_INTR,
591 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, 605 NV_PGRAPH_INTR_CONTEXT_SWITCH);
592 NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH); 606 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
593 nv_wr32(dev, 0x400500, 0x00010001); 607 NV40_PGRAPH_INTR_EN) &
608 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
609 nv_wr32(dev, 0x400500, 0x00010001);
594 610
595 nv50_graph_context_switch(dev); 611 nv50_graph_context_switch(dev);
596 612
597 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
598 } 614 }
599 615
600 if (status & 0x00100000) { 616 if (status & 0x00100000) {
601 nouveau_pgraph_intr_error(dev, nsource | 617 nouveau_pgraph_intr_error(dev, nsource |
602 NV03_PGRAPH_NSOURCE_DATA_ERROR); 618 NV03_PGRAPH_NSOURCE_DATA_ERROR);
603 619
604 status &= ~0x00100000; 620 status &= ~0x00100000;
605 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
606 } 622 }
607 623
608 if (status & 0x00200000) { 624 if (status & 0x00200000) {
609 int r; 625 int r;
610 626
611 nouveau_pgraph_intr_error(dev, nsource | 627 nouveau_pgraph_intr_error(dev, nsource |
612 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR); 628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
613 629
614 NV_ERROR(dev, "magic set 1:\n"); 630 NV_ERROR(dev, "magic set 1:\n");
615 for (r = 0x408900; r <= 0x408910; r += 4) 631 for (r = 0x408900; r <= 0x408910; r += 4)
616 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
617 nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000); 633 nv_rd32(dev, r));
618 for (r = 0x408e08; r <= 0x408e24; r += 4) 634 nv_wr32(dev, 0x408900,
619 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 635 nv_rd32(dev, 0x408904) | 0xc0000000);
620 nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000); 636 for (r = 0x408e08; r <= 0x408e24; r += 4)
621 637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
622 NV_ERROR(dev, "magic set 2:\n"); 638 nv_rd32(dev, r));
623 for (r = 0x409900; r <= 0x409910; r += 4) 639 nv_wr32(dev, 0x408e08,
624 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 640 nv_rd32(dev, 0x408e08) | 0xc0000000);
625 nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000); 641
626 for (r = 0x409e08; r <= 0x409e24; r += 4) 642 NV_ERROR(dev, "magic set 2:\n");
627 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r)); 643 for (r = 0x409900; r <= 0x409910; r += 4)
628 nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000); 644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
629 645 nv_rd32(dev, r));
630 status &= ~0x00200000; 646 nv_wr32(dev, 0x409900,
631 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource); 647 nv_rd32(dev, 0x409904) | 0xc0000000);
632 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 648 for (r = 0x409e08; r <= 0x409e24; r += 4)
633 } 649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 }
634 658
635 if (status) { 659 if (status) {
636 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); 660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
637 nv_wr32(dev, NV03_PGRAPH_INTR, status); 661 status);
638 } 662 nv_wr32(dev, NV03_PGRAPH_INTR, status);
663 }
639 664
640 { 665 {
641 const int isb = (1 << 16) | (1 << 0); 666 const int isb = (1 << 16) | (1 << 0);
642 667
643 if ((nv_rd32(dev, 0x400500) & isb) != isb) 668 if ((nv_rd32(dev, 0x400500) & isb) != isb)
644 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb); 669 nv_wr32(dev, 0x400500,
645 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 670 nv_rd32(dev, 0x400500) | isb);
671 }
646 } 672 }
647 673
648 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
649} 676}
650 677
651static void 678static void
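The nv50 PGRAPH handler above now loops, re-reading NV03_PGRAPH_INTR until it is clear and acknowledging each source as it is handled, instead of processing a single snapshot of the status. A minimal, self-contained model of that loop shape; the fake register and helper names are placeholders, not nouveau functions:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_intr = 0x00000011;          /* two pending sources */

    static uint32_t read_intr(void)          { return fake_intr; }
    static void     ack_intr(uint32_t bits)  { fake_intr &= ~bits; }
    static void     handle_bit(uint32_t bit) { printf("handled 0x%08x\n", (unsigned)bit); }

    /* Keep re-reading the status until it is clear, acknowledging each
     * handled source so interrupts raised meanwhile are not lost. */
    void irq_handler_loop(void)
    {
        uint32_t status;

        while ((status = read_intr()) != 0) {
            uint32_t bit;

            for (bit = 1; bit != 0; bit <<= 1) {
                if (!(status & bit))
                    continue;
                handle_bit(bit);
                ack_intr(bit);
            }
        }
    }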
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 8f3a12f614ed..2dc09dbd817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
285 uint32_t flags, uint64_t phys) 285 uint32_t flags, uint64_t phys)
286{ 286{
287 struct drm_nouveau_private *dev_priv = dev->dev_private; 287 struct drm_nouveau_private *dev_priv = dev->dev_private;
288 struct nouveau_gpuobj **pgt; 288 struct nouveau_gpuobj *pgt;
289 unsigned psz, pfl, pages; 289 unsigned block;
290 290 int i;
291 if (virt >= dev_priv->vm_gart_base &&
292 (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
293 psz = 12;
294 pgt = &dev_priv->gart_info.sg_ctxdma;
295 pfl = 0x21;
296 virt -= dev_priv->vm_gart_base;
297 } else
298 if (virt >= dev_priv->vm_vram_base &&
299 (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
300 psz = 16;
301 pgt = dev_priv->vm_vram_pt;
302 pfl = 0x01;
303 virt -= dev_priv->vm_vram_base;
304 } else {
305 NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
306 virt, virt + size - 1);
307 return -EINVAL;
308 }
309 291
310 pages = size >> psz; 292 virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
293 size = (size >> 16) << 1;
294
295 phys |= ((uint64_t)flags << 32);
296 phys |= 1;
297 if (dev_priv->vram_sys_base) {
298 phys += dev_priv->vram_sys_base;
299 phys |= 0x30;
300 }
311 301
312 dev_priv->engine.instmem.prepare_access(dev, true); 302 dev_priv->engine.instmem.prepare_access(dev, true);
313 if (flags & 0x80000000) { 303 while (size) {
314 while (pages--) { 304 unsigned offset_h = upper_32_bits(phys);
315 struct nouveau_gpuobj *pt = pgt[virt >> 29]; 305 unsigned offset_l = lower_32_bits(phys);
316 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1; 306 unsigned pte, end;
307
308 for (i = 7; i >= 0; i--) {
309 block = 1 << (i + 1);
310 if (size >= block && !(virt & (block - 1)))
311 break;
312 }
313 offset_l |= (i << 7);
317 314
318 nv_wo32(dev, pt, pte++, 0x00000000); 315 phys += block << 15;
319 nv_wo32(dev, pt, pte++, 0x00000000); 316 size -= block;
320 317
321 virt += (1 << psz); 318 while (block) {
322 } 319 pgt = dev_priv->vm_vram_pt[virt >> 14];
323 } else { 320 pte = virt & 0x3ffe;
324 while (pages--) {
325 struct nouveau_gpuobj *pt = pgt[virt >> 29];
326 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
327 unsigned offset_h = upper_32_bits(phys) & 0xff;
328 unsigned offset_l = lower_32_bits(phys);
329 321
330 nv_wo32(dev, pt, pte++, offset_l | pfl); 322 end = pte + block;
331 nv_wo32(dev, pt, pte++, offset_h | flags); 323 if (end > 16384)
324 end = 16384;
325 block -= (end - pte);
326 virt += (end - pte);
332 327
333 phys += (1 << psz); 328 while (pte < end) {
334 virt += (1 << psz); 329 nv_wo32(dev, pgt, pte++, offset_l);
330 nv_wo32(dev, pgt, pte++, offset_h);
331 }
335 } 332 }
336 } 333 }
337 dev_priv->engine.instmem.finish_access(dev); 334 dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
356void 353void
357nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) 354nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
358{ 355{
359 nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0); 356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 struct nouveau_gpuobj *pgt;
358 unsigned pages, pte, end;
359
360 virt -= dev_priv->vm_vram_base;
361 pages = (size >> 16) << 1;
362
363 dev_priv->engine.instmem.prepare_access(dev, true);
364 while (pages) {
365 pgt = dev_priv->vm_vram_pt[virt >> 29];
366 pte = (virt & 0x1ffe0000ULL) >> 15;
367
368 end = pte + pages;
369 if (end > 16384)
370 end = 16384;
371 pages -= (end - pte);
372 virt += (end - pte) << 15;
373
374 while (pte < end)
375 nv_wo32(dev, pgt, pte++, 0);
376 }
377 dev_priv->engine.instmem.finish_access(dev);
378
379 nv_wr32(dev, 0x100c80, 0x00050001);
380 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
381 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
382 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
383 return;
384 }
385
386 nv_wr32(dev, 0x100c80, 0x00000001);
387 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
388 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
389 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
390 }
360} 391}
361 392
362/* 393/*
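The rewritten nv50_mem_vm_bind_linear() above walks the range in variable-size blocks: at each step it picks the largest power-of-two block that fits the remaining size and keeps the virtual address aligned, encodes the block order into the low PTE word, and advances. The sketch below models only that block-selection step; the real code then writes one PTE pair per page inside the block, for which a single write_pte() call stands in here (all names are illustrative):

    #include <stdint.h>

    /* Stands in for the pair of nv_wo32() writes; the real code repeats
     * this for every page inside the chosen block. */
    static void write_pte(uint32_t pte_index, uint32_t lo, uint32_t hi)
    {
        (void)pte_index; (void)lo; (void)hi;
    }

    /* virt_pages/size_pages are in 64 KiB pages; phys is a byte address. */
    void bind_linear_sketch(uint32_t virt_pages, uint32_t size_pages, uint64_t phys)
    {
        while (size_pages) {
            uint32_t block;
            int i;

            /* Largest power-of-two block (up to 128 pages) that both fits
             * the remaining size and keeps the virtual address aligned. */
            for (i = 7; i >= 0; i--) {
                block = 1u << i;
                if (size_pages >= block && !(virt_pages & (block - 1)))
                    break;
            }

            write_pte(virt_pages,
                      (uint32_t)phys | (uint32_t)(i << 7) | 1u, /* addr | order | valid */
                      (uint32_t)(phys >> 32));

            phys       += (uint64_t)block << 16;
            virt_pages += block;
            size_pages -= block;
        }
    }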
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{ 34{
35 struct drm_device *dev = chan->dev; 35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL; 36 struct nouveau_bo *ntfy = NULL;
37 uint32_t flags;
37 int ret; 38 int ret;
38 39
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ? 40 if (nouveau_vram_notify)
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT, 41 flags = TTM_PL_FLAG_VRAM;
42 else
43 flags = TTM_PL_FLAG_TT;
44
45 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
41 0, 0x0000, false, true, &ntfy); 46 0, 0x0000, false, true, &ntfy);
42 if (ret) 47 if (ret)
43 return ret; 48 return ret;
44 49
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM); 50 ret = nouveau_bo_pin(ntfy, flags);
46 if (ret) 51 if (ret)
47 goto out_err; 52 goto out_err;
48 53
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
128 target = NV_DMA_TARGET_PCI; 133 target = NV_DMA_TARGET_PCI;
129 } else { 134 } else {
130 target = NV_DMA_TARGET_AGP; 135 target = NV_DMA_TARGET_AGP;
136 if (dev_priv->card_type >= NV_50)
137 offset += dev_priv->vm_gart_base;
131 } 138 }
132 } else { 139 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", 140 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 6c2cf81716df..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -885,11 +885,12 @@ int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret) 886 struct nouveau_gpuobj **gpuobj_ret)
887{ 887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 888 struct drm_nouveau_private *dev_priv;
889 struct nouveau_gpuobj *gpuobj; 889 struct nouveau_gpuobj *gpuobj;
890 890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) 891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL; 892 return -EINVAL;
893 dev_priv = chan->dev->dev_private;
893 894
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 895 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj) 896 if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 251f1b3b38b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
99 * the card will hang early on in the X init process. 99 * the card will hang early on in the X init process.
100 */ 100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13) 101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_GRAPH_UNITS 0x00001540
102#define NV40_PMC_BACKLIGHT 0x000015f0 103#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000 104# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700 105#define NV40_PMC_1700 0x00001700
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
54nouveau_sgdma_clear(struct ttm_backend *be) 54nouveau_sgdma_clear(struct ttm_backend *be)
55{ 55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; 56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev; 57 struct drm_device *dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60 58
61 if (nvbe && nvbe->pages) { 59 if (nvbe && nvbe->pages) {
60 dev = nvbe->dev;
61 NV_DEBUG(dev, "\n");
62
62 if (nvbe->bound) 63 if (nvbe->bound)
63 be->func->unbind(be); 64 be->func->unbind(be);
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index f2d0187ba152..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
310static unsigned int 310static unsigned int
311nouveau_vga_set_decode(void *priv, bool state) 311nouveau_vga_set_decode(void *priv, bool state)
312{ 312{
313 struct drm_device *dev = priv;
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315
316 if (dev_priv->chipset >= 0x40)
317 nv_wr32(dev, 0x88054, state);
318 else
319 nv_wr32(dev, 0x1854, state);
320
313 if (state) 321 if (state)
314 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 322 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
315 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 323 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
427 if (ret) 435 if (ret)
428 goto out_timer; 436 goto out_timer;
429 437
430 /* PGRAPH */ 438 if (nouveau_noaccel)
431 ret = engine->graph.init(dev); 439 engine->graph.accel_blocked = true;
432 if (ret) 440 else {
433 goto out_fb; 441 /* PGRAPH */
442 ret = engine->graph.init(dev);
443 if (ret)
444 goto out_fb;
434 445
435 /* PFIFO */ 446 /* PFIFO */
436 ret = engine->fifo.init(dev); 447 ret = engine->fifo.init(dev);
437 if (ret) 448 if (ret)
438 goto out_graph; 449 goto out_graph;
450 }
439 451
440 /* this call irq_preinstall, register irq handler and 452 /* this call irq_preinstall, register irq handler and
441 * call irq_postinstall 453 * call irq_postinstall
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
479out_irq: 491out_irq:
480 drm_irq_uninstall(dev); 492 drm_irq_uninstall(dev);
481out_fifo: 493out_fifo:
482 engine->fifo.takedown(dev); 494 if (!nouveau_noaccel)
495 engine->fifo.takedown(dev);
483out_graph: 496out_graph:
484 engine->graph.takedown(dev); 497 if (!nouveau_noaccel)
498 engine->graph.takedown(dev);
485out_fb: 499out_fb:
486 engine->fb.takedown(dev); 500 engine->fb.takedown(dev);
487out_timer: 501out_timer:
@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
518 dev_priv->channel = NULL; 532 dev_priv->channel = NULL;
519 } 533 }
520 534
521 engine->fifo.takedown(dev); 535 if (!nouveau_noaccel) {
522 engine->graph.takedown(dev); 536 engine->fifo.takedown(dev);
537 engine->graph.takedown(dev);
538 }
523 engine->fb.takedown(dev); 539 engine->fb.takedown(dev);
524 engine->timer.takedown(dev); 540 engine->timer.takedown(dev);
525 engine->mc.takedown(dev); 541 engine->mc.takedown(dev);
@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
817 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 833 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
818 getparam->value = dev_priv->vm_vram_base; 834 getparam->value = dev_priv->vm_vram_base;
819 break; 835 break;
836 case NOUVEAU_GETPARAM_GRAPH_UNITS:
837 /* NV40 and NV50 versions are quite different, but register
838 * address is the same. User is supposed to know the card
839 * family anyway... */
840 if (dev_priv->chipset >= 0x40) {
841 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
842 break;
843 }
844 /* FALLTHRU */
820 default: 845 default:
821 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); 846 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
822 return -EINVAL; 847 return -EINVAL;
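The nouveau_state changes above gate PGRAPH/PFIFO bring-up behind the new noaccel option and mark accel_blocked instead, with the matching takedown calls skipped on the error and teardown paths. A stripped-down sketch of that gating, with placeholder engine hooks:

    #include <stdbool.h>

    struct engine_sketch {
        bool accel_blocked;
        int (*graph_init)(void);           /* placeholder PGRAPH hook */
        int (*fifo_init)(void);            /* placeholder PFIFO hook */
    };

    /* When acceleration is disabled, skip the engine bring-up entirely and
     * record that, so the teardown path skips the matching takedowns too. */
    int init_accel(struct engine_sketch *e, bool noaccel)
    {
        int ret;

        if (noaccel) {
            e->accel_blocked = true;
            return 0;
        }

        ret = e->graph_init();
        if (ret)
            return ret;
        return e->fifo_init();
    }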
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d0e038d28948..1d73b15d70da 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
119 struct drm_connector *connector) 119 struct drm_connector *connector)
120{ 120{
121 struct drm_device *dev = encoder->dev; 121 struct drm_device *dev = encoder->dev;
122 uint8_t saved_seq1, saved_pi, saved_rpc1; 122 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
123 uint8_t saved_palette0[3], saved_palette_mask; 123 uint8_t saved_palette0[3], saved_palette_mask;
124 uint32_t saved_rtest_ctrl, saved_rgen_ctrl; 124 uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
125 int i; 125 int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
135 /* only implemented for head A for now */ 135 /* only implemented for head A for now */
136 NVSetOwner(dev, 0); 136 NVSetOwner(dev, 0);
137 137
138 saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
139 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
140
138 saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX); 141 saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
139 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20); 142 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
140 143
@@ -203,6 +206,7 @@ out:
203 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); 206 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
204 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); 207 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
205 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); 208 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
209 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
206 210
207 if (blue == 0x18) { 211 if (blue == 0x18) {
208 NV_INFO(dev, "Load detected on head A\n"); 212 NV_INFO(dev, "Load detected on head A\n");
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index d910873c1368..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
27#include "nouveau_dma.h" 27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h" 28#include "nouveau_fbcon.h"
29 29
30static void 30void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{ 32{
33 struct nouveau_fbcon_par *par = info->par; 33 struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
54 FIRE_RING(chan); 54 FIRE_RING(chan);
55} 55}
56 56
57static void 57void
58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 58nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
59{ 59{
60 struct nouveau_fbcon_par *par = info->par; 60 struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
88 FIRE_RING(chan); 88 FIRE_RING(chan);
89} 89}
90 90
91static void 91void
92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 92nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
93{ 93{
94 struct nouveau_fbcon_par *par = info->par; 94 struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
307 307
308 FIRE_RING(chan); 308 FIRE_RING(chan);
309 309
310 info->fbops->fb_fillrect = nv04_fbcon_fillrect;
311 info->fbops->fb_copyarea = nv04_fbcon_copyarea;
312 info->fbops->fb_imageblit = nv04_fbcon_imageblit;
313 return 0; 310 return 0;
314} 311}
315 312
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c3341b..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
579 nouveau_encoder(encoder)->restore.output); 579 nouveau_encoder(encoder)->restore.output);
580 580
581 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); 581 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
582
583 nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
582} 584}
583 585
584static int nv17_tv_create_resources(struct drm_encoder *encoder, 586static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 40b7360841f8..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
298static void 298static void
299nv50_crtc_destroy(struct drm_crtc *crtc) 299nv50_crtc_destroy(struct drm_crtc *crtc)
300{ 300{
301 struct drm_device *dev = crtc->dev; 301 struct drm_device *dev;
302 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 302 struct nouveau_crtc *nv_crtc;
303
304 NV_DEBUG_KMS(dev, "\n");
305 303
306 if (!crtc) 304 if (!crtc)
307 return; 305 return;
308 306
307 dev = crtc->dev;
308 nv_crtc = nouveau_crtc(crtc);
309
310 NV_DEBUG_KMS(dev, "\n");
311
309 drm_crtc_cleanup(&nv_crtc->base); 312 drm_crtc_cleanup(&nv_crtc->base);
310 313
311 nv50_cursor_fini(nv_crtc); 314 nv50_cursor_fini(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e4f279ee61cf..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
3#include "nouveau_dma.h" 3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h" 4#include "nouveau_fbcon.h"
5 5
6static void 6void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{ 8{
9 struct nouveau_fbcon_par *par = info->par; 9 struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
46 FIRE_RING(chan); 46 FIRE_RING(chan);
47} 47}
48 48
49static void 49void
50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 50nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
51{ 51{
52 struct nouveau_fbcon_par *par = info->par; 52 struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
81 FIRE_RING(chan); 81 FIRE_RING(chan);
82} 82}
83 83
84static void 84void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{ 86{
87 struct nouveau_fbcon_par *par = info->par; 87 struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
263 dev_priv->vm_vram_base); 263 dev_priv->vm_vram_base);
264 264
265 info->fbops->fb_fillrect = nv50_fbcon_fillrect;
266 info->fbops->fb_copyarea = nv50_fbcon_copyarea;
267 info->fbops->fb_imageblit = nv50_fbcon_imageblit;
268 return 0; 265 return 0;
269} 266}
270 267
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 32b244bcb482..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -317,17 +317,20 @@ void
317nv50_fifo_destroy_context(struct nouveau_channel *chan) 317nv50_fifo_destroy_context(struct nouveau_channel *chan)
318{ 318{
319 struct drm_device *dev = chan->dev; 319 struct drm_device *dev = chan->dev;
320 struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
320 321
321 NV_DEBUG(dev, "ch%d\n", chan->id); 322 NV_DEBUG(dev, "ch%d\n", chan->id);
322 323
323 nouveau_gpuobj_ref_del(dev, &chan->ramfc); 324 /* This will ensure the channel is seen as disabled. */
324 nouveau_gpuobj_ref_del(dev, &chan->cache); 325 chan->ramfc = NULL;
325
326 nv50_fifo_channel_disable(dev, chan->id, false); 326 nv50_fifo_channel_disable(dev, chan->id, false);
327 327
328 /* Dummy channel, also used on ch 127 */ 328 /* Dummy channel, also used on ch 127 */
329 if (chan->id == 0) 329 if (chan->id == 0)
330 nv50_fifo_channel_disable(dev, 127, false); 330 nv50_fifo_channel_disable(dev, 127, false);
331
332 nouveau_gpuobj_ref_del(dev, &ramfc);
333 nouveau_gpuobj_ref_del(dev, &chan->cache);
331} 334}
332 335
333int 336int
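In nv50_fifo_destroy_context() above, the RAMFC reference is detached from the channel before the hardware channel is disabled, so anything inspecting the channel in the meantime sees it as dead, and the object is only freed afterwards. The same ordering in miniature, with stand-in types and helpers:

    #include <stddef.h>

    struct chan_sketch {
        void *ramfc;                       /* stand-in for the RAMFC reference */
    };

    static void channel_disable(int id) { (void)id; }
    static void obj_free(void **ref)    { *ref = NULL; }

    void destroy_context_sketch(struct chan_sketch *chan, int id)
    {
        void *ramfc = chan->ramfc;

        chan->ramfc = NULL;        /* channel now looks disabled to readers */
        channel_disable(id);       /* ...before the hardware channel goes away */
        obj_free(&ramfc);          /* free only after nothing points at it */
    }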
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 20319e59d368..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
165 uint32_t inst; 165 uint32_t inst;
166 int i; 166 int i;
167 167
168 /* Be sure we're not in the middle of a context switch or bad things
169 * will happen, such as unloading the wrong pgraph context.
170 */
171 if (!nv_wait(0x400300, 0x00000001, 0x00000000))
172 NV_ERROR(dev, "Ctxprog is still running\n");
173
168 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 174 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
169 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 175 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
170 return NULL; 176 return NULL;
@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
275int 281int
276nv50_graph_unload_context(struct drm_device *dev) 282nv50_graph_unload_context(struct drm_device *dev)
277{ 283{
278 uint32_t inst, fifo = nv_rd32(dev, 0x400500); 284 uint32_t inst;
279 285
280 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR); 286 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
281 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED)) 287 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
283 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE; 289 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
284 290
285 nouveau_wait_for_idle(dev); 291 nouveau_wait_for_idle(dev);
286 nv_wr32(dev, 0x400500, fifo & ~1);
287 nv_wr32(dev, 0x400784, inst); 292 nv_wr32(dev, 0x400784, inst);
288 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20); 293 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
289 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01); 294 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
290 nouveau_wait_for_idle(dev); 295 nouveau_wait_for_idle(dev);
291 nv_wr32(dev, 0x400500, fifo);
292 296
293 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst); 297 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
294 return 0; 298 return 0;
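
nv50_graph_channel() now waits for the ctxprog-busy bit (bit 0 of register 0x400300) to clear before reading PGRAPH_CTXCTL_CUR, logging an error if it never does, so the wrong pgraph context cannot be sampled in the middle of a switch. A self-contained sketch of that nv_wait()-style poll; read_reg(), the 2-second budget and the 1 ms poll interval are illustrative stand-ins, not the driver's actual helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for an MMIO read; a real driver would use readl()/nv_rd32(). */
static uint32_t read_reg(uint32_t reg)
{
    (void)reg;
    return 0x00000000;  /* pretend the "busy" bit has already cleared */
}

/* Poll until (reg & mask) == val, or give up after roughly 2 seconds. */
static bool wait_reg(uint32_t reg, uint32_t mask, uint32_t val)
{
    for (int i = 0; i < 2000; i++) {
        if ((read_reg(reg) & mask) == val)
            return true;
        usleep(1000);   /* 1 ms between polls */
    }
    return false;
}

int main(void)
{
    if (!wait_reg(0x400300, 0x00000001, 0x00000000))
        fprintf(stderr, "ctxprog is still running\n");
    return 0;
}
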
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7f..f0dc4e36ef05 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
76 for (i = 0x1700; i <= 0x1710; i += 4) 76 for (i = 0x1700; i <= 0x1710; i += 4)
77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
78 78
79 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
80 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
81 else
82 dev_priv->vram_sys_base = 0;
83
79 /* Reserve the last MiB of VRAM, we should probably try to avoid 84 /* Reserve the last MiB of VRAM, we should probably try to avoid
80 * setting up the below tables over the top of the VBIOS image at 85 * setting up the below tables over the top of the VBIOS image at
81 * some point. 86 * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
172 * We map the entire fake channel into the start of the PRAMIN BAR 177 * We map the entire fake channel into the start of the PRAMIN BAR
173 */ 178 */
174 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000, 179 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
175 0, &priv->pramin_pt); 180 0, &priv->pramin_pt);
176 if (ret) 181 if (ret)
177 return ret; 182 return ret;
178 183
179 for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) { 184 v = c_offset | 1;
180 if (v < (c_offset + c_size)) 185 if (dev_priv->vram_sys_base) {
181 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1); 186 v += dev_priv->vram_sys_base;
182 else 187 v |= 0x30;
183 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); 188 }
189
190 i = 0;
191 while (v < dev_priv->vram_sys_base + c_offset + c_size) {
192 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
193 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
194 v += 0x1000;
195 i += 8;
196 }
197
198 while (i < pt_size) {
199 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
184 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); 200 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
201 i += 8;
185 } 202 }
186 203
187 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); 204 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
416{ 433{
417 struct drm_nouveau_private *dev_priv = dev->dev_private; 434 struct drm_nouveau_private *dev_priv = dev->dev_private;
418 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 435 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
419 uint32_t pte, pte_end, vram; 436 struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
437 uint32_t pte, pte_end;
438 uint64_t vram;
420 439
421 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) 440 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
422 return -EINVAL; 441 return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
424 NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", 443 NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
425 gpuobj->im_pramin->start, gpuobj->im_pramin->size); 444 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
426 445
427 pte = (gpuobj->im_pramin->start >> 12) << 3; 446 pte = (gpuobj->im_pramin->start >> 12) << 1;
428 pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; 447 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
429 vram = gpuobj->im_backing_start; 448 vram = gpuobj->im_backing_start;
430 449
431 NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", 450 NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
432 gpuobj->im_pramin->start, pte, pte_end); 451 gpuobj->im_pramin->start, pte, pte_end);
433 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); 452 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
434 453
454 vram |= 1;
455 if (dev_priv->vram_sys_base) {
456 vram += dev_priv->vram_sys_base;
457 vram |= 0x30;
458 }
459
435 dev_priv->engine.instmem.prepare_access(dev, true); 460 dev_priv->engine.instmem.prepare_access(dev, true);
436 while (pte < pte_end) { 461 while (pte < pte_end) {
437 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1); 462 nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
438 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); 463 nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
439
440 pte += 8;
441 vram += NV50_INSTMEM_PAGE_SIZE; 464 vram += NV50_INSTMEM_PAGE_SIZE;
442 } 465 }
443 dev_priv->engine.instmem.finish_access(dev); 466 dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
470 if (gpuobj->im_bound == 0) 493 if (gpuobj->im_bound == 0)
471 return -EINVAL; 494 return -EINVAL;
472 495
473 pte = (gpuobj->im_pramin->start >> 12) << 3; 496 pte = (gpuobj->im_pramin->start >> 12) << 1;
474 pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte; 497 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
475 498
476 dev_priv->engine.instmem.prepare_access(dev, true); 499 dev_priv->engine.instmem.prepare_access(dev, true);
477 while (pte < pte_end) { 500 while (pte < pte_end) {
478 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009); 501 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
479 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000); 502 nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
480 pte += 8;
481 } 503 }
482 dev_priv->engine.instmem.finish_access(dev); 504 dev_priv->engine.instmem.finish_access(dev);
483 505
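
The instmem rework treats each PRAMIN page-table entry as a 64-bit value: the backing address plus flag bits (bit 0 present, 0x30 for system memory on the IGP chipsets checked above), with vram_sys_base folded in, written as two 32-bit words with the low half first. A standalone sketch of that packing, using helpers equivalent to the kernel's lower_32_bits()/upper_32_bits(); the write32() target here is just an array and the addresses are made up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pt[4];                      /* stand-in for the PRAMIN page table */

static void write32(unsigned idx, uint32_t v) { pt[idx] = v; }

static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
    uint64_t vram_sys_base = 0x100000000ull; /* illustrative, non-zero on IGPs */
    uint64_t addr = 0x0000000012340000ull;   /* page-aligned backing address   */

    uint64_t pte = addr | 1;                 /* bit 0: entry present           */
    if (vram_sys_base) {
        pte += vram_sys_base;                /* address is system-memory based */
        pte |= 0x30;                         /* target: system memory          */
    }

    write32(0, lo32(pte));                   /* low word first ...             */
    write32(1, hi32(pte));                   /* ... then the high word         */

    printf("pte = %08" PRIx32 " %08" PRIx32 "\n", pt[1], pt[0]);
    return 0;
}
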
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ecf1936b8224..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
101 struct nouveau_encoder *nvenc = nouveau_encoder(enc); 101 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
102 102
103 if (nvenc == nv_encoder || 103 if (nvenc == nv_encoder ||
104 nvenc->disconnect != nv50_sor_disconnect ||
104 nvenc->dcb->or != nv_encoder->dcb->or) 105 nvenc->dcb->or != nv_encoder->dcb->or)
105 continue; 106 continue;
106 107
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default" 2 bool "Enable modesetting on radeon by default - NEW DRIVER"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 help 4 help
5 Choose this option if you want kernel modesetting enabled by default, 5 Choose this option if you want kernel modesetting enabled by default.
6 and you have a new enough userspace to support this. Running old 6
7 userspaces with this enabled will cause pain. 7 This is a completely new driver. It's only part of the existing drm
8 for compatibility reasons. It requires an entirely different graphics
9 stack above it and works very differently from the old drm stack.
 10 i.e. don't enable this unless you know what you are doing; it may
11 cause issues or bugs compared to the previous userspace driver stack.
8 12
9 When kernel modesetting is enabled the IOCTL of radeon/drm 13 When kernel modesetting is enabled the IOCTL of radeon/drm
10 driver are considered as invalid and an error message is printed 14 driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e3b44562d265..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <asm/unaligned.h>
27 28
28#define ATOM_DEBUG 29#define ATOM_DEBUG
29 30
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
212 case ATOM_ARG_PS: 213 case ATOM_ARG_PS:
213 idx = U8(*ptr); 214 idx = U8(*ptr);
214 (*ptr)++; 215 (*ptr)++;
215 val = le32_to_cpu(ctx->ps[idx]); 216 /* get_unaligned_le32 avoids unaligned accesses from atombios
217 * tables, noticed on a DEC Alpha. */
218 val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
216 if (print) 219 if (print)
217 DEBUG("PS[0x%02X,0x%04X]", idx, val); 220 DEBUG("PS[0x%02X,0x%04X]", idx, val);
218 break; 221 break;
@@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
640 uint8_t count = U8((*ptr)++); 643 uint8_t count = U8((*ptr)++);
641 SDEBUG(" count: %d\n", count); 644 SDEBUG(" count: %d\n", count);
642 if (arg == ATOM_UNIT_MICROSEC) 645 if (arg == ATOM_UNIT_MICROSEC)
643 schedule_timeout_uninterruptible(usecs_to_jiffies(count)); 646 udelay(count);
644 else 647 else
645 schedule_timeout_uninterruptible(msecs_to_jiffies(count)); 648 schedule_timeout_uninterruptible(msecs_to_jiffies(count));
646} 649}
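
Two of the atom.c fixes deserve a note. The parameter-space read now goes through get_unaligned_le32() because AtomBIOS tables are byte-packed and a plain 32-bit load faults on strict-alignment machines (the comment mentions a DEC Alpha). Microsecond delays now use udelay(), since usecs_to_jiffies() rounds any sub-tick delay up to at least one jiffy, which is far too coarse. Below is a portable sketch of the unaligned read; memcpy() is the standard way to express it outside the kernel.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a little-endian u32 from a possibly misaligned pointer. */
static uint32_t get_unaligned_le32_equiv(const void *p)
{
    uint8_t b[4];
    memcpy(b, p, 4);                       /* byte copy: no alignment trap  */
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
    /* Byte-packed table; the u32 at offset 1 is not 4-byte aligned. */
    uint8_t table[] = { 0xAA, 0x78, 0x56, 0x34, 0x12 };

    printf("0x%08x\n", get_unaligned_le32_equiv(&table[1])); /* 0x12345678 */
    return 0;
}
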
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 71060114d5de..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; 332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); 333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base; 334 unsigned char *base;
335 int retry_count = 0;
335 336
336 memset(&args, 0, sizeof(args)); 337 memset(&args, 0, sizeof(args));
337 338
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 339 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339 340
341retry:
340 memcpy(base, req_bytes, num_bytes); 342 memcpy(base, req_bytes, num_bytes);
341 343
342 args.lpAuxRequest = 0; 344 args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
347 349
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 350 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349 351
350 if (args.ucReplyStatus) { 352 if (args.ucReplyStatus && !args.ucDataOutLen) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", 353 if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
354 goto retry;
355 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], 356 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus); 357 chan->rec.i2c_id, args.ucReplyStatus, retry_count);
354 return false; 358 return false;
355 } 359 }
356 360
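
The AUX-channel change retries the transaction when the reply status reports a defer (0x20 here), giving up after ten retries instead of failing on the first deferral. A minimal sketch of that bounded-retry shape; do_aux_transaction() and the status values are placeholders for the atombios call and its reply byte, not the driver's interface.

#include <stdbool.h>
#include <stdio.h>

#define AUX_STATUS_OK    0x00
#define AUX_STATUS_DEFER 0x20   /* sink asked us to try again later */

static int attempts;

/* Placeholder: pretend the sink defers twice, then answers. */
static int do_aux_transaction(void)
{
    return (++attempts < 3) ? AUX_STATUS_DEFER : AUX_STATUS_OK;
}

static bool aux_transfer(void)
{
    for (int retry = 0; retry <= 10; retry++) {
        int status = do_aux_transaction();
        if (status == AUX_STATUS_OK)
            return true;
        if (status != AUX_STATUS_DEFER)
            break;              /* hard failure: do not keep hammering */
    }
    return false;
}

int main(void)
{
    printf("aux transfer %s after %d attempts\n",
           aux_transfer() ? "ok" : "failed", attempts);
    return 0;
}
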
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a1198d99cdf9..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1950,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
1950 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1950 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
1951 return r; 1951 return r;
1952 } 1952 }
1953
1954 r = r600_audio_init(rdev);
1955 if (r) {
1956 DRM_ERROR("radeon: audio resume failed\n");
1957 return r;
1958 }
1959
1953 return r; 1960 return r;
1954} 1961}
1955 1962
@@ -1957,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
1957{ 1964{
1958 int r; 1965 int r;
1959 1966
1967 r600_audio_fini(rdev);
1960 /* FIXME: we should wait for ring to be empty */ 1968 /* FIXME: we should wait for ring to be empty */
1961 r600_cp_stop(rdev); 1969 r600_cp_stop(rdev);
1962 rdev->cp.ready = false; 1970 rdev->cp.ready = false;
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b1c1d3433454..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
261 if (!r600_audio_chipset_supported(rdev)) 261 if (!r600_audio_chipset_supported(rdev))
262 return; 262 return;
263 263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer); 264 del_timer(&rdev->audio_timer);
265 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
267} 266}
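
r600_audio_fini() now deletes the polling timer before it clears the audio enable register, presumably so a callback that is still pending cannot run against hardware that has just been switched off; in kernel code del_timer_sync() is what actually guarantees the handler has finished. A trivial sketch of the ordering, with hypothetical names.

#include <stdio.h>

static int timer_armed = 1;
static int audio_enabled = 1;

static void stop_timer(void)    { timer_armed = 0; }    /* think del_timer_sync() */
static void disable_audio(void) { audio_enabled = 0; }  /* think WREG32_P(...)    */

static void audio_fini(void)
{
    stop_timer();      /* 1. no more callbacks can fire ...            */
    disable_audio();   /* 2. ... so it is now safe to disable the block */
}

int main(void)
{
    audio_fini();
    printf("timer=%d audio=%d\n", timer_armed, audio_enabled);
    return 0;
}
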
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca8a4cb..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
543void r600_vb_ib_put(struct radeon_device *rdev) 543void r600_vb_ib_put(struct radeon_device *rdev)
544{ 544{
545 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); 545 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
546 mutex_lock(&rdev->ib_pool.mutex);
547 list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
548 mutex_unlock(&rdev->ib_pool.mutex);
549 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 546 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
550} 547}
551 548
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711c2e91..75bcf35a0931 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
1428 1428
1429 gb_tiling_config |= R600_BANK_SWAPS(1); 1429 gb_tiling_config |= R600_BANK_SWAPS(1);
1430 1430
1431 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 1431 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
1432 dev_priv->r600_max_backends, 1432 backend_map = 0x28;
1433 (0xff << dev_priv->r600_max_backends) & 0xff); 1433 else
1434 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
1435 dev_priv->r600_max_backends,
1436 (0xff << dev_priv->r600_max_backends) & 0xff);
1434 gb_tiling_config |= R600_BACKEND_MAP(backend_map); 1437 gb_tiling_config |= R600_BACKEND_MAP(backend_map);
1435 1438
1436 cc_gc_shader_pipe_config = 1439 cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f57480ba1355..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
96 * symbol; 96 * symbol;
97 */ 97 */
98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 98#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
99/* RADEON_IB_POOL_SIZE must be a power of 2 */
99#define RADEON_IB_POOL_SIZE 16 100#define RADEON_IB_POOL_SIZE 16
100#define RADEON_DEBUGFS_MAX_NUM_FILES 32 101#define RADEON_DEBUGFS_MAX_NUM_FILES 32
101#define RADEONFB_CONN_LIMIT 4 102#define RADEONFB_CONN_LIMIT 4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
363 */ 364 */
364struct radeon_ib { 365struct radeon_ib {
365 struct list_head list; 366 struct list_head list;
366 unsigned long idx; 367 unsigned idx;
367 uint64_t gpu_addr; 368 uint64_t gpu_addr;
368 struct radeon_fence *fence; 369 struct radeon_fence *fence;
369 uint32_t *ptr; 370 uint32_t *ptr;
370 uint32_t length_dw; 371 uint32_t length_dw;
372 bool free;
371}; 373};
372 374
373/* 375/*
@@ -377,10 +379,9 @@ struct radeon_ib {
377struct radeon_ib_pool { 379struct radeon_ib_pool {
378 struct mutex mutex; 380 struct mutex mutex;
379 struct radeon_bo *robj; 381 struct radeon_bo *robj;
380 struct list_head scheduled_ibs;
381 struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; 382 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
382 bool ready; 383 bool ready;
383 DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE); 384 unsigned head_id;
384}; 385};
385 386
386struct radeon_cp { 387struct radeon_cp {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa82ca74324e..4d8831548a5f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
206 *connector_type = DRM_MODE_CONNECTOR_DVID; 206 *connector_type = DRM_MODE_CONNECTOR_DVID;
207 } 207 }
208 208
209 /* Asrock RS600 board lists the DVI port as HDMI */
210 if ((dev->pdev->device == 0x7941) &&
211 (dev->pdev->subsystem_vendor == 0x1849) &&
212 (dev->pdev->subsystem_device == 0x7941)) {
213 if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
214 (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
215 *connector_type = DRM_MODE_CONNECTOR_DVID;
216 }
217
209 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 218 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
210 if ((dev->pdev->device == 0x7941) && 219 if ((dev->pdev->device == 0x7941) &&
211 (dev->pdev->subsystem_vendor == 0x147b) && 220 (dev->pdev->subsystem_vendor == 0x147b) &&
@@ -287,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
287 *connector_type = DRM_MODE_CONNECTOR_DVID; 296 *connector_type = DRM_MODE_CONNECTOR_DVID;
288 } 297 }
289 298
299 /* XFX Pine Group device rv730 reports no VGA DDC lines
300 * even though they are wired up to record 0x93
301 */
302 if ((dev->pdev->device == 0x9498) &&
303 (dev->pdev->subsystem_vendor == 0x1682) &&
304 (dev->pdev->subsystem_device == 0x2452)) {
305 struct radeon_device *rdev = dev->dev_private;
306 *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
307 }
290 return true; 308 return true;
291} 309}
292 310
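
Both new quirks key off the PCI device ID plus the subsystem vendor/device pair, which is how board-specific fix-ups are matched. The driver itself uses open-coded if-blocks, but a growing list of quirks is often kept in a small table; here is an illustrative, self-contained version, with the two entries taken from the quirks above.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct board_quirk {
    uint16_t device, subsys_vendor, subsys_device;
    const char *note;
};

/* Entries mirror the two quirks added in the diff above. */
static const struct board_quirk quirks[] = {
    { 0x7941, 0x1849, 0x7941, "Asrock RS600: HDMI port is really DVI" },
    { 0x9498, 0x1682, 0x2452, "XFX rv730: VGA DDC on gpio record 0x93" },
};

static const struct board_quirk *find_quirk(uint16_t dev, uint16_t sv, uint16_t sd)
{
    for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
        if (quirks[i].device == dev &&
            quirks[i].subsys_vendor == sv &&
            quirks[i].subsys_device == sd)
            return &quirks[i];
    return NULL;
}

int main(void)
{
    const struct board_quirk *q = find_quirk(0x7941, 0x1849, 0x7941);
    printf("%s\n", q ? q->note : "no quirk");
    return 0;
}
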
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
65 if (r) { 65 if (r) {
66 goto out_cleanup; 66 goto out_cleanup;
67 } 67 }
68 start_jiffies = jiffies; 68
69 for (i = 0; i < n; i++) { 69 /* r100 doesn't have dma engine so skip the test */
70 r = radeon_fence_create(rdev, &fence); 70 if (rdev->asic->copy_dma) {
71 if (r) { 71
72 goto out_cleanup; 72 start_jiffies = jiffies;
73 for (i = 0; i < n; i++) {
74 r = radeon_fence_create(rdev, &fence);
75 if (r) {
76 goto out_cleanup;
77 }
78
79 r = radeon_copy_dma(rdev, saddr, daddr,
80 size / RADEON_GPU_PAGE_SIZE, fence);
81
82 if (r) {
83 goto out_cleanup;
84 }
85 r = radeon_fence_wait(fence, false);
86 if (r) {
87 goto out_cleanup;
88 }
89 radeon_fence_unref(&fence);
73 } 90 }
74 r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence); 91 end_jiffies = jiffies;
75 if (r) { 92 time = end_jiffies - start_jiffies;
76 goto out_cleanup; 93 time = jiffies_to_msecs(time);
94 if (time > 0) {
95 i = ((n * size) >> 10) / time;
96 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
97 " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
98 n, size >> 10,
99 sdomain, ddomain, time,
100 i, i * 1000, (i * 1000) / 1024);
77 } 101 }
78 r = radeon_fence_wait(fence, false);
79 if (r) {
80 goto out_cleanup;
81 }
82 radeon_fence_unref(&fence);
83 }
84 end_jiffies = jiffies;
85 time = end_jiffies - start_jiffies;
86 time = jiffies_to_msecs(time);
87 if (time > 0) {
88 i = ((n * size) >> 10) / time;
89 printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
90 " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
91 sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
92 } 102 }
103
93 start_jiffies = jiffies; 104 start_jiffies = jiffies;
94 for (i = 0; i < n; i++) { 105 for (i = 0; i < n; i++) {
95 r = radeon_fence_create(rdev, &fence); 106 r = radeon_fence_create(rdev, &fence);
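
The benchmark's throughput line is easy to misread, so here is the arithmetic spelled out: with n moves of size bytes completing in time milliseconds, ((n * size) >> 10) / time is KiB per millisecond, multiplying by 1000 gives KiB/s, and dividing that by 1024 approximates MiB/s. A standalone check of the formula with made-up numbers.

#include <stdio.h>

int main(void)
{
    unsigned n = 1024;               /* number of buffer moves            */
    unsigned size = 1024 * 1024;     /* bytes per move (1 MiB)            */
    unsigned long time_ms = 512;     /* measured duration in milliseconds */

    unsigned long kb_per_ms = (((unsigned long)n * size) >> 10) / time_ms;

    printf("%u moves of %ukb in %lums: %lukb/ms %lukb/s %luM/s\n",
           n, size >> 10, time_ms, kb_per_ms,
           kb_per_ms * 1000, (kb_per_ms * 1000) / 1024);
    return 0;
}
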
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2d8e5a70f284..65f81942f399 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
580 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 580 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
581 struct drm_encoder *encoder; 581 struct drm_encoder *encoder;
582 struct drm_encoder_helper_funcs *encoder_funcs; 582 struct drm_encoder_helper_funcs *encoder_funcs;
583 bool dret; 583 bool dret = false;
584 enum drm_connector_status ret = connector_status_disconnected; 584 enum drm_connector_status ret = connector_status_disconnected;
585 585
586 encoder = radeon_best_single_encoder(connector); 586 encoder = radeon_best_single_encoder(connector);
587 if (!encoder) 587 if (!encoder)
588 ret = connector_status_disconnected; 588 ret = connector_status_disconnected;
589 589
590 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 590 if (radeon_connector->ddc_bus) {
591 dret = radeon_ddc_probe(radeon_connector); 591 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
592 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 592 dret = radeon_ddc_probe(radeon_connector);
593 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
594 }
593 if (dret) { 595 if (dret) {
594 if (radeon_connector->edid) { 596 if (radeon_connector->edid) {
595 kfree(radeon_connector->edid); 597 kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
740 struct drm_mode_object *obj; 742 struct drm_mode_object *obj;
741 int i; 743 int i;
742 enum drm_connector_status ret = connector_status_disconnected; 744 enum drm_connector_status ret = connector_status_disconnected;
743 bool dret; 745 bool dret = false;
744 746
745 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); 747 if (radeon_connector->ddc_bus) {
746 dret = radeon_ddc_probe(radeon_connector); 748 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
747 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); 749 dret = radeon_ddc_probe(radeon_connector);
750 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
751 }
748 if (dret) { 752 if (dret) {
749 if (radeon_connector->edid) { 753 if (radeon_connector->edid) {
750 kfree(radeon_connector->edid); 754 kfree(radeon_connector->edid);
@@ -776,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
776 * connected and the DVI port disconnected. If the edid doesn't 780 * connected and the DVI port disconnected. If the edid doesn't
777 * say HDMI, vice versa. 781 * say HDMI, vice versa.
778 */ 782 */
779 if (radeon_connector->shared_ddc && connector_status_connected) { 783 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
780 struct drm_device *dev = connector->dev; 784 struct drm_device *dev = connector->dev;
781 struct drm_connector *list_connector; 785 struct drm_connector *list_connector;
782 struct radeon_connector *list_radeon_connector; 786 struct radeon_connector *list_radeon_connector;
@@ -1056,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1056 return; 1060 return;
1057 } 1061 }
1058 if (radeon_connector->ddc_bus && i2c_bus->valid) { 1062 if (radeon_connector->ddc_bus && i2c_bus->valid) {
1059 if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus, 1063 if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
1060 sizeof(struct radeon_i2c_bus_rec)) == 0) {
1061 radeon_connector->shared_ddc = true; 1064 radeon_connector->shared_ddc = true;
1062 shared_ddc = true; 1065 shared_ddc = true;
1063 } 1066 }
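
The shared-DDC fix in radeon_dvi_detect() repairs a classic pitfall: the old test (radeon_connector->shared_ddc && connector_status_connected) compares against a non-zero enum constant, so the second operand is always true and the condition collapses to the shared_ddc check alone; the fix tests the actual result, ret == connector_status_connected. A tiny standalone demonstration of the trap.

#include <stdio.h>

enum status { DISCONNECTED = 2, CONNECTED = 1 };  /* non-zero, like drm's values */

int main(void)
{
    enum status ret = DISCONNECTED;
    int shared_ddc = 1;

    if (shared_ddc && CONNECTED)            /* constant is non-zero: always taken */
        printf("buggy check fires even though ret=%d\n", ret);

    if (shared_ddc && ret == CONNECTED)     /* fixed: tests the actual result     */
        printf("fixed check fires\n");
    else
        printf("fixed check correctly skipped\n");
    return 0;
}
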
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148cf5e6..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
86 &p->validated); 86 &p->validated);
87 } 87 }
88 } 88 }
89 return radeon_bo_list_validate(&p->validated, p->ib->fence); 89 return radeon_bo_list_validate(&p->validated);
90} 90}
91 91
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189{ 189{
190 unsigned i; 190 unsigned i;
191 191
192 if (error && parser->ib) { 192 if (!error && parser->ib) {
193 radeon_bo_list_unvalidate(&parser->validated, 193 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
194 parser->ib->fence);
195 } else {
196 radeon_bo_list_unreserve(&parser->validated);
197 } 194 }
195 radeon_bo_list_unreserve(&parser->validated);
198 for (i = 0; i < parser->nrelocs; i++) { 196 for (i = 0; i < parser->nrelocs; i++) {
199 if (parser->relocs[i].gobj) { 197 if (parser->relocs[i].gobj) {
200 mutex_lock(&parser->rdev->ddev->struct_mutex); 198 mutex_lock(&parser->rdev->ddev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a92f994cc26..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
278 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 278 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) 279 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); 280 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
281 if (radeon_connector->ddc_bus) 281 if (radeon_connector->ddc_bus) {
282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 282 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
283 radeon_connector->ddc_bus->rec.mask_clk_reg, 283 radeon_connector->ddc_bus->rec.mask_clk_reg,
284 radeon_connector->ddc_bus->rec.mask_data_reg, 284 radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
288 radeon_connector->ddc_bus->rec.en_data_reg, 288 radeon_connector->ddc_bus->rec.en_data_reg,
289 radeon_connector->ddc_bus->rec.y_clk_reg, 289 radeon_connector->ddc_bus->rec.y_clk_reg,
290 radeon_connector->ddc_bus->rec.y_data_reg); 290 radeon_connector->ddc_bus->rec.y_data_reg);
291 } else {
292 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
293 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
294 connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
295 connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
296 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
297 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
298 DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
299 }
291 DRM_INFO(" Encoders:\n"); 300 DRM_INFO(" Encoders:\n");
292 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 301 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
293 radeon_encoder = to_radeon_encoder(encoder); 302 radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e13785282a82..c57ad606504d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
106 * 1.29- R500 3D cmd buffer support 106 * 1.29- R500 3D cmd buffer support
107 * 1.30- Add support for occlusion queries 107 * 1.30- Add support for occlusion queries
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup
109 */ 110 */
110#define DRIVER_MAJOR 1 111#define DRIVER_MAJOR 1
111#define DRIVER_MINOR 31 112#define DRIVER_MINOR 32
112#define DRIVER_PATCHLEVEL 0 113#define DRIVER_PATCHLEVEL 0
113 114
114enum radeon_cp_microcode_version { 115enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
248 if (ret) 248 if (ret)
249 goto out_unref; 249 goto out_unref;
250 250
251 memset_io(fbptr, 0xff, aligned_size); 251 memset_io(fbptr, 0x0, aligned_size);
252 252
253 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
254 254
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71bff218..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
306 } 306 }
307} 307}
308 308
309int radeon_bo_list_validate(struct list_head *head, void *fence) 309int radeon_bo_list_validate(struct list_head *head)
310{ 310{
311 struct radeon_bo_list *lobj; 311 struct radeon_bo_list *lobj;
312 struct radeon_bo *bo; 312 struct radeon_bo *bo;
313 struct radeon_fence *old_fence = NULL;
314 int r; 313 int r;
315 314
316 r = radeon_bo_list_reserve(head); 315 r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
334 } 333 }
335 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 334 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
336 lobj->tiling_flags = bo->tiling_flags; 335 lobj->tiling_flags = bo->tiling_flags;
337 if (fence) {
338 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
339 bo->tbo.sync_obj = radeon_fence_ref(fence);
340 bo->tbo.sync_obj_arg = NULL;
341 }
342 if (old_fence) {
343 radeon_fence_unref(&old_fence);
344 }
345 } 336 }
346 return 0; 337 return 0;
347} 338}
348 339
349void radeon_bo_list_unvalidate(struct list_head *head, void *fence) 340void radeon_bo_list_fence(struct list_head *head, void *fence)
350{ 341{
351 struct radeon_bo_list *lobj; 342 struct radeon_bo_list *lobj;
352 struct radeon_fence *old_fence; 343 struct radeon_bo *bo;
353 344 struct radeon_fence *old_fence = NULL;
354 if (fence) 345
355 list_for_each_entry(lobj, head, list) { 346 list_for_each_entry(lobj, head, list) {
356 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); 347 bo = lobj->bo;
357 if (old_fence == fence) { 348 spin_lock(&bo->tbo.lock);
358 lobj->bo->tbo.sync_obj = NULL; 349 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
359 radeon_fence_unref(&old_fence); 350 bo->tbo.sync_obj = radeon_fence_ref(fence);
360 } 351 bo->tbo.sync_obj_arg = NULL;
352 spin_unlock(&bo->tbo.lock);
353 if (old_fence) {
354 radeon_fence_unref(&old_fence);
361 } 355 }
362 radeon_bo_list_unreserve(head); 356 }
363} 357}
364 358
365int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 359int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
156 struct list_head *head); 156 struct list_head *head);
157extern int radeon_bo_list_reserve(struct list_head *head); 157extern int radeon_bo_list_reserve(struct list_head *head);
158extern void radeon_bo_list_unreserve(struct list_head *head); 158extern void radeon_bo_list_unreserve(struct list_head *head);
159extern int radeon_bo_list_validate(struct list_head *head, void *fence); 159extern int radeon_bo_list_validate(struct list_head *head);
160extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); 160extern void radeon_bo_list_fence(struct list_head *head, void *fence);
161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 161extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
162 struct vm_area_struct *vma); 162 struct vm_area_struct *vma);
163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 163extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..6579eb4c1f28 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
41{ 41{
42 struct radeon_fence *fence; 42 struct radeon_fence *fence;
43 struct radeon_ib *nib; 43 struct radeon_ib *nib;
44 unsigned long i; 44 int r = 0, i, c;
45 int r = 0;
46 45
47 *ib = NULL; 46 *ib = NULL;
48 r = radeon_fence_create(rdev, &fence); 47 r = radeon_fence_create(rdev, &fence);
49 if (r) { 48 if (r) {
50 DRM_ERROR("failed to create fence for new IB\n"); 49 dev_err(rdev->dev, "failed to create fence for new IB\n");
51 return r; 50 return r;
52 } 51 }
53 mutex_lock(&rdev->ib_pool.mutex); 52 mutex_lock(&rdev->ib_pool.mutex);
54 i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 53 for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
55 if (i < RADEON_IB_POOL_SIZE) { 54 i &= (RADEON_IB_POOL_SIZE - 1);
56 set_bit(i, rdev->ib_pool.alloc_bm); 55 if (rdev->ib_pool.ibs[i].free) {
57 rdev->ib_pool.ibs[i].length_dw = 0; 56 nib = &rdev->ib_pool.ibs[i];
58 *ib = &rdev->ib_pool.ibs[i]; 57 break;
59 mutex_unlock(&rdev->ib_pool.mutex); 58 }
60 goto out;
61 } 59 }
62 if (list_empty(&rdev->ib_pool.scheduled_ibs)) { 60 if (nib == NULL) {
63 /* we go do nothings here */ 61 /* This should never happen, it means we allocated all
62 * IB and haven't scheduled one yet, return EBUSY to
63 * userspace hoping that on ioctl recall we get better
64 * luck
65 */
66 dev_err(rdev->dev, "no free indirect buffer !\n");
64 mutex_unlock(&rdev->ib_pool.mutex); 67 mutex_unlock(&rdev->ib_pool.mutex);
65 DRM_ERROR("all IB allocated none scheduled.\n"); 68 radeon_fence_unref(&fence);
66 r = -EINVAL; 69 return -EBUSY;
67 goto out;
68 } 70 }
69 /* get the first ib on the scheduled list */ 71 rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
70 nib = list_entry(rdev->ib_pool.scheduled_ibs.next, 72 nib->free = false;
71 struct radeon_ib, list); 73 if (nib->fence) {
72 if (nib->fence == NULL) {
73 /* we go do nothings here */
74 mutex_unlock(&rdev->ib_pool.mutex); 74 mutex_unlock(&rdev->ib_pool.mutex);
75 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); 75 r = radeon_fence_wait(nib->fence, false);
76 r = -EINVAL; 76 if (r) {
77 goto out; 77 dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
78 } 78 nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
79 mutex_unlock(&rdev->ib_pool.mutex); 79 mutex_lock(&rdev->ib_pool.mutex);
80 80 nib->free = true;
81 r = radeon_fence_wait(nib->fence, false); 81 mutex_unlock(&rdev->ib_pool.mutex);
82 if (r) { 82 radeon_fence_unref(&fence);
83 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, 83 return r;
84 (unsigned long)nib->gpu_addr, nib->length_dw); 84 }
85 DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n"); 85 mutex_lock(&rdev->ib_pool.mutex);
86 goto out;
87 } 86 }
88 radeon_fence_unref(&nib->fence); 87 radeon_fence_unref(&nib->fence);
89 88 nib->fence = fence;
90 nib->length_dw = 0; 89 nib->length_dw = 0;
91
92 /* scheduled list is accessed here */
93 mutex_lock(&rdev->ib_pool.mutex);
94 list_del(&nib->list);
95 INIT_LIST_HEAD(&nib->list);
96 mutex_unlock(&rdev->ib_pool.mutex); 90 mutex_unlock(&rdev->ib_pool.mutex);
97
98 *ib = nib; 91 *ib = nib;
99out: 92 return 0;
100 if (r) {
101 radeon_fence_unref(&fence);
102 } else {
103 (*ib)->fence = fence;
104 }
105 return r;
106} 93}
107 94
108void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) 95void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
113 if (tmp == NULL) { 100 if (tmp == NULL) {
114 return; 101 return;
115 } 102 }
116 mutex_lock(&rdev->ib_pool.mutex); 103 if (!tmp->fence->emited)
117 if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
118 /* IB is scheduled & not signaled don't do anythings */
119 mutex_unlock(&rdev->ib_pool.mutex);
120 return;
121 }
122 list_del(&tmp->list);
123 INIT_LIST_HEAD(&tmp->list);
124 if (tmp->fence)
125 radeon_fence_unref(&tmp->fence); 104 radeon_fence_unref(&tmp->fence);
126 105 mutex_lock(&rdev->ib_pool.mutex);
127 tmp->length_dw = 0; 106 tmp->free = true;
128 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
129 mutex_unlock(&rdev->ib_pool.mutex); 107 mutex_unlock(&rdev->ib_pool.mutex);
130} 108}
131 109
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
135 113
136 if (!ib->length_dw || !rdev->cp.ready) { 114 if (!ib->length_dw || !rdev->cp.ready) {
137 /* TODO: Nothings in the ib we should report. */ 115 /* TODO: Nothings in the ib we should report. */
138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 116 DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
139 return -EINVAL; 117 return -EINVAL;
140 } 118 }
141 119
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
148 radeon_ring_ib_execute(rdev, ib); 126 radeon_ring_ib_execute(rdev, ib);
149 radeon_fence_emit(rdev, ib->fence); 127 radeon_fence_emit(rdev, ib->fence);
150 mutex_lock(&rdev->ib_pool.mutex); 128 mutex_lock(&rdev->ib_pool.mutex);
151 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); 129 /* once scheduled IB is considered free and protected by the fence */
130 ib->free = true;
152 mutex_unlock(&rdev->ib_pool.mutex); 131 mutex_unlock(&rdev->ib_pool.mutex);
153 radeon_ring_unlock_commit(rdev); 132 radeon_ring_unlock_commit(rdev);
154 return 0; 133 return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
164 if (rdev->ib_pool.robj) 143 if (rdev->ib_pool.robj)
165 return 0; 144 return 0;
166 /* Allocate 1M object buffer */ 145 /* Allocate 1M object buffer */
167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
168 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 146 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
169 true, RADEON_GEM_DOMAIN_GTT, 147 true, RADEON_GEM_DOMAIN_GTT,
170 &rdev->ib_pool.robj); 148 &rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
195 rdev->ib_pool.ibs[i].ptr = ptr + offset; 173 rdev->ib_pool.ibs[i].ptr = ptr + offset;
196 rdev->ib_pool.ibs[i].idx = i; 174 rdev->ib_pool.ibs[i].idx = i;
197 rdev->ib_pool.ibs[i].length_dw = 0; 175 rdev->ib_pool.ibs[i].length_dw = 0;
198 INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list); 176 rdev->ib_pool.ibs[i].free = true;
199 } 177 }
200 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 178 rdev->ib_pool.head_id = 0;
201 rdev->ib_pool.ready = true; 179 rdev->ib_pool.ready = true;
202 DRM_INFO("radeon: ib pool ready.\n"); 180 DRM_INFO("radeon: ib pool ready.\n");
203 if (radeon_debugfs_ib_init(rdev)) { 181 if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
214 return; 192 return;
215 } 193 }
216 mutex_lock(&rdev->ib_pool.mutex); 194 mutex_lock(&rdev->ib_pool.mutex);
217 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
218 if (rdev->ib_pool.robj) { 195 if (rdev->ib_pool.robj) {
219 r = radeon_bo_reserve(rdev->ib_pool.robj, false); 196 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
220 if (likely(r == 0)) { 197 if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
363 if (ib == NULL) { 340 if (ib == NULL) {
364 return 0; 341 return 0;
365 } 342 }
366 seq_printf(m, "IB %04lu\n", ib->idx); 343 seq_printf(m, "IB %04u\n", ib->idx);
367 seq_printf(m, "IB fence %p\n", ib->fence); 344 seq_printf(m, "IB fence %p\n", ib->fence);
368 seq_printf(m, "IB size %05u dwords\n", ib->length_dw); 345 seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
369 for (i = 0; i < ib->length_dw; i++) { 346 for (i = 0; i < ib->length_dw; i++) {
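
The IB pool rework drops the allocation bitmap and the scheduled_ibs list in favour of a fixed array scanned ring-style from head_id with a per-IB free flag; the i &= (RADEON_IB_POOL_SIZE - 1) wrap is why radeon.h now insists the pool size be a power of two. A self-contained sketch of that allocator shape (no fences or locking, just the index walk).

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 16            /* must be a power of two for the & mask */

struct ib { unsigned idx; bool free; };

static struct ib pool[POOL_SIZE];
static unsigned head_id;

static struct ib *ib_get(void)
{
    for (unsigned c = 0, i = head_id; c < POOL_SIZE; c++, i++) {
        i &= POOL_SIZE - 1;                      /* cheap wrap-around           */
        if (pool[i].free) {
            pool[i].free = false;
            head_id = (i + 1) & (POOL_SIZE - 1); /* start the next search here  */
            return &pool[i];
        }
    }
    return NULL;                                 /* caller maps this to -EBUSY  */
}

static void ib_put(struct ib *ib) { ib->free = true; }

int main(void)
{
    for (unsigned i = 0; i < POOL_SIZE; i++)
        pool[i] = (struct ib){ .idx = i, .free = true };

    struct ib *a = ib_get();
    struct ib *b = ib_get();
    printf("got %u then %u\n", a->idx, b->idx);   /* 0 then 1                      */
    ib_put(a);
    printf("next: %u\n", ib_get()->idx);          /* 2: search resumes at head_id  */
    return 0;
}
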
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5943d561fd1e..03021674d097 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
549 549
550 gb_tiling_config |= BANK_SWAPS(1); 550 gb_tiling_config |= BANK_SWAPS(1);
551 551
552 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, 552 if (rdev->family == CHIP_RV740)
553 rdev->config.rv770.max_backends, 553 backend_map = 0x28;
554 (0xff << rdev->config.rv770.max_backends) & 0xff); 554 else
555 backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
556 rdev->config.rv770.max_backends,
557 (0xff << rdev->config.rv770.max_backends) & 0xff);
555 gb_tiling_config |= BACKEND_MAP(backend_map); 558 gb_tiling_config |= BACKEND_MAP(backend_map);
556 559
557 cc_gc_shader_pipe_config = 560 cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a3e909b7bba..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1020 struct ttm_mem_reg *mem) 1020 struct ttm_mem_reg *mem)
1021{ 1021{
1022 int i; 1022 int i;
1023 struct drm_mm_node *node = mem->mm_node;
1024
1025 if (node && placement->lpfn != 0 &&
1026 (node->start < placement->fpfn ||
1027 node->start + node->size > placement->lpfn))
1028 return -1;
1023 1029
1024 for (i = 0; i < placement->num_placement; i++) { 1030 for (i = 0; i < placement->num_placement; i++) {
1025 if ((placement->placement[i] & mem->placement & 1031 if ((placement->placement[i] & mem->placement &
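
ttm_bo_mem_compat() now also rejects a buffer whose node lies outside the placement's page-frame window: previously only the placement flags were compared, so a buffer could be reported compatible even though it sat outside the requested range (such as when a scan-out buffer must stay inside a limited VRAM window). The interval test on its own, as a standalone sketch.

#include <stdbool.h>
#include <stdio.h>

/* true if [start, start+size) fits inside the window [fpfn, lpfn) */
static bool node_in_window(unsigned long start, unsigned long size,
                           unsigned long fpfn, unsigned long lpfn)
{
    if (lpfn == 0)                 /* as in the diff: lpfn == 0 means no window */
        return true;
    return start >= fpfn && start + size <= lpfn;
}

int main(void)
{
    printf("%d\n", node_in_window(10, 4, 0, 16));  /* 1: fits             */
    printf("%d\n", node_in_window(14, 4, 0, 16));  /* 0: spills past lpfn */
    return 0;
}
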
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e2123af7775a..3d47a2c12322 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
196 196
197#ifdef CONFIG_X86 197#ifdef CONFIG_X86
198static inline int ttm_tt_set_page_caching(struct page *p, 198static inline int ttm_tt_set_page_caching(struct page *p,
199 enum ttm_caching_state c_state) 199 enum ttm_caching_state c_old,
200 enum ttm_caching_state c_new)
200{ 201{
201 int ret = 0; 202 int ret = 0;
202 203
203 if (PageHighMem(p)) 204 if (PageHighMem(p))
204 return 0; 205 return 0;
205 206
206 if (get_page_memtype(p) != -1) { 207 if (c_old != tt_cached) {
207 /* p isn't in the default caching state, set it to 208 /* p isn't in the default caching state, set it to
208 * writeback first to free its current memtype. */ 209 * writeback first to free its current memtype. */
209 210
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
212 return ret; 213 return ret;
213 } 214 }
214 215
215 if (c_state == tt_wc) 216 if (c_new == tt_wc)
216 ret = set_memory_wc((unsigned long) page_address(p), 1); 217 ret = set_memory_wc((unsigned long) page_address(p), 1);
217 else if (c_state == tt_uncached) 218 else if (c_new == tt_uncached)
218 ret = set_pages_uc(p, 1); 219 ret = set_pages_uc(p, 1);
219 220
220 return ret; 221 return ret;
221} 222}
222#else /* CONFIG_X86 */ 223#else /* CONFIG_X86 */
223static inline int ttm_tt_set_page_caching(struct page *p, 224static inline int ttm_tt_set_page_caching(struct page *p,
224 enum ttm_caching_state c_state) 225 enum ttm_caching_state c_old,
226 enum ttm_caching_state c_new)
225{ 227{
226 return 0; 228 return 0;
227} 229}
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
254 for (i = 0; i < ttm->num_pages; ++i) { 256 for (i = 0; i < ttm->num_pages; ++i) {
255 cur_page = ttm->pages[i]; 257 cur_page = ttm->pages[i];
256 if (likely(cur_page != NULL)) { 258 if (likely(cur_page != NULL)) {
257 ret = ttm_tt_set_page_caching(cur_page, c_state); 259 ret = ttm_tt_set_page_caching(cur_page,
260 ttm->caching_state,
261 c_state);
258 if (unlikely(ret != 0)) 262 if (unlikely(ret != 0))
259 goto out_err; 263 goto out_err;
260 } 264 }
@@ -268,7 +272,7 @@ out_err:
268 for (j = 0; j < i; ++j) { 272 for (j = 0; j < i; ++j) {
269 cur_page = ttm->pages[j]; 273 cur_page = ttm->pages[j];
270 if (likely(cur_page != NULL)) { 274 if (likely(cur_page != NULL)) {
271 (void)ttm_tt_set_page_caching(cur_page, 275 (void)ttm_tt_set_page_caching(cur_page, c_state,
272 ttm->caching_state); 276 ttm->caching_state);
273 } 277 }
274 } 278 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f687fa64..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
348 */ 348 */
349 349
350 DRM_INFO("It appears like vesafb is loaded. " 350 DRM_INFO("It appears like vesafb is loaded. "
351 "Ignore above error if any. Entering stealth mode.\n"); 351 "Ignore above error if any.\n");
352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 352 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
353 if (unlikely(ret != 0)) { 353 if (unlikely(ret != 0)) {
354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 354 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
355 goto out_no_device; 355 goto out_no_device;
356 } 356 }
357 vmw_kms_init(dev_priv);
358 vmw_overlay_init(dev_priv);
359 } else {
360 ret = vmw_request_device(dev_priv);
361 if (unlikely(ret != 0))
362 goto out_no_device;
363 vmw_kms_init(dev_priv);
364 vmw_overlay_init(dev_priv);
365 vmw_fb_init(dev_priv);
366 } 357 }
358 ret = vmw_request_device(dev_priv);
359 if (unlikely(ret != 0))
360 goto out_no_device;
361 vmw_kms_init(dev_priv);
362 vmw_overlay_init(dev_priv);
363 vmw_fb_init(dev_priv);
367 364
368 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 365 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
369 register_pm_notifier(&dev_priv->pm_nb); 366 register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
406 403
407 unregister_pm_notifier(&dev_priv->pm_nb); 404 unregister_pm_notifier(&dev_priv->pm_nb);
408 405
409 if (!dev_priv->stealth) { 406 vmw_fb_close(dev_priv);
410 vmw_fb_close(dev_priv); 407 vmw_kms_close(dev_priv);
411 vmw_kms_close(dev_priv); 408 vmw_overlay_close(dev_priv);
412 vmw_overlay_close(dev_priv); 409 vmw_release_device(dev_priv);
413 vmw_release_device(dev_priv); 410 if (dev_priv->stealth)
414 pci_release_regions(dev->pdev);
415 } else {
416 vmw_kms_close(dev_priv);
417 vmw_overlay_close(dev_priv);
418 pci_release_region(dev->pdev, 2); 411 pci_release_region(dev->pdev, 2);
419 } 412 else
413 pci_release_regions(dev->pdev);
414
420 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 415 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
421 drm_irq_uninstall(dev_priv->dev); 416 drm_irq_uninstall(dev_priv->dev);
422 if (dev->devname == vmw_devname) 417 if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
585 int ret = 0; 580 int ret = 0;
586 581
587 DRM_INFO("Master set.\n"); 582 DRM_INFO("Master set.\n");
588 if (dev_priv->stealth) {
589 ret = vmw_request_device(dev_priv);
590 if (unlikely(ret != 0))
591 return ret;
592 }
593 583
594 if (active) { 584 if (active) {
595 BUG_ON(active != &dev_priv->fbdev_master); 585 BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
649 639
650 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 640 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
651 641
652 if (dev_priv->stealth) {
653 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
654 if (unlikely(ret != 0))
655 DRM_ERROR("Unable to clean VRAM on master drop.\n");
656 vmw_release_device(dev_priv);
657 }
658 dev_priv->active_master = &dev_priv->fbdev_master; 642 dev_priv->active_master = &dev_priv->fbdev_master;
659 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 643 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
660 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 644 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
661 645
662 if (!dev_priv->stealth) 646 vmw_fb_on(dev_priv);
663 vmw_fb_on(dev_priv);
664} 647}
665 648
666 649
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 135be9688c90..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,10 +39,10 @@
39#include "ttm/ttm_execbuf_util.h" 39#include "ttm/ttm_execbuf_util.h"
40#include "ttm/ttm_module.h" 40#include "ttm/ttm_module.h"
41 41
42#define VMWGFX_DRIVER_DATE "20090724" 42#define VMWGFX_DRIVER_DATE "20100209"
43#define VMWGFX_DRIVER_MAJOR 0 43#define VMWGFX_DRIVER_MAJOR 1
44#define VMWGFX_DRIVER_MINOR 1 44#define VMWGFX_DRIVER_MINOR 0
45#define VMWGFX_DRIVER_PATCHLEVEL 2 45#define VMWGFX_DRIVER_PATCHLEVEL 0
46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 46#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 47#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
48#define VMWGFX_MAX_RELOCATIONS 2048 48#define VMWGFX_MAX_RELOCATIONS 2048
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
113 unsigned long static_buffer_size; 113 unsigned long static_buffer_size;
114 bool using_bounce_buffer; 114 bool using_bounce_buffer;
115 uint32_t capabilities; 115 uint32_t capabilities;
116 struct mutex fifo_mutex;
116 struct rw_semaphore rwsem; 117 struct rw_semaphore rwsem;
117}; 118};
118 119
@@ -213,7 +214,7 @@ struct vmw_private {
213 * Fencing and IRQs. 214 * Fencing and IRQs.
214 */ 215 */
215 216
216 uint32_t fence_seq; 217 atomic_t fence_seq;
217 wait_queue_head_t fence_queue; 218 wait_queue_head_t fence_queue;
218 wait_queue_head_t fifo_queue; 219 wait_queue_head_t fifo_queue;
219 atomic_t fence_queue_waiters; 220 atomic_t fence_queue_waiters;
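
fence_seq becomes an atomic_t, so the sequence number can be read and advanced without holding the FIFO locks; the vmwgfx_fifo.c change below already reads it with atomic_read() on the reserve-failure path. For readers outside the kernel, a C11 analogue of the pattern looks like this; it is an analogy, not the driver's code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint fence_seq;     /* plays the role of dev_priv->fence_seq */

static unsigned emit_fence(void)
{
    /* atomically claim the next sequence number (cf. atomic_add_return) */
    return atomic_fetch_add(&fence_seq, 1) + 1;
}

int main(void)
{
    atomic_store(&fence_seq, 100);              /* cf. atomic_set() at init   */
    printf("emitted %u, %u; last = %u\n",
           emit_fence(), emit_fence(),          /* 101 and 102, either order  */
           atomic_load(&fence_seq));            /* cf. atomic_read(): 102     */
    return 0;
}
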
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe7..0897359b3e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); 182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
183} 183}
184 184
185static int vmw_cmd_dma(struct vmw_private *dev_priv, 185static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
186 struct vmw_sw_context *sw_context, 186 struct vmw_sw_context *sw_context,
187 SVGA3dCmdHeader *header) 187 SVGAGuestPtr *ptr,
188 struct vmw_dma_buffer **vmw_bo_p)
188{ 189{
189 uint32_t handle;
190 struct vmw_dma_buffer *vmw_bo = NULL; 190 struct vmw_dma_buffer *vmw_bo = NULL;
191 struct ttm_buffer_object *bo; 191 struct ttm_buffer_object *bo;
192 struct vmw_surface *srf = NULL; 192 uint32_t handle = ptr->gmrId;
193 struct vmw_dma_cmd {
194 SVGA3dCmdHeader header;
195 SVGA3dCmdSurfaceDMA dma;
196 } *cmd;
197 struct vmw_relocation *reloc; 193 struct vmw_relocation *reloc;
198 int ret;
199 uint32_t cur_validate_node; 194 uint32_t cur_validate_node;
200 struct ttm_validate_buffer *val_buf; 195 struct ttm_validate_buffer *val_buf;
196 int ret;
201 197
202 cmd = container_of(header, struct vmw_dma_cmd, header);
203 handle = cmd->dma.guest.ptr.gmrId;
204 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); 198 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
205 if (unlikely(ret != 0)) { 199 if (unlikely(ret != 0)) {
206 DRM_ERROR("Could not find or use GMR region.\n"); 200 DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
209 bo = &vmw_bo->base; 203 bo = &vmw_bo->base;
210 204
211 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { 205 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
212 DRM_ERROR("Max number of DMA commands per submission" 206 DRM_ERROR("Max number relocations per submission"
213 " exceeded\n"); 207 " exceeded\n");
214 ret = -EINVAL; 208 ret = -EINVAL;
215 goto out_no_reloc; 209 goto out_no_reloc;
216 } 210 }
217 211
218 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 212 reloc = &sw_context->relocs[sw_context->cur_reloc++];
219 reloc->location = &cmd->dma.guest.ptr; 213 reloc->location = ptr;
220 214
221 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); 215 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
222 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { 216 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
234 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 228 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
235 ++sw_context->cur_val_buf; 229 ++sw_context->cur_val_buf;
236 } 230 }
231 *vmw_bo_p = vmw_bo;
232 return 0;
233
234out_no_reloc:
235 vmw_dmabuf_unreference(&vmw_bo);
236 vmw_bo_p = NULL;
237 return ret;
238}
239
240static int vmw_cmd_end_query(struct vmw_private *dev_priv,
241 struct vmw_sw_context *sw_context,
242 SVGA3dCmdHeader *header)
243{
244 struct vmw_dma_buffer *vmw_bo;
245 struct vmw_query_cmd {
246 SVGA3dCmdHeader header;
247 SVGA3dCmdEndQuery q;
248 } *cmd;
249 int ret;
250
251 cmd = container_of(header, struct vmw_query_cmd, header);
252 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
253 if (unlikely(ret != 0))
254 return ret;
255
256 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
257 &cmd->q.guestResult,
258 &vmw_bo);
259 if (unlikely(ret != 0))
260 return ret;
261
262 vmw_dmabuf_unreference(&vmw_bo);
263 return 0;
264}
237 265
266static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
267 struct vmw_sw_context *sw_context,
268 SVGA3dCmdHeader *header)
269{
270 struct vmw_dma_buffer *vmw_bo;
271 struct vmw_query_cmd {
272 SVGA3dCmdHeader header;
273 SVGA3dCmdWaitForQuery q;
274 } *cmd;
275 int ret;
276
277 cmd = container_of(header, struct vmw_query_cmd, header);
278 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
279 if (unlikely(ret != 0))
280 return ret;
281
282 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
283 &cmd->q.guestResult,
284 &vmw_bo);
285 if (unlikely(ret != 0))
286 return ret;
287
288 vmw_dmabuf_unreference(&vmw_bo);
289 return 0;
290}
291
292
293static int vmw_cmd_dma(struct vmw_private *dev_priv,
294 struct vmw_sw_context *sw_context,
295 SVGA3dCmdHeader *header)
296{
297 struct vmw_dma_buffer *vmw_bo = NULL;
298 struct ttm_buffer_object *bo;
299 struct vmw_surface *srf = NULL;
300 struct vmw_dma_cmd {
301 SVGA3dCmdHeader header;
302 SVGA3dCmdSurfaceDMA dma;
303 } *cmd;
304 int ret;
305
306 cmd = container_of(header, struct vmw_dma_cmd, header);
307 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
308 &cmd->dma.guest.ptr,
309 &vmw_bo);
310 if (unlikely(ret != 0))
311 return ret;
312
313 bo = &vmw_bo->base;
238 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, 314 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
239 cmd->dma.host.sid, &srf); 315 cmd->dma.host.sid, &srf);
240 if (ret) { 316 if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
379 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), 455 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
380 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 456 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
381 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), 457 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
382 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), 458 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
383 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), 459 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
384 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), 460 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
385 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 461 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
386 &vmw_cmd_blt_surf_screen_check) 462 &vmw_cmd_blt_surf_screen_check)
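
The new vmw_cmd_end_query()/vmw_cmd_wait_query() handlers follow the same shape as vmw_cmd_dma(): declare a local struct pairing the SVGA3dCmdHeader with the command body, then recover it from the header pointer with container_of(). For anyone unfamiliar with that macro, a standalone sketch (the struct layout and names here are invented).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct cmd_header { uint32_t id, size; };

struct query_cmd {                 /* header immediately followed by the body */
    struct cmd_header header;
    uint32_t query_type;
};

static void handle(struct cmd_header *header)
{
    struct query_cmd *cmd = container_of(header, struct query_cmd, header);
    printf("id=%u query_type=%u\n", cmd->header.id, cmd->query_type);
}

int main(void)
{
    struct query_cmd cmd = { .header = { .id = 42, .size = 8 }, .query_type = 3 };
    handle(&cmd.header);
    return 0;
}
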
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8b..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
559 info->pixmap.scan_align = 1; 559 info->pixmap.scan_align = 1;
560#endif 560#endif
561 561
562 info->aperture_base = vmw_priv->vram_start;
563 info->aperture_size = vmw_priv->vram_size;
564
562 /* 565 /*
563 * Dirty & Deferred IO 566 * Dirty & Deferred IO
564 */ 567 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 4157547cc6e4..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
74 fifo->reserved_size = 0; 74 fifo->reserved_size = 0;
75 fifo->using_bounce_buffer = false; 75 fifo->using_bounce_buffer = false;
76 76
77 mutex_init(&fifo->fifo_mutex);
77 init_rwsem(&fifo->rwsem); 78 init_rwsem(&fifo->rwsem);
78 79
79 /* 80 /*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
117 (unsigned int) min, 118 (unsigned int) min,
118 (unsigned int) fifo->capabilities); 119 (unsigned int) fifo->capabilities);
119 120
120 dev_priv->fence_seq = dev_priv->last_read_sequence; 121 atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
121 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); 122 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
122 123
123 return vmw_fifo_send_fence(dev_priv, &dummy); 124 return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
283 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 284 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
284 int ret; 285 int ret;
285 286
286 down_write(&fifo_state->rwsem); 287 mutex_lock(&fifo_state->fifo_mutex);
287 max = ioread32(fifo_mem + SVGA_FIFO_MAX); 288 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
288 min = ioread32(fifo_mem + SVGA_FIFO_MIN); 289 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
289 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); 290 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
351 } 352 }
352out_err: 353out_err:
353 fifo_state->reserved_size = 0; 354 fifo_state->reserved_size = 0;
354 up_write(&fifo_state->rwsem); 355 mutex_unlock(&fifo_state->fifo_mutex);
355 return NULL; 356 return NULL;
356} 357}
357 358
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
426 427
427 } 428 }
428 429
430 down_write(&fifo_state->rwsem);
429 if (fifo_state->using_bounce_buffer || reserveable) { 431 if (fifo_state->using_bounce_buffer || reserveable) {
430 next_cmd += bytes; 432 next_cmd += bytes;
431 if (next_cmd >= max) 433 if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
437 if (reserveable) 439 if (reserveable)
438 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); 440 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
439 mb(); 441 mb();
440 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
441 up_write(&fifo_state->rwsem); 442 up_write(&fifo_state->rwsem);
443 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
444 mutex_unlock(&fifo_state->fifo_mutex);
442} 445}
443 446
444int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) 447int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
451 454
452 fm = vmw_fifo_reserve(dev_priv, bytes); 455 fm = vmw_fifo_reserve(dev_priv, bytes);
453 if (unlikely(fm == NULL)) { 456 if (unlikely(fm == NULL)) {
454 down_write(&fifo_state->rwsem); 457 *sequence = atomic_read(&dev_priv->fence_seq);
455 *sequence = dev_priv->fence_seq;
456 up_write(&fifo_state->rwsem);
457 ret = -ENOMEM; 458 ret = -ENOMEM;
458 (void)vmw_fallback_wait(dev_priv, false, true, *sequence, 459 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
459 false, 3*HZ); 460 false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
461 } 462 }
462 463
463 do { 464 do {
464 *sequence = dev_priv->fence_seq++; 465 *sequence = atomic_add_return(1, &dev_priv->fence_seq);
465 } while (*sequence == 0); 466 } while (*sequence == 0);
466 467
467 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { 468 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
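fence_seq becomes an atomic_t so a new sequence number can be handed out without holding the FIFO rwsem, while the new fifo_mutex serialises reserve/commit and the rwsem is only taken around the commit itself. The skip-zero wrap handling used by vmw_fifo_send_fence(), shown in isolation as a minimal sketch (the demo_* names are made up):

#include <linux/types.h>
#include <asm/atomic.h>

static atomic_t demo_fence_seq = ATOMIC_INIT(0);

static u32 demo_next_fence(void)
{
	u32 seq;

	/* Sequence 0 means "no fence", so skip it when the counter wraps. */
	do {
		seq = atomic_add_return(1, &demo_fence_seq);
	} while (seq == 0);

	return seq;
}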
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 778851f9f1d6..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
48 case DRM_VMW_PARAM_FIFO_OFFSET: 48 case DRM_VMW_PARAM_FIFO_OFFSET:
49 param->value = dev_priv->mmio_start; 49 param->value = dev_priv->mmio_start;
50 break; 50 break;
51 case DRM_VMW_PARAM_HW_CAPS:
52 param->value = dev_priv->capabilities;
53 break;
54 case DRM_VMW_PARAM_FIFO_CAPS:
55 param->value = dev_priv->fifo.capabilities;
56 break;
51 default: 57 default:
52 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 58 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
53 param->param); 59 param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
85 return true; 85 return true;
86 86
87 /** 87 /**
88 * Below is to signal stale fences that have wrapped.
89 * First, block fence submission.
90 */
91
92 down_read(&fifo_state->rwsem);
93
94 /**
95 * Then check if the sequence is higher than what we've actually 88 * Then check if the sequence is higher than what we've actually
96 * emitted. Then the fence is stale and signaled. 89 * emitted. Then the fence is stale and signaled.
97 */ 90 */
98 91
99 ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); 92 ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
100 up_read(&fifo_state->rwsem); 93 > VMW_FENCE_WRAP);
101 94
102 return ret; 95 return ret;
103} 96}
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
127 120
128 if (fifo_idle) 121 if (fifo_idle)
129 down_read(&fifo_state->rwsem); 122 down_read(&fifo_state->rwsem);
130 signal_seq = dev_priv->fence_seq; 123 signal_seq = atomic_read(&dev_priv->fence_seq);
131 ret = 0; 124 ret = 0;
132 125
133 for (;;) { 126 for (;;) {
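With the rwsem removed from vmw_fence_signaled(), the stale-fence test relies only on unsigned 32-bit arithmetic: when the last emitted sequence has moved more than VMW_FENCE_WRAP past the fence being checked, the fence is from an earlier wrap and counts as signalled. The comparison on its own, as an illustrative sketch (the demo_* name is made up):

#include <linux/types.h>

static bool demo_fence_is_stale(u32 emitted_seq, u32 fence_seq, u32 wrap)
{
	/* Unsigned subtraction gives the right distance across a wrap. */
	return (emitted_seq - fence_seq) > wrap;
}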
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index eeba6d1d06e4..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
769 769
770 drm_mode_config_init(dev); 770 drm_mode_config_init(dev);
771 dev->mode_config.funcs = &vmw_kms_funcs; 771 dev->mode_config.funcs = &vmw_kms_funcs;
772 dev->mode_config.min_width = 640; 772 dev->mode_config.min_width = 1;
773 dev->mode_config.min_height = 480; 773 dev->mode_config.min_height = 1;
774 dev->mode_config.max_width = 2048; 774 dev->mode_config.max_width = dev_priv->fb_max_width;
775 dev->mode_config.max_height = 2048; 775 dev->mode_config.max_height = dev_priv->fb_max_height;
776 776
777 ret = vmw_kms_init_legacy_display_system(dev_priv); 777 ret = vmw_kms_init_legacy_display_system(dev_priv);
778 778
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c7efbd47ab84..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,11 +35,6 @@
35#define VMW_RES_SURFACE ttm_driver_type1 35#define VMW_RES_SURFACE ttm_driver_type1
36#define VMW_RES_STREAM ttm_driver_type2 36#define VMW_RES_STREAM ttm_driver_type2
37 37
38/* XXX: This isn't a real hardware flag, but just a hack for kernel to
39 * know about primary surfaces. Find a better way to accomplish this.
40 */
41#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
42
43struct vmw_user_context { 38struct vmw_user_context {
44 struct ttm_base_object base; 39 struct ttm_base_object base;
45 struct vmw_resource res; 40 struct vmw_resource res;
@@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
579 574
580 srf->flags = req->flags; 575 srf->flags = req->flags;
581 srf->format = req->format; 576 srf->format = req->format;
577 srf->scanout = req->scanout;
582 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); 578 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
583 srf->num_sizes = 0; 579 srf->num_sizes = 0;
584 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) 580 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
604 if (unlikely(ret != 0)) 600 if (unlikely(ret != 0))
605 goto out_err1; 601 goto out_err1;
606 602
607 if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
608 /* we should not send this flag down to hardware since
609 * its not a official one
610 */
611 srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
612 srf->scanout = true;
613 } else {
614 srf->scanout = false;
615 }
616
617 if (srf->scanout && 603 if (srf->scanout &&
618 srf->num_sizes == 1 && 604 srf->num_sizes == 1 &&
619 srf->sizes[0].width == 64 && 605 srf->sizes[0].width == 64 &&
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93603c9..2f6cf69ecb39 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
961 remaining -= 7; 961 remaining -= 7;
962 pr_devel("client 0x%p called 'target'\n", priv); 962 pr_devel("client 0x%p called 'target'\n", priv);
963 /* if target is default */ 963 /* if target is default */
964 if (!strncmp(buf, "default", 7)) 964 if (!strncmp(curr_pos, "default", 7))
965 pdev = pci_dev_get(vga_default_device()); 965 pdev = pci_dev_get(vga_default_device());
966 else { 966 else {
967 if (!vga_pci_str_to_vars(curr_pos, remaining, 967 if (!vga_pci_str_to_vars(curr_pos, remaining,
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index b1c050ff311d..e29b6d5ba8ef 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/types.h>
16 17
17/* include interfaces to usb layer */ 18/* include interfaces to usb layer */
18#include <linux/usb.h> 19#include <linux/usb.h>
@@ -31,8 +32,8 @@
31#define CMD_I2C_IO_END (1<<1) 32#define CMD_I2C_IO_END (1<<1)
32 33
33/* i2c bit delay, default is 10us -> 100kHz */ 34/* i2c bit delay, default is 10us -> 100kHz */
34static int delay = 10; 35static unsigned short delay = 10;
35module_param(delay, int, 0); 36module_param(delay, ushort, 0);
36MODULE_PARM_DESC(delay, "bit delay in microseconds, " 37MODULE_PARM_DESC(delay, "bit delay in microseconds, "
37 "e.g. 10 for 100kHz (default is 100kHz)"); 38 "e.g. 10 for 100kHz (default is 100kHz)");
38 39
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
109 110
110static u32 usb_func(struct i2c_adapter *adapter) 111static u32 usb_func(struct i2c_adapter *adapter)
111{ 112{
112 u32 func; 113 __le32 func;
113 114
114 /* get functionality from adapter */ 115 /* get functionality from adapter */
115 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != 116 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
118 return 0; 119 return 0;
119 } 120 }
120 121
121 return func; 122 return le32_to_cpu(func);
122} 123}
123 124
124/* This is the actual algorithm we define */ 125/* This is the actual algorithm we define */
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
216 "i2c-tiny-usb at bus %03d device %03d", 217 "i2c-tiny-usb at bus %03d device %03d",
217 dev->usb_dev->bus->busnum, dev->usb_dev->devnum); 218 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
218 219
219 if (usb_write(&dev->adapter, CMD_SET_DELAY, 220 if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
220 cpu_to_le16(delay), 0, NULL, 0) != 0) {
221 dev_err(&dev->adapter.dev, 221 dev_err(&dev->adapter.dev,
222 "failure setting delay to %dus\n", delay); 222 "failure setting delay to %dus\n", delay);
223 retval = -EIO; 223 retval = -EIO;
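The adapter reports its functionality mask in little-endian order, so the buffer is typed __le32 and converted exactly once with le32_to_cpu(); the explicit cpu_to_le16() around the delay goes away because usb_control_msg() already converts wValue and wIndex when it builds the setup packet. A minimal sketch of the annotate-then-convert pattern (the demo_* struct and function are made up):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wire_reply {
	__le32 func;			/* little-endian, as sent by the device */
};

static u32 demo_reply_func(const struct demo_wire_reply *r)
{
	return le32_to_cpu(r->func);	/* convert once, at the wire boundary */
}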
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index dd0db67bf8d7..975adce5f40c 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -20,6 +20,7 @@ config INFINIBAND_USER_MAD
20 20
21config INFINIBAND_USER_ACCESS 21config INFINIBAND_USER_ACCESS
22 tristate "InfiniBand userspace access (verbs and CM)" 22 tristate "InfiniBand userspace access (verbs and CM)"
23 select ANON_INODES
23 ---help--- 24 ---help---
24 Userspace InfiniBand access support. This enables the 25 Userspace InfiniBand access support. This enables the
25 kernel side of userspace verbs and the userspace 26 kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cc9b5940fa97..875e34e0b235 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2115 if (ret) 2115 if (ret)
2116 goto err1; 2116 goto err1;
2117 2117
2118 if (cma_loopback_addr(addr)) { 2118 if (!cma_any_addr(addr)) {
2119 ret = cma_bind_loopback(id_priv);
2120 } else if (!cma_zero_addr(addr)) {
2121 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); 2119 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2122 if (ret) 2120 if (ret)
2123 goto err1; 2121 goto err1;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b3ea9587dc80..0b3862080c0f 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -145,7 +145,7 @@ extern struct idr ib_uverbs_srq_idr;
145void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); 145void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
146 146
147struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, 147struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
148 int is_async, int *fd); 148 int is_async);
149struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd); 149struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
150 150
151void ib_uverbs_release_ucq(struct ib_uverbs_file *file, 151void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 112d3970222a..f71cf138d674 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -301,10 +301,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
301 301
302 resp.num_comp_vectors = file->device->num_comp_vectors; 302 resp.num_comp_vectors = file->device->num_comp_vectors;
303 303
304 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd); 304 ret = get_unused_fd();
305 if (ret < 0)
306 goto err_free;
307 resp.async_fd = ret;
308
309 filp = ib_uverbs_alloc_event_file(file, 1);
305 if (IS_ERR(filp)) { 310 if (IS_ERR(filp)) {
306 ret = PTR_ERR(filp); 311 ret = PTR_ERR(filp);
307 goto err_free; 312 goto err_fd;
308 } 313 }
309 314
310 if (copy_to_user((void __user *) (unsigned long) cmd.response, 315 if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -332,9 +337,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
332 return in_len; 337 return in_len;
333 338
334err_file: 339err_file:
335 put_unused_fd(resp.async_fd);
336 fput(filp); 340 fput(filp);
337 341
342err_fd:
343 put_unused_fd(resp.async_fd);
344
338err_free: 345err_free:
339 ibdev->dealloc_ucontext(ucontext); 346 ibdev->dealloc_ucontext(ucontext);
340 347
@@ -715,6 +722,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
715 struct ib_uverbs_create_comp_channel cmd; 722 struct ib_uverbs_create_comp_channel cmd;
716 struct ib_uverbs_create_comp_channel_resp resp; 723 struct ib_uverbs_create_comp_channel_resp resp;
717 struct file *filp; 724 struct file *filp;
725 int ret;
718 726
719 if (out_len < sizeof resp) 727 if (out_len < sizeof resp)
720 return -ENOSPC; 728 return -ENOSPC;
@@ -722,9 +730,16 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
722 if (copy_from_user(&cmd, buf, sizeof cmd)) 730 if (copy_from_user(&cmd, buf, sizeof cmd))
723 return -EFAULT; 731 return -EFAULT;
724 732
725 filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd); 733 ret = get_unused_fd();
726 if (IS_ERR(filp)) 734 if (ret < 0)
735 return ret;
736 resp.fd = ret;
737
738 filp = ib_uverbs_alloc_event_file(file, 0);
739 if (IS_ERR(filp)) {
740 put_unused_fd(resp.fd);
727 return PTR_ERR(filp); 741 return PTR_ERR(filp);
742 }
728 743
729 if (copy_to_user((void __user *) (unsigned long) cmd.response, 744 if (copy_to_user((void __user *) (unsigned long) cmd.response,
730 &resp, sizeof resp)) { 745 &resp, sizeof resp)) {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f284ffd430e..810f277739e2 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -41,8 +41,8 @@
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/poll.h> 42#include <linux/poll.h>
43#include <linux/sched.h> 43#include <linux/sched.h>
44#include <linux/anon_inodes.h>
44#include <linux/file.h> 45#include <linux/file.h>
45#include <linux/mount.h>
46#include <linux/cdev.h> 46#include <linux/cdev.h>
47 47
48#include <asm/uaccess.h> 48#include <asm/uaccess.h>
@@ -53,8 +53,6 @@ MODULE_AUTHOR("Roland Dreier");
53MODULE_DESCRIPTION("InfiniBand userspace verbs access"); 53MODULE_DESCRIPTION("InfiniBand userspace verbs access");
54MODULE_LICENSE("Dual BSD/GPL"); 54MODULE_LICENSE("Dual BSD/GPL");
55 55
56#define INFINIBANDEVENTFS_MAGIC 0x49426576 /* "IBev" */
57
58enum { 56enum {
59 IB_UVERBS_MAJOR = 231, 57 IB_UVERBS_MAJOR = 231,
60 IB_UVERBS_BASE_MINOR = 192, 58 IB_UVERBS_BASE_MINOR = 192,
@@ -111,8 +109,6 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
111 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, 109 [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
112}; 110};
113 111
114static struct vfsmount *uverbs_event_mnt;
115
116static void ib_uverbs_add_one(struct ib_device *device); 112static void ib_uverbs_add_one(struct ib_device *device);
117static void ib_uverbs_remove_one(struct ib_device *device); 113static void ib_uverbs_remove_one(struct ib_device *device);
118 114
@@ -489,12 +485,10 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
489} 485}
490 486
491struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, 487struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
492 int is_async, int *fd) 488 int is_async)
493{ 489{
494 struct ib_uverbs_event_file *ev_file; 490 struct ib_uverbs_event_file *ev_file;
495 struct path path;
496 struct file *filp; 491 struct file *filp;
497 int ret;
498 492
499 ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL); 493 ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
500 if (!ev_file) 494 if (!ev_file)
@@ -509,38 +503,12 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
509 ev_file->is_async = is_async; 503 ev_file->is_async = is_async;
510 ev_file->is_closed = 0; 504 ev_file->is_closed = 0;
511 505
512 *fd = get_unused_fd(); 506 filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
513 if (*fd < 0) { 507 ev_file, O_RDONLY);
514 ret = *fd; 508 if (IS_ERR(filp))
515 goto err; 509 kfree(ev_file);
516 }
517
518 /*
519 * fops_get() can't fail here, because we're coming from a
520 * system call on a uverbs file, which will already have a
521 * module reference.
522 */
523 path.mnt = uverbs_event_mnt;
524 path.dentry = uverbs_event_mnt->mnt_root;
525 path_get(&path);
526 filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
527 if (!filp) {
528 ret = -ENFILE;
529 goto err_fd;
530 }
531
532 filp->private_data = ev_file;
533 510
534 return filp; 511 return filp;
535
536err_fd:
537 fops_put(&uverbs_event_fops);
538 path_put(&path);
539 put_unused_fd(*fd);
540
541err:
542 kfree(ev_file);
543 return ERR_PTR(ret);
544} 512}
545 513
546/* 514/*
@@ -825,21 +793,6 @@ static void ib_uverbs_remove_one(struct ib_device *device)
825 kfree(uverbs_dev); 793 kfree(uverbs_dev);
826} 794}
827 795
828static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
829 const char *dev_name, void *data,
830 struct vfsmount *mnt)
831{
832 return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
833 INFINIBANDEVENTFS_MAGIC, mnt);
834}
835
836static struct file_system_type uverbs_event_fs = {
837 /* No owner field so module can be unloaded */
838 .name = "infinibandeventfs",
839 .get_sb = uverbs_event_get_sb,
840 .kill_sb = kill_litter_super
841};
842
843static int __init ib_uverbs_init(void) 796static int __init ib_uverbs_init(void)
844{ 797{
845 int ret; 798 int ret;
@@ -864,33 +817,14 @@ static int __init ib_uverbs_init(void)
864 goto out_class; 817 goto out_class;
865 } 818 }
866 819
867 ret = register_filesystem(&uverbs_event_fs);
868 if (ret) {
869 printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
870 goto out_class;
871 }
872
873 uverbs_event_mnt = kern_mount(&uverbs_event_fs);
874 if (IS_ERR(uverbs_event_mnt)) {
875 ret = PTR_ERR(uverbs_event_mnt);
876 printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
877 goto out_fs;
878 }
879
880 ret = ib_register_client(&uverbs_client); 820 ret = ib_register_client(&uverbs_client);
881 if (ret) { 821 if (ret) {
882 printk(KERN_ERR "user_verbs: couldn't register client\n"); 822 printk(KERN_ERR "user_verbs: couldn't register client\n");
883 goto out_mnt; 823 goto out_class;
884 } 824 }
885 825
886 return 0; 826 return 0;
887 827
888out_mnt:
889 mntput(uverbs_event_mnt);
890
891out_fs:
892 unregister_filesystem(&uverbs_event_fs);
893
894out_class: 828out_class:
895 class_destroy(uverbs_class); 829 class_destroy(uverbs_class);
896 830
@@ -904,8 +838,6 @@ out:
904static void __exit ib_uverbs_cleanup(void) 838static void __exit ib_uverbs_cleanup(void)
905{ 839{
906 ib_unregister_client(&uverbs_client); 840 ib_unregister_client(&uverbs_client);
907 mntput(uverbs_event_mnt);
908 unregister_filesystem(&uverbs_event_fs);
909 class_destroy(uverbs_class); 841 class_destroy(uverbs_class);
910 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); 842 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
911 idr_destroy(&ib_uverbs_pd_idr); 843 idr_destroy(&ib_uverbs_pd_idr);
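Backing the event file with anon_inode_getfile() removes the private 'infinibandeventfs' mount and its file_system_type entirely, and moving get_unused_fd() into the callers lets them put_unused_fd() on any later failure before the descriptor is ever published. A minimal sketch of that split, with made-up names and assuming some fops plus private state:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static int demo_create_event_fd(const struct file_operations *fops, void *priv)
{
	struct file *filp;
	int fd;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = anon_inode_getfile("[demo-event]", fops, priv, O_RDONLY);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	/* Publish the descriptor only once nothing else can fail. */
	fd_install(fd, filp);
	return fd;
}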
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index aa6713b4a988..291d9393d359 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input)
100 struct input_polled_dev *dev = input_get_drvdata(input); 100 struct input_polled_dev *dev = input_get_drvdata(input);
101 101
102 cancel_delayed_work_sync(&dev->work); 102 cancel_delayed_work_sync(&dev->work);
103 /*
104 * Clean up work struct to remove references to the workqueue.
105 * It may be destroyed by the next call. This causes problems
106 * at next device open-close in case of poll_interval == 0.
107 */
108 INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
103 input_polldev_stop_workqueue(); 109 input_polldev_stop_workqueue();
104 110
105 if (dev->close) 111 if (dev->close)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 9774bdfaa482..d8c0c8d6992c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio)
1141 psmouse_deactivate(parent); 1141 psmouse_deactivate(parent);
1142 } 1142 }
1143 1143
1144 psmouse_deactivate(psmouse); 1144 psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
1145
1146 /*
1147 * Disable stream mode so cleanup routine can proceed undisturbed.
1148 */
1149 if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
1150 printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
1151 psmouse->ps2dev.serio->phys);
1145 1152
1146 if (psmouse->cleanup) 1153 if (psmouse->cleanup)
1147 psmouse->cleanup(psmouse); 1154 psmouse->cleanup(psmouse);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d84a36e545f6..b54aee7cd9e3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
1161 return 0; 1161 return 0;
1162} 1162}
1163 1163
1164static int i8042_pm_thaw(struct device *dev)
1165{
1166 i8042_interrupt(0, NULL);
1167
1168 return 0;
1169}
1170
1164static const struct dev_pm_ops i8042_pm_ops = { 1171static const struct dev_pm_ops i8042_pm_ops = {
1165 .suspend = i8042_pm_reset, 1172 .suspend = i8042_pm_reset,
1166 .resume = i8042_pm_restore, 1173 .resume = i8042_pm_restore,
1174 .thaw = i8042_pm_thaw,
1167 .poweroff = i8042_pm_reset, 1175 .poweroff = i8042_pm_reset,
1168 .restore = i8042_pm_restore, 1176 .restore = i8042_pm_restore,
1169}; 1177};
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 09a5e7341bd5..5256123a5228 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
618#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH 618#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
619static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) 619static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
620{ 620{
621 dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ; 621 dev->x = (pkt[2] << 8) | pkt[1];
622 dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ; 622 dev->y = (pkt[4] << 8) | pkt[3];
623 dev->press = pkt[5] & 0xff; 623 dev->press = pkt[5] & 0xff;
624 dev->touch = pkt[0] & 0x01; 624 dev->touch = pkt[0] & 0x01;
625 625
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
809#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH 809#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
810 [DEVTYPE_GENERAL_TOUCH] = { 810 [DEVTYPE_GENERAL_TOUCH] = {
811 .min_xc = 0x0, 811 .min_xc = 0x0,
812 .max_xc = 0x0500, 812 .max_xc = 0x7fff,
813 .min_yc = 0x0, 813 .min_yc = 0x0,
814 .max_yc = 0x0500, 814 .max_yc = 0x7fff,
815 .rept_size = 7, 815 .rept_size = 7,
816 .read_data = general_touch_read_data, 816 .read_data = general_touch_read_data,
817 }, 817 },
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 54abf9e303b7..f1c8cae70b4b 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
172{ 172{
173 int r = 0; 173 int r = 0;
174 size_t dummy = 0; 174 size_t dummy = 0;
175 int overhead_size = 175 int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
176 sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
177 struct dm_ulog_request *tfr = prealloced_ulog_tfr; 176 struct dm_ulog_request *tfr = prealloced_ulog_tfr;
178 struct receiving_pkg pkg; 177 struct receiving_pkg pkg;
179 178
179 /*
180 * Given the space needed to hold the 'struct cn_msg' and
181 * 'struct dm_ulog_request' - do we have enough payload
182 * space remaining?
183 */
180 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { 184 if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
181 DMINFO("Size of tfr exceeds preallocated size"); 185 DMINFO("Size of tfr exceeds preallocated size");
182 return -EINVAL; 186 return -EINVAL;
@@ -191,7 +195,7 @@ resend:
191 */ 195 */
192 mutex_lock(&dm_ulog_lock); 196 mutex_lock(&dm_ulog_lock);
193 197
194 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); 198 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
195 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 199 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
196 tfr->luid = luid; 200 tfr->luid = luid;
197 tfr->seq = dm_ulog_seq++; 201 tfr->seq = dm_ulog_seq++;
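The old overhead_size used sizeof(struct dm_ulog_request *), the size of a pointer, rather than sizeof(struct dm_ulog_request), so the payload check admitted slightly too much data. The bug class is easy to see in plain user-space C (illustrative only):

#include <stdio.h>

struct big_request { char bytes[64]; };

int main(void)
{
	struct big_request *p = NULL;

	/* sizeof(p) is the pointer size, not the size of what it points to. */
	printf("sizeof(struct big_request *) = %zu\n",
	       sizeof(struct big_request *));
	printf("sizeof(struct big_request)   = %zu\n",
	       sizeof(struct big_request));
	printf("sizeof(*p)                   = %zu\n", sizeof(*p));
	return 0;
}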
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ad779bd13aec..6c1046df81f6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
724 /* 724 /*
725 * Dispatch io. 725 * Dispatch io.
726 */ 726 */
727 if (unlikely(ms->log_failure)) { 727 if (unlikely(ms->log_failure) && errors_handled(ms)) {
728 spin_lock_irq(&ms->lock); 728 spin_lock_irq(&ms->lock);
729 bio_list_merge(&ms->failures, &sync); 729 bio_list_merge(&ms->failures, &sync);
730 spin_unlock_irq(&ms->lock); 730 spin_unlock_irq(&ms->lock);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 5f19ceb6fe91..168bd38f5006 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
660 spin_lock_irq(&rh->region_lock); 660 spin_lock_irq(&rh->region_lock);
661 if (success) 661 if (success)
662 list_add(&reg->list, &reg->rh->recovered_regions); 662 list_add(&reg->list, &reg->rh->recovered_regions);
663 else { 663 else
664 reg->state = DM_RH_NOSYNC;
665 list_add(&reg->list, &reg->rh->failed_recovered_regions); 664 list_add(&reg->list, &reg->rh->failed_recovered_regions);
666 } 665
667 spin_unlock_irq(&rh->region_lock); 666 spin_unlock_irq(&rh->region_lock);
668 667
669 rh->wakeup_workers(rh->context); 668 rh->wakeup_workers(rh->context);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 7d08879689ac..c097d8a4823d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
254 * Issue the synchronous I/O from a different thread 254 * Issue the synchronous I/O from a different thread
255 * to avoid generic_make_request recursion. 255 * to avoid generic_make_request recursion.
256 */ 256 */
257 INIT_WORK(&req.work, do_metadata); 257 INIT_WORK_ON_STACK(&req.work, do_metadata);
258 queue_work(ps->metadata_wq, &req.work); 258 queue_work(ps->metadata_wq, &req.work);
259 flush_workqueue(ps->metadata_wq); 259 flush_workqueue(ps->metadata_wq);
260 260
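req.work lives on the caller's stack, so it has to be initialised with INIT_WORK_ON_STACK() rather than INIT_WORK(); with work debug objects enabled, the plain initialiser is flagged for on-stack objects. A minimal sketch of the on-stack pattern, assuming a synchronous flush before the struct goes out of scope (names made up):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_req {
	struct work_struct work;
	int result;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_req *req = container_of(work, struct demo_req, work);

	req->result = 0;	/* the synchronous I/O would happen here */
}

static int demo_run_on_stack(struct workqueue_struct *wq)
{
	struct demo_req req;

	INIT_WORK_ON_STACK(&req.work, demo_work_fn);
	queue_work(wq, &req.work);
	flush_workqueue(wq);	/* must complete before req leaves scope */

	return req.result;
}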
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e0efc1adcaff..bd58703ee8f6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
110 } 110 }
111 111
112 stripes = simple_strtoul(argv[0], &end, 10); 112 stripes = simple_strtoul(argv[0], &end, 10);
113 if (*end) { 113 if (!stripes || *end) {
114 ti->error = "Invalid stripe count"; 114 ti->error = "Invalid stripe count";
115 return -EINVAL; 115 return -EINVAL;
116 } 116 }
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index f53392df7b97..f91b40942e07 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = {
80}; 80};
81 81
82/* 82/*
83 * The sysfs structure is embedded in md struct, nothing to do here
84 */
85static void dm_sysfs_release(struct kobject *kobj)
86{
87}
88
89/*
90 * dm kobject is embedded in mapped_device structure 83 * dm kobject is embedded in mapped_device structure
91 * no need to define release function here 84 * no need to define release function here
92 */ 85 */
93static struct kobj_type dm_ktype = { 86static struct kobj_type dm_ktype = {
94 .sysfs_ops = &dm_sysfs_ops, 87 .sysfs_ops = &dm_sysfs_ops,
95 .default_attrs = dm_attrs, 88 .default_attrs = dm_attrs,
96 .release = dm_sysfs_release
97}; 89};
98 90
99/* 91/*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3167480b532c..aa4e2aa86d49 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
1595 return BLKPREP_OK; 1595 return BLKPREP_OK;
1596} 1596}
1597 1597
1598static void map_request(struct dm_target *ti, struct request *clone, 1598/*
1599 struct mapped_device *md) 1599 * Returns:
1600 * 0 : the request has been processed (not requeued)
1601 * !0 : the request has been requeued
1602 */
1603static int map_request(struct dm_target *ti, struct request *clone,
1604 struct mapped_device *md)
1600{ 1605{
1601 int r; 1606 int r, requeued = 0;
1602 struct dm_rq_target_io *tio = clone->end_io_data; 1607 struct dm_rq_target_io *tio = clone->end_io_data;
1603 1608
1604 /* 1609 /*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
1625 case DM_MAPIO_REQUEUE: 1630 case DM_MAPIO_REQUEUE:
1626 /* The target wants to requeue the I/O */ 1631 /* The target wants to requeue the I/O */
1627 dm_requeue_unmapped_request(clone); 1632 dm_requeue_unmapped_request(clone);
1633 requeued = 1;
1628 break; 1634 break;
1629 default: 1635 default:
1630 if (r > 0) { 1636 if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
1636 dm_kill_unmapped_request(clone, r); 1642 dm_kill_unmapped_request(clone, r);
1637 break; 1643 break;
1638 } 1644 }
1645
1646 return requeued;
1639} 1647}
1640 1648
1641/* 1649/*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
1677 atomic_inc(&md->pending[rq_data_dir(clone)]); 1685 atomic_inc(&md->pending[rq_data_dir(clone)]);
1678 1686
1679 spin_unlock(q->queue_lock); 1687 spin_unlock(q->queue_lock);
1680 map_request(ti, clone, md); 1688 if (map_request(ti, clone, md))
1689 goto requeued;
1690
1681 spin_lock_irq(q->queue_lock); 1691 spin_lock_irq(q->queue_lock);
1682 } 1692 }
1683 1693
1684 goto out; 1694 goto out;
1685 1695
1696requeued:
1697 spin_lock_irq(q->queue_lock);
1698
1686plug_and_out: 1699plug_and_out:
1687 if (!elv_queue_empty(q)) 1700 if (!elv_queue_empty(q))
1688 /* Some requests still remain, retry later */ 1701 /* Some requests still remain, retry later */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dd3dfe42d5a9..a20a71e5efd3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
4075{ 4075{
4076 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4076 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4077 4077
4078 if (mddev->private == &md_redundancy_group) { 4078 if (mddev->private) {
4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4080 if (mddev->private != (void*)1)
4081 sysfs_remove_group(&mddev->kobj, mddev->private);
4080 if (mddev->sysfs_action) 4082 if (mddev->sysfs_action)
4081 sysfs_put(mddev->sysfs_action); 4083 sysfs_put(mddev->sysfs_action);
4082 mddev->sysfs_action = NULL; 4084 mddev->sysfs_action = NULL;
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
4287 sysfs_notify_dirent(rdev->sysfs_state); 4289 sysfs_notify_dirent(rdev->sysfs_state);
4288 } 4290 }
4289 4291
4290 md_probe(mddev->unit, NULL, NULL);
4291 disk = mddev->gendisk; 4292 disk = mddev->gendisk;
4292 if (!disk)
4293 return -ENOMEM;
4294 4293
4295 spin_lock(&pers_lock); 4294 spin_lock(&pers_lock);
4296 pers = find_pers(mddev->level, mddev->clevel); 4295 pers = find_pers(mddev->level, mddev->clevel);
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4530 mddev->queue->unplug_fn = NULL; 4529 mddev->queue->unplug_fn = NULL;
4531 mddev->queue->backing_dev_info.congested_fn = NULL; 4530 mddev->queue->backing_dev_info.congested_fn = NULL;
4532 module_put(mddev->pers->owner); 4531 module_put(mddev->pers->owner);
4533 if (mddev->pers->sync_request) 4532 if (mddev->pers->sync_request && mddev->private == NULL)
4534 mddev->private = &md_redundancy_group; 4533 mddev->private = (void*)1;
4535 mddev->pers = NULL; 4534 mddev->pers = NULL;
4536 /* tell userspace to handle 'inactive' */ 4535 /* tell userspace to handle 'inactive' */
4537 sysfs_notify_dirent(mddev->sysfs_state); 4536 sysfs_notify_dirent(mddev->sysfs_state);
@@ -4578,9 +4577,6 @@ out:
4578 } 4577 }
4579 mddev->bitmap_info.offset = 0; 4578 mddev->bitmap_info.offset = 0;
4580 4579
4581 /* make sure all md_delayed_delete calls have finished */
4582 flush_scheduled_work();
4583
4584 export_array(mddev); 4580 export_array(mddev);
4585 4581
4586 mddev->array_sectors = 0; 4582 mddev->array_sectors = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204eb12df..ceb24afdc147 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
5136 mddev->thread = NULL; 5136 mddev->thread = NULL;
5137 mddev->queue->backing_dev_info.congested_fn = NULL; 5137 mddev->queue->backing_dev_info.congested_fn = NULL;
5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5139 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
5140 free_conf(conf); 5139 free_conf(conf);
5141 mddev->private = NULL; 5140 mddev->private = &raid5_attrs_group;
5142 return 0; 5141 return 0;
5143} 5142}
5144 5143
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
5464 !test_bit(Faulty, &rdev->flags)) { 5463 !test_bit(Faulty, &rdev->flags)) {
5465 if (raid5_add_disk(mddev, rdev) == 0) { 5464 if (raid5_add_disk(mddev, rdev) == 0) {
5466 char nm[20]; 5465 char nm[20];
5467 if (rdev->raid_disk >= conf->previous_raid_disks) 5466 if (rdev->raid_disk >= conf->previous_raid_disks) {
5468 set_bit(In_sync, &rdev->flags); 5467 set_bit(In_sync, &rdev->flags);
5469 else 5468 added_devices++;
5469 } else
5470 rdev->recovery_offset = 0; 5470 rdev->recovery_offset = 0;
5471 added_devices++;
5472 sprintf(nm, "rd%d", rdev->raid_disk); 5471 sprintf(nm, "rd%d", rdev->raid_disk);
5473 if (sysfs_create_link(&mddev->kobj, 5472 if (sysfs_create_link(&mddev->kobj,
5474 &rdev->kobj, nm)) 5473 &rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
5480 break; 5479 break;
5481 } 5480 }
5482 5481
5482 /* When a reshape changes the number of devices, ->degraded
5483 * is measured against the large of the pre and post number of
5484 * devices.*/
5483 if (mddev->delta_disks > 0) { 5485 if (mddev->delta_disks > 0) {
5484 spin_lock_irqsave(&conf->device_lock, flags); 5486 spin_lock_irqsave(&conf->device_lock, flags);
5485 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 5487 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5486 - added_devices; 5488 - added_devices;
5487 spin_unlock_irqrestore(&conf->device_lock, flags); 5489 spin_unlock_irqrestore(&conf->device_lock, flags);
5488 } 5490 }
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c37790ad92d0..9ddc57909d49 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); 761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
762 dmxdevfilter->type = DMXDEV_TYPE_NONE; 762 dmxdevfilter->type = DMXDEV_TYPE_NONE;
763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); 763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
764 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
765 init_timer(&dmxdevfilter->timer); 764 init_timer(&dmxdevfilter->timer);
766 765
767 dvbdev->users++; 766 dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
887 dmxdevfilter->type = DMXDEV_TYPE_PES; 886 dmxdevfilter->type = DMXDEV_TYPE_PES;
888 memcpy(&dmxdevfilter->params, params, 887 memcpy(&dmxdevfilter->params, params,
889 sizeof(struct dmx_pes_filter_params)); 888 sizeof(struct dmx_pes_filter_params));
889 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
890 890
891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
892 892
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b78cfb7d1897..67f189b7aa1f 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
426 }; 426 };
427 }; 427 };
428 428
429 if (dvb_demux_tscheck) { 429 if (demux->cnt_storage) {
430 if (!demux->cnt_storage)
431 demux->cnt_storage = vmalloc(MAX_PID + 1);
432
433 if (!demux->cnt_storage) {
434 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
435 dvb_demux_tscheck = 0;
436 goto no_dvb_demux_tscheck;
437 }
438
439 /* check pkt counter */ 430 /* check pkt counter */
440 if (pid < MAX_PID) { 431 if (pid < MAX_PID) {
441 if (buf[1] & 0x80) 432 if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
454 }; 445 };
455 /* end check */ 446 /* end check */
456 }; 447 };
457no_dvb_demux_tscheck:
458 448
459 list_for_each_entry(feed, &demux->feed_list, list_head) { 449 list_for_each_entry(feed, &demux->feed_list, list_head) {
460 if ((feed->pid != pid) && (feed->pid != 0x2000)) 450 if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1246 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); 1236 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
1247 if (!dvbdemux->feed) { 1237 if (!dvbdemux->feed) {
1248 vfree(dvbdemux->filter); 1238 vfree(dvbdemux->filter);
1239 dvbdemux->filter = NULL;
1249 return -ENOMEM; 1240 return -ENOMEM;
1250 } 1241 }
1251 for (i = 0; i < dvbdemux->filternum; i++) { 1242 for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1257 dvbdemux->feed[i].index = i; 1248 dvbdemux->feed[i].index = i;
1258 } 1249 }
1259 1250
1251 if (dvb_demux_tscheck) {
1252 dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
1253
1254 if (!dvbdemux->cnt_storage)
1255 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
1256 }
1257
1260 INIT_LIST_HEAD(&dvbdemux->frontend_list); 1258 INIT_LIST_HEAD(&dvbdemux->frontend_list);
1261 1259
1262 for (i = 0; i < DMX_TS_PES_OTHER; i++) { 1260 for (i = 0; i < DMX_TS_PES_OTHER; i++) {
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 1b249897c9fb..465295b1d14b 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -112,11 +112,13 @@ config DVB_USB_CXUSB
112 select DVB_MT352 if !DVB_FE_CUSTOMISE 112 select DVB_MT352 if !DVB_FE_CUSTOMISE
113 select DVB_ZL10353 if !DVB_FE_CUSTOMISE 113 select DVB_ZL10353 if !DVB_FE_CUSTOMISE
114 select DVB_DIB7000P if !DVB_FE_CUSTOMISE 114 select DVB_DIB7000P if !DVB_FE_CUSTOMISE
115 select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE
116 select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE 115 select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
116 select DVB_ATBM8830 if !DVB_FE_CUSTOMISE
117 select DVB_LGS8GXX if !DVB_FE_CUSTOMISE
117 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE 118 select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
118 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 119 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
119 select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE 120 select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
121 select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE
120 help 122 help
121 Say Y here to support the Conexant USB2.0 hybrid reference design. 123 Say Y here to support the Conexant USB2.0 hybrid reference design.
122 Currently, only DVB and ATSC modes are supported, analog mode 124 Currently, only DVB and ATSC modes are supported, analog mode
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index 3051b64aa17c..445fa1068064 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
192 spi_bias *= qam_tab[p->constellation]; 192 spi_bias *= qam_tab[p->constellation];
193 spi_bias /= p->code_rate_HP + 1; 193 spi_bias /= p->code_rate_HP + 1;
194 spi_bias /= (guard_tab[p->guard_interval] + 32); 194 spi_bias /= (guard_tab[p->guard_interval] + 32);
195 spi_bias *= 1000ULL; 195 spi_bias *= 1000;
196 spi_bias /= 1000ULL + ppm/1000; 196 spi_bias /= 1000 + ppm/1000;
197 spi_bias *= p->code_rate_HP; 197 spi_bias *= p->code_rate_HP;
198 198
199 val0x04 = (p->transmission_mode << 2) | p->guard_interval; 199 val0x04 = (p->transmission_mode << 2) | p->guard_interval;
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 3182a406bdd1..ae08b077fd04 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4461,6 +4461,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
4461 request_modules(btv); 4461 request_modules(btv);
4462 } 4462 }
4463 4463
4464 init_bttv_i2c_ir(btv);
4464 bttv_input_init(btv); 4465 bttv_input_init(btv);
4465 4466
4466 /* everything is fine */ 4467 /* everything is fine */
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 63aa31a041e8..407fa61e4cda 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv *btv)
388 if (0 == btv->i2c_rc && i2c_scan) 388 if (0 == btv->i2c_rc && i2c_scan)
389 do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client); 389 do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
390 390
391 /* Instantiate the IR receiver device, if present */ 391 return btv->i2c_rc;
392}
393
394/* Instantiate the I2C IR receiver device, if present */
395void __devinit init_bttv_i2c_ir(struct bttv *btv)
396{
392 if (0 == btv->i2c_rc) { 397 if (0 == btv->i2c_rc) {
393 struct i2c_board_info info; 398 struct i2c_board_info info;
394 /* The external IR receiver is at i2c address 0x34 (0x35 for 399 /* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv *btv)
408 strlcpy(info.type, "ir_video", I2C_NAME_SIZE); 413 strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
409 i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list); 414 i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
410 } 415 }
411 return btv->i2c_rc;
412} 416}
413 417
414int __devexit fini_bttv_i2c(struct bttv *btv) 418int __devexit fini_bttv_i2c(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index a1d0e9c9f286..6cccc2a17eee 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug;
279extern unsigned int bttv_gpio; 279extern unsigned int bttv_gpio;
280extern void bttv_gpio_tracking(struct bttv *btv, char *comment); 280extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
281extern int init_bttv_i2c(struct bttv *btv); 281extern int init_bttv_i2c(struct bttv *btv);
282extern void init_bttv_i2c_ir(struct bttv *btv);
282extern int fini_bttv_i2c(struct bttv *btv); 283extern int fini_bttv_i2c(struct bttv *btv);
283 284
284#define bttv_printk if (bttv_verbose) printk 285#define bttv_printk if (bttv_verbose) printk
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index fc4dd6045720..7438f8d775ba 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -514,7 +514,7 @@ static int mt9t112_init_pll(const struct i2c_client *client)
514 /* poll to verify out of standby. Must Poll this bit */ 514 /* poll to verify out of standby. Must Poll this bit */
515 for (i = 0; i < 100; i++) { 515 for (i = 0; i < 100; i++) {
516 mt9t112_reg_read(data, client, 0x0018); 516 mt9t112_reg_read(data, client, 0x0018);
517 if (0x4000 & data) 517 if (!(0x4000 & data))
518 break; 518 break;
519 519
520 mdelay(10); 520 mdelay(10);
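Per the comment in the hunk, the loop polls register 0x0018 until the sensor reports it has left standby; the fix keeps polling while bit 0x4000 is still set instead of breaking out on the first read where it was set. A generic poll-until-clear helper in the same spirit (illustrative, names made up):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_poll_bit_clear(u16 (*read_reg)(u16 addr), u16 addr, u16 mask,
			       int tries)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (!(read_reg(addr) & mask))
			return 0;	/* bit reads back clear: done */
		mdelay(10);
	}

	return -ETIMEDOUT;
}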
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 50b415e07eda..f7f7e04cf485 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
753 buf[0] = 0xff; /* fixed */ 753 buf[0] = 0xff; /* fixed */
754 754
755 ret = send_control_msg(pdev, 755 ret = send_control_msg(pdev,
756 SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf)); 756 SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
757 757
758 if (!mode && ret >= 0) { 758 if (!mode && ret >= 0) {
759 if (value < 0) 759 if (value < 0)
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 57752751712b..81279b3d694c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " 1796 dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
1797 "Command not in the active list! (sc=%p)\n", ioc->name, 1797 "Command not in the active list! (sc=%p)\n", ioc->name,
1798 SCpnt)); 1798 SCpnt));
1799 retval = 0; 1799 retval = SUCCESS;
1800 goto out; 1800 goto out;
1801 } 1801 }
1802 1802
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b9f1e84897cc..e7f8027165e6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
74 } 74 }
75 75
76 mrq->cmd->arg = dev_addr; 76 mrq->cmd->arg = dev_addr;
77 if (!mmc_card_blockaddr(test->card))
78 mrq->cmd->arg <<= 9;
79
77 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 80 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
78 81
79 if (blocks == 1) 82 if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
190 } 193 }
191 194
192 for (i = 0;i < BUFFER_SIZE / 512;i++) { 195 for (i = 0;i < BUFFER_SIZE / 512;i++) {
193 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 196 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
194 if (ret) 197 if (ret)
195 return ret; 198 return ret;
196 } 199 }
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
219 memset(test->buffer, 0, 512); 222 memset(test->buffer, 0, 512);
220 223
221 for (i = 0;i < BUFFER_SIZE / 512;i++) { 224 for (i = 0;i < BUFFER_SIZE / 512;i++) {
222 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 225 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
223 if (ret) 226 if (ret)
224 return ret; 227 return ret;
225 } 228 }
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
426 for (i = 0;i < sectors;i++) { 429 for (i = 0;i < sectors;i++) {
427 ret = mmc_test_buffer_transfer(test, 430 ret = mmc_test_buffer_transfer(test,
428 test->buffer + i * 512, 431 test->buffer + i * 512,
429 dev_addr + i * 512, 512, 0); 432 dev_addr + i, 512, 0);
430 if (ret) 433 if (ret)
431 return ret; 434 return ret;
432 } 435 }
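Standard-capacity cards take a byte address in the command argument, while block-addressed (high-capacity) cards take the sector number directly, so the test now shifts the command argument left by 9 only when mmc_card_blockaddr() is false, and the helper transfers pass sector indices rather than byte offsets. The rule as a small helper (hypothetical, not in the driver):

#include <linux/types.h>
#include <linux/mmc/card.h>

static u32 demo_dev_addr(struct mmc_card *card, unsigned int sector)
{
	/* Block-addressed (high capacity) cards take the sector number... */
	if (mmc_card_blockaddr(card))
		return sector;

	/* ...byte-addressed cards take sector * 512. */
	return sector << 9;
}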
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..1dd4403247ca 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
921 size = (res->end - res->start) + 1; 921 size = (res->end - res->start) + 1;
922 922
923 ax->mem2 = request_mem_region(res->start, size, pdev->name); 923 ax->mem2 = request_mem_region(res->start, size, pdev->name);
924 if (ax->mem == NULL) { 924 if (ax->mem2 == NULL) {
925 dev_err(&pdev->dev, "cannot reserve registers\n"); 925 dev_err(&pdev->dev, "cannot reserve registers\n");
926 ret = -ENXIO; 926 ret = -ENXIO;
927 goto exit_mem1; 927 goto exit_mem1;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index fee6eee7ae5b..006cb2efcd22 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
296 req_hdr->opcode = opcode; 296 req_hdr->opcode = opcode;
297 req_hdr->subsystem = subsystem; 297 req_hdr->subsystem = subsystem;
298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 298 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
299 req_hdr->version = 0;
299} 300}
300 301
301static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 302static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..318a018ca7c5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2079 struct sge_fl *fl, int len, int complete) 2079 struct sge_fl *fl, int len, int complete)
2080{ 2080{
2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2082 struct port_info *pi = netdev_priv(qs->netdev);
2082 struct sk_buff *skb = NULL; 2083 struct sk_buff *skb = NULL;
2083 struct cpl_rx_pkt *cpl; 2084 struct cpl_rx_pkt *cpl;
2084 struct skb_frag_struct *rx_frag; 2085 struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2116 2117
2117 if (!nr_frags) { 2118 if (!nr_frags) {
2118 offset = 2 + sizeof(struct cpl_rx_pkt); 2119 offset = 2 + sizeof(struct cpl_rx_pkt);
2119 qs->lro_va = sd->pg_chunk.va + 2; 2120 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2120 }
2121 len -= offset;
2122 2121
2123 prefetch(qs->lro_va); 2122 if ((pi->rx_offload & T3_RX_CSUM) &&
2123 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2124 skb->ip_summed = CHECKSUM_UNNECESSARY;
2125 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2126 } else
2127 skb->ip_summed = CHECKSUM_NONE;
2128 } else
2129 cpl = qs->lro_va;
2130
2131 len -= offset;
2124 2132
2125 rx_frag += nr_frags; 2133 rx_frag += nr_frags;
2126 rx_frag->page = sd->pg_chunk.page; 2134 rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2136 return; 2144 return;
2137 2145
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2146 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2140 cpl = qs->lro_va;
2141 2147
2142 if (unlikely(cpl->vlan_valid)) { 2148 if (unlikely(cpl->vlan_valid)) {
2143 struct net_device *dev = qs->netdev;
2144 struct port_info *pi = netdev_priv(dev);
2145 struct vlan_group *grp = pi->vlan_grp; 2149 struct vlan_group *grp = pi->vlan_grp;
2146 2150
2147 if (likely(grp != NULL)) { 2151 if (likely(grp != NULL)) {
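The LRO path no longer marks every aggregated skb CHECKSUM_UNNECESSARY unconditionally; the checksum status is decided from the CPL header while the first fragment is added, and only when the port has T3_RX_CSUM offload enabled and the hardware verified the checksum. The decision in isolation, as an illustrative sketch with the CPL fields passed in directly (the demo_* name is made up):

#include <linux/skbuff.h>
#include <asm/byteorder.h>

static void demo_set_rx_csum(struct sk_buff *skb, bool rx_csum_enabled,
			     bool csum_valid, __be16 csum)
{
	if (rx_csum_enabled && csum_valid && csum == htons(0xffff))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}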
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d29bb532eccf..765543663a4f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4006,11 +4006,21 @@ check_page:
4006 } 4006 }
4007 } 4007 }
4008 4008
4009 if (!buffer_info->dma) 4009 if (!buffer_info->dma) {
4010 buffer_info->dma = pci_map_page(pdev, 4010 buffer_info->dma = pci_map_page(pdev,
4011 buffer_info->page, 0, 4011 buffer_info->page, 0,
4012 buffer_info->length, 4012 buffer_info->length,
4013 PCI_DMA_FROMDEVICE); 4013 PCI_DMA_FROMDEVICE);
4014 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
4015 put_page(buffer_info->page);
4016 dev_kfree_skb(skb);
4017 buffer_info->page = NULL;
4018 buffer_info->skb = NULL;
4019 buffer_info->dma = 0;
4020 adapter->alloc_rx_buff_failed++;
4021 break; /* while !buffer_info->skb */
4022 }
4023 }
4014 4024
4015 rx_desc = E1000_RX_DESC(*rx_ring, i); 4025 rx_desc = E1000_RX_DESC(*rx_ring, i);
4016 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4026 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4101,6 +4111,13 @@ map_skb:
4101 skb->data, 4111 skb->data,
4102 buffer_info->length, 4112 buffer_info->length,
4103 PCI_DMA_FROMDEVICE); 4113 PCI_DMA_FROMDEVICE);
4114 if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
4115 dev_kfree_skb(skb);
4116 buffer_info->skb = NULL;
4117 buffer_info->dma = 0;
4118 adapter->alloc_rx_buff_failed++;
4119 break; /* while !buffer_info->skb */
4120 }
4104 4121
4105 /* 4122 /*
4106 * XXX if it was allocated cleanly it will never map to a 4123 * XXX if it was allocated cleanly it will never map to a
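pci_map_page() and pci_map_single() can fail, for instance when an IOMMU runs out of mapping space, so the returned handle is checked with pci_dma_mapping_error() before being written into an RX descriptor; on failure the buffer is freed, alloc_rx_buff_failed is bumped and the refill loop stops. A minimal sketch of the check-and-unwind pattern (the demo_* name is made up):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static int demo_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			   unsigned int len, dma_addr_t *dma)
{
	*dma = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *dma)) {
		dev_kfree_skb(skb);	/* the caller must not reuse this skb */
		*dma = 0;
		return -ENOMEM;
	}

	return 0;
}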
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 997124d2992a..c881347cb26d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
422 if (tx_queue > IGB_N0_QUEUE) 422 if (tx_queue > IGB_N0_QUEUE)
423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
424 if (!adapter->msix_entries && msix_vector == 0)
425 msixbm |= E1000_EIMS_OTHER;
424 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 426 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
425 q_vector->eims_value = msixbm; 427 q_vector->eims_value = msixbm;
426 break; 428 break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
877{ 879{
878 struct net_device *netdev = adapter->netdev; 880 struct net_device *netdev = adapter->netdev;
879 struct pci_dev *pdev = adapter->pdev; 881 struct pci_dev *pdev = adapter->pdev;
880 struct e1000_hw *hw = &adapter->hw;
881 int err = 0; 882 int err = 0;
882 883
883 if (adapter->msix_entries) { 884 if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
909 igb_setup_all_tx_resources(adapter); 910 igb_setup_all_tx_resources(adapter);
910 igb_setup_all_rx_resources(adapter); 911 igb_setup_all_rx_resources(adapter);
911 } else { 912 } else {
912 switch (hw->mac.type) { 913 igb_assign_vector(adapter->q_vector[0], 0);
913 case e1000_82575:
914 wr32(E1000_MSIXBM(0),
915 (E1000_EICR_RX_QUEUE0 |
916 E1000_EICR_TX_QUEUE0 |
917 E1000_EIMS_OTHER));
918 break;
919 case e1000_82580:
920 case e1000_82576:
921 wr32(E1000_IVAR0, E1000_IVAR_VALID);
922 break;
923 default:
924 break;
925 }
926 } 914 }
927 915
928 if (adapter->flags & IGB_FLAG_HAS_MSI) { 916 if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
1140 } 1128 }
1141 if (adapter->msix_entries) 1129 if (adapter->msix_entries)
1142 igb_configure_msix(adapter); 1130 igb_configure_msix(adapter);
1131 else
1132 igb_assign_vector(adapter->q_vector[0], 0);
1143 1133
1144 /* Clear any pending interrupts. */ 1134 /* Clear any pending interrupts. */
1145 rd32(E1000_ICR); 1135 rd32(E1000_ICR);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 3103f4165311..35a06b47587b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
357 u32 fctrl_reg; 357 u32 fctrl_reg;
358 u32 rmcs_reg; 358 u32 rmcs_reg;
359 u32 reg; 359 u32 reg;
360 u32 link_speed = 0;
361 bool link_up;
360 362
361#ifdef CONFIG_DCB 363#ifdef CONFIG_DCB
362 if (hw->fc.requested_mode == ixgbe_fc_pfc) 364 if (hw->fc.requested_mode == ixgbe_fc_pfc)
363 goto out; 365 goto out;
364 366
365#endif /* CONFIG_DCB */ 367#endif /* CONFIG_DCB */
368 /*
369 * On 82598 having Rx FC on causes resets while doing 1G
370 * so if it's on turn it off once we know link_speed. For
371 * more details see 82598 Specification update.
372 */
373 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
374 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
375 switch (hw->fc.requested_mode) {
376 case ixgbe_fc_full:
377 hw->fc.requested_mode = ixgbe_fc_tx_pause;
378 break;
379 case ixgbe_fc_rx_pause:
380 hw->fc.requested_mode = ixgbe_fc_none;
381 break;
382 default:
383 /* no change */
384 break;
385 }
386 }
387
366 /* Negotiate the fc mode to use */ 388 /* Negotiate the fc mode to use */
367 ret_val = ixgbe_fc_autoneg(hw); 389 ret_val = ixgbe_fc_autoneg(hw);
368 if (ret_val) 390 if (ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b5f64ad67975..951b73cf5ca2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5179,7 +5179,7 @@ dma_error:
5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5180 } 5180 }
5181 5181
5182 return count; 5182 return 0;
5183} 5183}
5184 5184
5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5329 struct ixgbe_adapter *adapter = netdev_priv(dev); 5329 struct ixgbe_adapter *adapter = netdev_priv(dev);
5330 int txq = smp_processor_id(); 5330 int txq = smp_processor_id();
5331 5331
5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) 5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5333 while (unlikely(txq >= dev->real_num_tx_queues))
5334 txq -= dev->real_num_tx_queues;
5333 return txq; 5335 return txq;
5336 }
5334 5337
5335#ifdef IXGBE_FCOE 5338#ifdef IXGBE_FCOE
5336 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5339 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -5760,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
5760 if (err) 5763 if (err)
5761 goto err_sw_init; 5764 goto err_sw_init;
5762 5765
 5766 /* Make it possible for the adapter to be woken up via WOL */
5767 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
5768 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5769
5763 /* 5770 /*
5764 * If there is a fan on this device and it has failed log the 5771 * If there is a fan on this device and it has failed log the
5765 * failure. 5772 * failure.
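The ixgbe_select_queue() hunk above clamps the CPU-derived index so it can never exceed dev->real_num_tx_queues, since smp_processor_id() may be larger than the number of Tx queues actually enabled. A tiny illustrative helper showing the same folding idiom (kernel context assumed; clamp_txq is a made-up name and expects at least one queue):

#include <linux/kernel.h>
#include <linux/types.h>

static u16 clamp_txq(u16 cpu, u16 real_num_tx_queues)
{
	u16 txq = cpu;

	/* Fold the CPU number back into [0, real_num_tx_queues). */
	while (unlikely(txq >= real_num_tx_queues))
		txq -= real_num_tx_queues;
	return txq;
}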
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f9d6081959b..24279e6e55f5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1941 netif_wake_queue(adapter->netdev); 1941 netif_wake_queue(adapter->netdev);
1942 1942
1943 clear_bit(__NX_RESETTING, &adapter->state); 1943 clear_bit(__NX_RESETTING, &adapter->state);
1944 1944 return;
1945 } else { 1945 } else {
1946 clear_bit(__NX_RESETTING, &adapter->state); 1946 clear_bit(__NX_RESETTING, &adapter->state);
1947 if (!netxen_nic_reset_context(adapter)) { 1947 if (!netxen_nic_reset_context(adapter)) {
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
2240 2240
2241 netxen_nic_down(adapter, netdev); 2241 netxen_nic_down(adapter, netdev);
2242 2242
2243 rtnl_lock();
2243 netxen_nic_detach(adapter); 2244 netxen_nic_detach(adapter);
2245 rtnl_unlock();
2244 2246
2245 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2247 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
2246 2248
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 103e8b0e2a0d..46997e177ee3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2284 fail2: 2284 fail2:
2285 efx_fini_struct(efx); 2285 efx_fini_struct(efx);
2286 fail1: 2286 fail1:
2287 WARN_ON(rc > 0);
2287 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); 2288 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2288 free_netdev(net_dev); 2289 free_netdev(net_dev);
2289 return rc; 2290 return rc;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index bf0b96af5334..5712fddd72f2 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -29,6 +29,15 @@
29#define FALCON_BOARD_SFN4111T 0x51 29#define FALCON_BOARD_SFN4111T 0x51
30#define FALCON_BOARD_SFN4112F 0x52 30#define FALCON_BOARD_SFN4112F 0x52
31 31
32/* Board temperature is about 15°C above ambient when air flow is
33 * limited. */
34#define FALCON_BOARD_TEMP_BIAS 15
35
36/* SFC4000 datasheet says: 'The maximum permitted junction temperature
37 * is 125°C; the thermal design of the environment for the SFC4000
38 * should aim to keep this well below 100°C.' */
39#define FALCON_JUNC_TEMP_MAX 90
40
32/***************************************************************************** 41/*****************************************************************************
33 * Support for LM87 sensor chip used on several boards 42 * Support for LM87 sensor chip used on several boards
34 */ 43 */
@@ -548,16 +557,16 @@ fail_hwmon:
548static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ 557static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
549 558
550static const u8 sfe4002_lm87_regs[] = { 559static const u8 sfe4002_lm87_regs[] = {
551 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ 560 LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
552 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ 561 LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
553 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ 562 LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
554 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */ 563 LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
555 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ 564 LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
556 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ 565 LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
557 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */ 566 LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
558 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ 567 LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
559 LM87_TEMP_INT_LIMITS(10, 60), /* board */ 568 LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
560 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ 569 LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
561 0 570 0
562}; 571};
563 572
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx)
619static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ 628static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
620 629
621static const u8 sfn4112f_lm87_regs[] = { 630static const u8 sfn4112f_lm87_regs[] = {
622 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */ 631 LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
623 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */ 632 LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
624 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */ 633 LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
625 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */ 634 LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
626 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */ 635 LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
627 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */ 636 LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
628 LM87_TEMP_INT_LIMITS(10, 60), /* board */ 637 LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
629 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */ 638 LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
630 0 639 0
631}; 640};
632 641
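For readers decoding the new LM87 limit bytes: they appear to follow the LM87 convention that a register value of 0xc0 (192) corresponds to the channel's nominal voltage, so a limit is roughly 192 * V / V_nominal (for example 1.8 V minus 10% on the 2.5 V input gives 192 * 1.62 / 2.5, about 124 = 0x7c). This scaling is inferred from the values above, not stated in the patch; verify against the LM87 datasheet before relying on it. A small helper under that assumption (lm87_limit is a made-up name):

#include <linux/types.h>

/* Assumed scaling: 0xc0 == channel nominal voltage, i.e. 192 * mv / nominal. */
static u8 lm87_limit(unsigned int mv, unsigned int nominal_mv)
{
	unsigned int val = 192 * mv / nominal_mv;	/* truncating division */

	return val > 0xff ? 0xff : val;
}

/* lm87_limit(1620, 2500) == 0x7c and lm87_limit(2970, 3300) == 0xac match the
 * minima in the table above; the maxima land within an LSB of 0x99 and 0xd4. */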
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 9f035b9f0350..f66b3da6ddff 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
127 efx_dword_t reg; 127 efx_dword_t reg;
128 128
129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ 129 /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
130 rc = efx_mcdi_poll_reboot(efx); 130 rc = -efx_mcdi_poll_reboot(efx);
131 if (rc) 131 if (rc)
132 goto out; 132 goto out;
133 133
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e0d13a451019..67eec7a6e487 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
320 320
321 falcon_board(efx)->type->init_phy(efx); 321 falcon_board(efx)->type->init_phy(efx);
322 322
323 return rc; 323 return 0;
324 324
325 fail: 325 fail:
326 EFX_ERR(efx, "PHY reset timed out\n"); 326 EFX_ERR(efx, "PHY reset timed out\n");
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d760650c5c04..67249c3c9f50 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) 1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1026{ 1026{
1027 struct sky2_tx_le *le = sky2->tx_le + *slot; 1027 struct sky2_tx_le *le = sky2->tx_le + *slot;
1028 struct tx_ring_info *re = sky2->tx_ring + *slot;
1029 1028
1030 *slot = RING_NEXT(*slot, sky2->tx_ring_size); 1029 *slot = RING_NEXT(*slot, sky2->tx_ring_size);
1031 re->flags = 0;
1032 re->skb = NULL;
1033 le->ctrl = 0; 1030 le->ctrl = 0;
1034 return le; 1031 return le;
1035} 1032}
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1622 return count; 1619 return count;
1623} 1620}
1624 1621
1625static void sky2_tx_unmap(struct pci_dev *pdev, 1622static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1626 const struct tx_ring_info *re)
1627{ 1623{
1628 if (re->flags & TX_MAP_SINGLE) 1624 if (re->flags & TX_MAP_SINGLE)
1629 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), 1625 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
1633 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1629 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1634 pci_unmap_len(re, maplen), 1630 pci_unmap_len(re, maplen),
1635 PCI_DMA_TODEVICE); 1631 PCI_DMA_TODEVICE);
1632 re->flags = 0;
1636} 1633}
1637 1634
1638/* 1635/*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1839 dev->stats.tx_packets++; 1836 dev->stats.tx_packets++;
1840 dev->stats.tx_bytes += skb->len; 1837 dev->stats.tx_bytes += skb->len;
1841 1838
1839 re->skb = NULL;
1842 dev_kfree_skb_any(skb); 1840 dev_kfree_skb_any(skb);
1843 1841
1844 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); 1842 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..d71c1976072e 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1437 /* Transmit complete. */ 1437 /* Transmit complete. */
1438 lp->lstats.tx_ints++; 1438 lp->lstats.tx_ints++;
1439 tc35815_txdone(dev); 1439 tc35815_txdone(dev);
1440 netif_wake_queue(dev);
1441 if (ret < 0) 1440 if (ret < 0)
1442 ret = 0; 1441 ret = 0;
1443 } 1442 }
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4f27f022fbf7..5f3b9eaeb04f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -584,6 +584,11 @@ static const struct usb_device_id products [] = {
584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
585 .driver_info = (unsigned long) &mbm_info, 585 .driver_info = (unsigned long) &mbm_info,
586}, { 586}, {
587 /* Ericsson C3607w ver 2 */
588 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
590 .driver_info = (unsigned long) &mbm_info,
591}, {
587 /* Toshiba F3507g */ 592 /* Toshiba F3507g */
588 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 593 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 594 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c93f58f5c6f2..317aa34b21cf 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
1877/** 1877/**
1878 * tx_srv - transmit interrupt service 1878 * tx_srv - transmit interrupt service
1879 * @vptr: Velocity 1879 * @vptr: Velocity
1880 * @status:
1881 * 1880 *
1882 * Scan the queues looking for transmitted packets that 1881 * Scan the queues looking for transmitted packets that
1883 * we can complete and clean up. Update any statistics as 1882 * we can complete and clean up. Update any statistics as
1884 * necessary. 1883 * necessary.
1885 */ 1884 */
1886static int velocity_tx_srv(struct velocity_info *vptr, u32 status) 1885static int velocity_tx_srv(struct velocity_info *vptr)
1887{ 1886{
1888 struct tx_desc *td; 1887 struct tx_desc *td;
1889 int qnum; 1888 int qnum;
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2090/** 2089/**
2091 * velocity_rx_srv - service RX interrupt 2090 * velocity_rx_srv - service RX interrupt
2092 * @vptr: velocity 2091 * @vptr: velocity
2093 * @status: adapter status (unused)
2094 * 2092 *
2095 * Walk the receive ring of the velocity adapter and remove 2093 * Walk the receive ring of the velocity adapter and remove
2096 * any received packets from the receive queue. Hand the ring 2094 * any received packets from the receive queue. Hand the ring
2097 * slots back to the adapter for reuse. 2095 * slots back to the adapter for reuse.
2098 */ 2096 */
2099static int velocity_rx_srv(struct velocity_info *vptr, int status, 2097static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2100 int budget_left)
2101{ 2098{
2102 struct net_device_stats *stats = &vptr->dev->stats; 2099 struct net_device_stats *stats = &vptr->dev->stats;
2103 int rd_curr = vptr->rx.curr; 2100 int rd_curr = vptr->rx.curr;
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
2151 struct velocity_info *vptr = container_of(napi, 2148 struct velocity_info *vptr = container_of(napi,
2152 struct velocity_info, napi); 2149 struct velocity_info, napi);
2153 unsigned int rx_done; 2150 unsigned int rx_done;
2154 u32 isr_status; 2151 unsigned long flags;
2155
2156 spin_lock(&vptr->lock);
2157 isr_status = mac_read_isr(vptr->mac_regs);
2158
2159 /* Ack the interrupt */
2160 mac_write_isr(vptr->mac_regs, isr_status);
2161 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2162 velocity_error(vptr, isr_status);
2163 2152
2153 spin_lock_irqsave(&vptr->lock, flags);
2164 /* 2154 /*
2165 * Do rx and tx twice for performance (taken from the VIA 2155 * Do rx and tx twice for performance (taken from the VIA
2166 * out-of-tree driver). 2156 * out-of-tree driver).
2167 */ 2157 */
2168 rx_done = velocity_rx_srv(vptr, isr_status, budget / 2); 2158 rx_done = velocity_rx_srv(vptr, budget / 2);
2169 velocity_tx_srv(vptr, isr_status); 2159 velocity_tx_srv(vptr);
2170 rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done); 2160 rx_done += velocity_rx_srv(vptr, budget - rx_done);
2171 velocity_tx_srv(vptr, isr_status); 2161 velocity_tx_srv(vptr);
2172
2173 spin_unlock(&vptr->lock);
2174 2162
2175 /* If budget not fully consumed, exit the polling mode */ 2163 /* If budget not fully consumed, exit the polling mode */
2176 if (rx_done < budget) { 2164 if (rx_done < budget) {
2177 napi_complete(napi); 2165 napi_complete(napi);
2178 mac_enable_int(vptr->mac_regs); 2166 mac_enable_int(vptr->mac_regs);
2179 } 2167 }
2168 spin_unlock_irqrestore(&vptr->lock, flags);
2180 2169
2181 return rx_done; 2170 return rx_done;
2182} 2171}
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
2206 return IRQ_NONE; 2195 return IRQ_NONE;
2207 } 2196 }
2208 2197
2198 /* Ack the interrupt */
2199 mac_write_isr(vptr->mac_regs, isr_status);
2200
2209 if (likely(napi_schedule_prep(&vptr->napi))) { 2201 if (likely(napi_schedule_prep(&vptr->napi))) {
2210 mac_disable_int(vptr->mac_regs); 2202 mac_disable_int(vptr->mac_regs);
2211 __napi_schedule(&vptr->napi); 2203 __napi_schedule(&vptr->napi);
2212 } 2204 }
2205
2206 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2207 velocity_error(vptr, isr_status);
2208
2213 spin_unlock(&vptr->lock); 2209 spin_unlock(&vptr->lock);
2214 2210
2215 return IRQ_HANDLED; 2211 return IRQ_HANDLED;
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
3100 velocity_init_registers(vptr, VELOCITY_INIT_WOL); 3096 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3101 mac_disable_int(vptr->mac_regs); 3097 mac_disable_int(vptr->mac_regs);
3102 3098
3103 velocity_tx_srv(vptr, 0); 3099 velocity_tx_srv(vptr);
3104 3100
3105 for (i = 0; i < vptr->tx.numq; i++) { 3101 for (i = 0; i < vptr->tx.numq; i++) {
3106 if (vptr->tx.used[i]) 3102 if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3344{ 3340{
3345 struct velocity_info *vptr = netdev_priv(dev); 3341 struct velocity_info *vptr = netdev_priv(dev);
3346 int max_us = 0x3f * 64; 3342 int max_us = 0x3f * 64;
3343 unsigned long flags;
3347 3344
3348 /* 6 bits of */ 3345 /* 6 bits of */
3349 if (ecmd->tx_coalesce_usecs > max_us) 3346 if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3365 ecmd->tx_coalesce_usecs); 3362 ecmd->tx_coalesce_usecs);
3366 3363
3367 /* Setup the interrupt suppression and queue timers */ 3364 /* Setup the interrupt suppression and queue timers */
3365 spin_lock_irqsave(&vptr->lock, flags);
3368 mac_disable_int(vptr->mac_regs); 3366 mac_disable_int(vptr->mac_regs);
3369 setup_adaptive_interrupts(vptr); 3367 setup_adaptive_interrupts(vptr);
3370 setup_queue_timers(vptr); 3368 setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
3372 mac_write_int_mask(vptr->int_mask, vptr->mac_regs); 3370 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3373 mac_clear_isr(vptr->mac_regs); 3371 mac_clear_isr(vptr->mac_regs);
3374 mac_enable_int(vptr->mac_regs); 3372 mac_enable_int(vptr->mac_regs);
3373 spin_unlock_irqrestore(&vptr->lock, flags);
3375 3374
3376 return 0; 3375 return 0;
3377} 3376}
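A note on the locking change running through the via-velocity hunks: vptr->lock is taken in velocity_intr() (hard-IRQ context), so every non-IRQ path that takes the same lock must disable local interrupts, hence the switch from spin_lock()/spin_lock_irq() to spin_lock_irqsave(). A generic sketch of the idiom, with made-up names (my_lock, my_irq_handler, my_config_path):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(my_lock);	/* shared with the IRQ handler */

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	spin_lock(&my_lock);	/* IRQs are already off in hard-IRQ context */
	/* ... ack the hardware, schedule NAPI ... */
	spin_unlock(&my_lock);
	return IRQ_HANDLED;
}

static void my_config_path(void)
{
	unsigned long flags;

	/* Process/softirq context: disable local IRQs while holding the
	 * lock, otherwise my_irq_handler() could deadlock on this CPU. */
	spin_lock_irqsave(&my_lock, flags);
	/* ... touch registers and shared state ... */
	spin_unlock_irqrestore(&my_lock, flags);
}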
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0b..29bf33692f71 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1615 bf->bf_frmlen -= padsize; 1615 bf->bf_frmlen -= padsize;
1616 } 1616 }
1617 1617
1618 if (conf_is_ht(&hw->conf) && !is_pae(skb)) 1618 if (conf_is_ht(&hw->conf))
1619 bf->bf_state.bf_type |= BUF_HT; 1619 bf->bf_state.bf_type |= BUF_HT;
1620 1620
1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1621 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1701 goto tx_done; 1701 goto tx_done;
1702 } 1702 }
1703 1703
1704 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1704 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
1705 /* 1705 /*
1706 * Try aggregation if it's a unicast data frame 1706 * Try aggregation if it's a unicast data frame
1707 * and the destination is HT capable. 1707 * and the destination is HT capable.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf9491997..c484cc253892 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ 115#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ 116#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
117#define B43_MMIO_RNG 0x65A 117#define B43_MMIO_RNG 0x65A
118#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
118#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ 119#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
119#define B43_MMIO_IFSCTL_USE_EDCF 0x0004 120#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
120#define B43_MMIO_POWERUP_DELAY 0x6A8 121#define B43_MMIO_POWERUP_DELAY 0x6A8
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f26..490fb45d1d05 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) 628static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
629{ 629{
630 /* slot_time is in usec. */ 630 /* slot_time is in usec. */
631 if (dev->phy.type != B43_PHYTYPE_G) 631 /* This test used to exit for all but a G PHY. */
632 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
632 return; 633 return;
633 b43_write16(dev, 0x684, 510 + slot_time); 634 b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
634 b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); 635 /* Shared memory location 0x0010 is the slot time and should be
636 * set to slot_time; however, this register is initially 0 and changing
637 * the value adversely affects the transmit rate for BCM4311
 638 * devices. Until this behavior is understood, delete this step
639 *
640 * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
641 */
635} 642}
636 643
637static void b43_short_slot_timing_enable(struct b43_wldev *dev) 644static void b43_short_slot_timing_enable(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9b4b8b5c7574..31462813bac0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2008 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2008 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2009 "%d index %d\n", scd_ssn , index); 2009 "%d index %d\n", scd_ssn , index);
2010 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2010 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2011 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 2011 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2012 2012
2013 if (priv->mac80211_registered && 2013 if (priv->mac80211_registered &&
2014 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2014 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de45f308b744..cffaae772d51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1125 scd_ssn , index, txq_id, txq->swq_id); 1125 scd_ssn , index, txq_id, txq->swq_id);
1126 1126
1127 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 1127 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1128 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1128 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1129 1129
1130 if (priv->mac80211_registered && 1130 if (priv->mac80211_registered &&
1131 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 1131 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1153 tx_resp->failure_frame); 1153 tx_resp->failure_frame);
1154 1154
1155 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 1155 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1156 if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1156 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1157 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1158 1157
1159 if (priv->mac80211_registered && 1158 if (priv->mac80211_registered &&
1160 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 1159 (iwl_queue_space(&txq->q) > txq->q.low_mark))
1161 iwl_wake_queue(priv, txq_id); 1160 iwl_wake_queue(priv, txq_id);
1162 } 1161 }
1163 1162
1164 if (ieee80211_is_data_qos(tx_resp->frame_ctrl)) 1163 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1165 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1166 1164
1167 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 1165 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1168 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); 1166 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5461f105bd2d..f36f804804fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2745,6 +2745,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2745 priv->staging_rxon.flags = 0; 2745 priv->staging_rxon.flags = 0;
2746 2746
2747 iwl_set_rxon_channel(priv, conf->channel); 2747 iwl_set_rxon_channel(priv, conf->channel);
2748 iwl_set_rxon_ht(priv, ht_conf);
2748 2749
2749 iwl_set_flags_for_band(priv, conf->channel->band); 2750 iwl_set_flags_for_band(priv, conf->channel->band);
2750 spin_unlock_irqrestore(&priv->lock, flags); 2751 spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e7453..b69e972671b2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
446int iwl_hw_tx_queue_init(struct iwl_priv *priv, 446int iwl_hw_tx_queue_init(struct iwl_priv *priv,
447 struct iwl_tx_queue *txq); 447 struct iwl_tx_queue *txq);
448int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 448int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
449void iwl_free_tfds_in_queue(struct iwl_priv *priv,
450 int sta_id, int tid, int freed);
449int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 451int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
450 int slots_num, u32 txq_id); 452 int slots_num, u32 txq_id);
451void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 453void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6f36b6e79f5e..2dbce85404aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
928 if (ieee80211_is_mgmt(fc) || 928 if (ieee80211_is_mgmt(fc) ||
929 ieee80211_has_protected(fc) || 929 ieee80211_has_protected(fc) ||
930 ieee80211_has_morefrags(fc) || 930 ieee80211_has_morefrags(fc) ||
931 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) 931 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
932 (ieee80211_is_data_qos(fc) &&
933 *ieee80211_get_qos_ctl(hdr) &
934 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
932 ret = skb_linearize(skb); 935 ret = skb_linearize(skb);
933 else 936 else
934 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ? 937 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 87ce2bd292c7..8f4071562857 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
120EXPORT_SYMBOL(iwl_txq_update_write_ptr); 120EXPORT_SYMBOL(iwl_txq_update_write_ptr);
121 121
122 122
123void iwl_free_tfds_in_queue(struct iwl_priv *priv,
124 int sta_id, int tid, int freed)
125{
126 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
127 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
128 else {
129 IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
130 priv->stations[sta_id].tid[tid].tfds_in_queue,
131 freed);
132 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
133 }
134}
135EXPORT_SYMBOL(iwl_free_tfds_in_queue);
136
123/** 137/**
124 * iwl_tx_queue_free - Deallocate DMA queue. 138 * iwl_tx_queue_free - Deallocate DMA queue.
125 * @txq: Transmit queue to deallocate. 139 * @txq: Transmit queue to deallocate.
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1131 struct iwl_queue *q = &txq->q; 1145 struct iwl_queue *q = &txq->q;
1132 struct iwl_tx_info *tx_info; 1146 struct iwl_tx_info *tx_info;
1133 int nfreed = 0; 1147 int nfreed = 0;
1148 struct ieee80211_hdr *hdr;
1134 1149
1135 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1150 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1136 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " 1151 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1145 1160
1146 tx_info = &txq->txb[txq->q.read_ptr]; 1161 tx_info = &txq->txb[txq->q.read_ptr];
1147 iwl_tx_status(priv, tx_info->skb[0]); 1162 iwl_tx_status(priv, tx_info->skb[0]);
1163
1164 hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
1165 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1166 nfreed++;
1148 tx_info->skb[0] = NULL; 1167 tx_info->skb[0] = NULL;
1149 1168
1150 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) 1169 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1151 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq); 1170 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1152 1171
1153 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 1172 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1154 nfreed++;
1155 } 1173 }
1156 return nfreed; 1174 return nfreed;
1157} 1175}
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1559 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { 1577 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1560 /* calculate mac80211 ampdu sw queue to wake */ 1578 /* calculate mac80211 ampdu sw queue to wake */
1561 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); 1579 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
1562 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1580 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1563 1581
1564 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 1582 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1565 priv->mac80211_registered && 1583 priv->mac80211_registered &&
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 6d6ed7485175..f727b4a83196 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
794 } 794 }
795 795
796 bss->bss = kzalloc(bss_len, GFP_KERNEL); 796 bss->bss = kzalloc(bss_len, GFP_KERNEL);
797 if (!bss) { 797 if (!bss->bss) {
798 kfree(bss); 798 kfree(bss);
799 IWM_ERR(iwm, "Couldn't allocate bss\n"); 799 IWM_ERR(iwm, "Couldn't allocate bss\n");
800 return -ENOMEM; 800 return -ENOMEM;
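The iwmc3200wifi hunk above fixes a common slip: after a nested allocation the code tested the outer pointer (already known to be non-NULL) instead of the member it had just allocated. A minimal sketch of the corrected pattern, with hypothetical structure names:

#include <linux/slab.h>

struct outer {
	void *inner;
};

static struct outer *outer_alloc(size_t inner_len)
{
	struct outer *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;

	o->inner = kzalloc(inner_len, GFP_KERNEL);
	if (!o->inner) {	/* test the pointer that was just allocated */
		kfree(o);	/* and undo the earlier allocation */
		return NULL;
	}
	return o;
}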
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe4..7ba3052b0708 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
65 /* Sitecom */ 65 /* Sitecom */
66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, 66 {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, 67 {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
68 {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
68 /* Sphairon Access Systems GmbH */ 69 /* Sphairon Access Systems GmbH */
69 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, 70 {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
70 /* Dick Smith Electronics */ 71 /* Dick Smith Electronics */
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e952fdab764..cb2fd01eddae 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
720 -ret_val); 720 -ret_val);
721 goto acpiphp_bus_add_out; 721 goto acpiphp_bus_add_out;
722 } 722 }
723 /*
724 * try to start anyway. We could have failed to add
725 * simply because this bus had previously been added
726 * on another add. Don't bother with the return value
727 * we just keep going.
728 */
729 ret_val = acpi_bus_start(device); 723 ret_val = acpi_bus_start(device);
730 724
731acpiphp_bus_add_out: 725acpiphp_bus_add_out:
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 07d14dfdf0b4..226b3e93498c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
934 acer_backlight_device = bd; 934 acer_backlight_device = bd;
935 935
936 bd->props.power = FB_BLANK_UNBLANK; 936 bd->props.power = FB_BLANK_UNBLANK;
937 bd->props.brightness = max_brightness; 937 bd->props.brightness = read_brightness(bd);
938 bd->props.max_brightness = max_brightness; 938 bd->props.max_brightness = max_brightness;
939 backlight_update_status(bd); 939 backlight_update_status(bd);
940 return 0; 940 return 0;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e67e4feb35cb..eb603f1d55ca 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
5771 case TPACPI_THERMAL_ACPI_TMP07: 5771 case TPACPI_THERMAL_ACPI_TMP07:
5772 case TPACPI_THERMAL_ACPI_UPDT: 5772 case TPACPI_THERMAL_ACPI_UPDT:
5773 sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, 5773 sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
5774 &thermal_temp_input16_group); 5774 &thermal_temp_input8_group);
5775 break; 5775 break;
5776 case TPACPI_THERMAL_NONE: 5776 case TPACPI_THERMAL_NONE:
5777 default: 5777 default:
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index fa39e759a275..6ea3cb5837c7 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
175 dev_err(&dev->dev, "Do not pass platform_data through " 175 dev_err(&dev->dev, "Do not pass platform_data through "
176 "wm97xx_bat_set_pdata!\n"); 176 "wm97xx_bat_set_pdata!\n");
177 return -EINVAL; 177 return -EINVAL;
178 } else 178 }
179 pdata = wmdata->batt_pdata; 179
180 if (!wmdata) {
181 dev_err(&dev->dev, "No platform data supplied\n");
182 return -EINVAL;
183 }
184
185 pdata = wmdata->batt_pdata;
180 186
181 if (dev->id != -1) 187 if (dev->id != -1)
182 return -EINVAL; 188 return -EINVAL;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 686ef270ecf7..b60a4c9f8f16 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
661static void print_constraints(struct regulator_dev *rdev) 661static void print_constraints(struct regulator_dev *rdev)
662{ 662{
663 struct regulation_constraints *constraints = rdev->constraints; 663 struct regulation_constraints *constraints = rdev->constraints;
664 char buf[80]; 664 char buf[80] = "";
665 int count = 0; 665 int count = 0;
666 int ret; 666 int ret;
667 667
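print_constraints() assembles its message by appending into buf, and if nothing gets appended the old code printed an uninitialized stack buffer; initializing it to an empty string keeps the final printk well defined. A sketch of the append-into-a-cleared-buffer pattern (report_constraints and its arguments are illustrative, not the driver's code):

#include <linux/kernel.h>

static void report_constraints(int min_uV, int max_uV)
{
	char buf[80] = "";	/* must start empty: the code only appends */
	int count = 0;

	if (min_uV && min_uV == max_uV)
		count += scnprintf(buf + count, sizeof(buf) - count,
				   "%d mV ", min_uV / 1000);
	/* ... further optional fields are appended the same way ... */

	printk(KERN_INFO "constraints: %s\n", buf); /* safe even if count == 0 */
}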
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 76d08c282f9c..4f33a0f4a179 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
183 if (vol_map[val] >= min_vol) 183 if (vol_map[val] >= min_vol)
184 break; 184 break;
185 185
186 if (vol_map[val] > max_vol) 186 if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
187 return -EINVAL; 187 return -EINVAL;
188 188
189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo), 189 return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
272 if (vol_map[val] >= min_vol) 272 if (vol_map[val] >= min_vol)
273 break; 273 break;
274 274
275 if (vol_map[val] > max_vol) 275 if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
276 return -EINVAL; 276 return -EINVAL;
277 277
278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck), 278 ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
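Both lp3971 hunks guard the table lookup that follows the search loop: if no entry satisfied the break condition, val ends up one past the last index, so the bounds test must come before vol_map[val] is read (the || short-circuits). A compact sketch with made-up names:

#include <linux/kernel.h>
#include <linux/errno.h>

static int pick_voltage_index(const int *vol_map, int max_idx,
			      int min_vol, int max_vol)
{
	int val;

	for (val = 0; val <= max_idx; val++)
		if (vol_map[val] >= min_vol)
			break;

	/* Bounds check first, then the value check on a valid index. */
	if (val > max_idx || vol_map[val] > max_vol)
		return -EINVAL;
	return val;
}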
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 999fe80c4051..62b654af9237 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
531 qdio_siga_sync_q(q); 531 qdio_siga_sync_q(q);
532 get_buf_state(q, q->first_to_check, &state, 0); 532 get_buf_state(q, q->first_to_check, &state, 0);
533 533
534 if (state == SLSB_P_INPUT_PRIMED) 534 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
535 /* more work coming */ 535 /* more work coming */
536 return 0; 536 return 0;
537 537
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
960 qdio_handle_activate_check(cdev, intparm, cstat, 960 qdio_handle_activate_check(cdev, intparm, cstat,
961 dstat); 961 dstat);
962 break; 962 break;
963 case QDIO_IRQ_STATE_STOPPED:
964 break;
963 default: 965 default:
964 WARN_ON(1); 966 WARN_ON(1);
965 } 967 }
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 0f7b493fb105..271399f62f1b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data)
671{ 671{
672 struct fc_bsg_job *job = data; 672 struct fc_bsg_job *job = data;
673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data; 673 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
674 int status = zfcp_ct_els->status; 674 struct fc_bsg_reply *jr = job->reply;
675 int reply_status;
676 675
677 reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK; 676 jr->reply_payload_rcv_len = job->reply_payload.payload_len;
678 job->reply->reply_data.ctels_reply.status = reply_status; 677 jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
679 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; 678 jr->result = zfcp_ct_els->status ? -EIO : 0;
680 job->job_done(job); 679 job->job_done(job);
681} 680}
682 681
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 477542602284..9e71ac611146 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
2516 if (info->scsi.phase == PHASE_IDLE) 2516 if (info->scsi.phase == PHASE_IDLE)
2517 fas216_kick(info); 2517 fas216_kick(info);
2518 2518
2519 mod_timer(&info->eh_timer, 30 * HZ); 2519 mod_timer(&info->eh_timer, jiffies + 30 * HZ);
2520 spin_unlock_irqrestore(&info->host_lock, flags); 2520 spin_unlock_irqrestore(&info->host_lock, flags);
2521 2521
2522 /* 2522 /*
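The fas216 fix is the classic mod_timer() mistake: mod_timer() takes an absolute expiry in jiffies, so the timeout has to be added to the current jiffies value; a bare 30 * HZ arms the timer for a point shortly after boot, i.e. already in the past. Sketch (arm_eh_timer is an illustrative name):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void arm_eh_timer(struct timer_list *t)
{
	/* Expire 30 seconds from now, not at absolute jiffy 30 * HZ. */
	mod_timer(t, jiffies + 30 * HZ);
}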
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 10be9f36a4cc..2f47ae7cce91 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2009,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
2009 fcoe_interface_cleanup(fcoe); 2009 fcoe_interface_cleanup(fcoe);
2010 rtnl_unlock(); 2010 rtnl_unlock();
2011 fcoe_if_destroy(fcoe->ctlr.lp); 2011 fcoe_if_destroy(fcoe->ctlr.lp);
2012 module_put(THIS_MODULE);
2013
2012out_putdev: 2014out_putdev:
2013 dev_put(netdev); 2015 dev_put(netdev);
2014out_nodev: 2016out_nodev:
@@ -2059,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2059 } 2061 }
2060#endif 2062#endif
2061 2063
2064 if (!try_module_get(THIS_MODULE)) {
2065 rc = -EINVAL;
2066 goto out_nomod;
2067 }
2068
2062 rtnl_lock(); 2069 rtnl_lock();
2063 netdev = fcoe_if_to_netdev(buffer); 2070 netdev = fcoe_if_to_netdev(buffer);
2064 if (!netdev) { 2071 if (!netdev) {
@@ -2099,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
2099 if (!fcoe_link_ok(lport)) 2106 if (!fcoe_link_ok(lport))
2100 fcoe_ctlr_link_up(&fcoe->ctlr); 2107 fcoe_ctlr_link_up(&fcoe->ctlr);
2101 2108
2102 rc = 0;
2103out_free:
2104 /* 2109 /*
2105 * Release from init in fcoe_interface_create(), on success lport 2110 * Release from init in fcoe_interface_create(), on success lport
2106 * should be holding a reference taken in fcoe_if_create(). 2111 * should be holding a reference taken in fcoe_if_create().
2107 */ 2112 */
2108 fcoe_interface_put(fcoe); 2113 fcoe_interface_put(fcoe);
2114 dev_put(netdev);
2115 rtnl_unlock();
2116 mutex_unlock(&fcoe_config_mutex);
2117
2118 return 0;
2119out_free:
2120 fcoe_interface_put(fcoe);
2109out_putdev: 2121out_putdev:
2110 dev_put(netdev); 2122 dev_put(netdev);
2111out_nodev: 2123out_nodev:
2112 rtnl_unlock(); 2124 rtnl_unlock();
2125 module_put(THIS_MODULE);
2126out_nomod:
2113 mutex_unlock(&fcoe_config_mutex); 2127 mutex_unlock(&fcoe_config_mutex);
2114 return rc; 2128 return rc;
2115} 2129}
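The fcoe hunks pin the module for the lifetime of each created interface: fcoe_create() takes a reference with try_module_get() before doing any work (failing cleanly if the module is already unloading), and fcoe_destroy() and the error paths drop it with module_put(). A stripped-down sketch of that pairing, with made-up function names:

#include <linux/module.h>
#include <linux/errno.h>

static int my_instance_create(void)
{
	int rc = 0;

	if (!try_module_get(THIS_MODULE))	/* module may be unloading */
		return -EINVAL;

	/* ... allocate and register the instance, setting rc on failure ... */
	if (rc)
		module_put(THIS_MODULE);	/* error path drops the ref */
	return rc;
}

static void my_instance_destroy(void)
{
	/* ... tear the instance down ... */
	module_put(THIS_MODULE);	/* balances my_instance_create() */
}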
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 9823291395ad..511cb6b371ee 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
1187 next_timer = fip->ctlr_ka_time; 1187 next_timer = fip->ctlr_ka_time;
1188 1188
1189 if (time_after_eq(jiffies, fip->port_ka_time)) { 1189 if (time_after_eq(jiffies, fip->port_ka_time)) {
1190 fip->port_ka_time += jiffies + 1190 fip->port_ka_time = jiffies +
1191 msecs_to_jiffies(FIP_VN_KA_PERIOD); 1191 msecs_to_jiffies(FIP_VN_KA_PERIOD);
1192 fip->send_port_ka = 1; 1192 fip->send_port_ka = 1;
1193 } 1193 }
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 19d711cb938c..7f4364770e4a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1890 fc_exch_setup_hdr(ep, fp, ep->f_ctl); 1890 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1891 sp->cnt++; 1891 sp->cnt++;
1892 1892
1893 if (ep->xid <= lport->lro_xid) 1893 if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
1894 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid); 1894 fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1895 1895
1896 if (unlikely(lport->tt.frame_send(lport, fp))) 1896 if (unlikely(lport->tt.frame_send(lport, fp)))
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 881d5dfe8c74..6fde2fabfd9b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -298,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
298{ 298{
299 struct fc_lport *lport; 299 struct fc_lport *lport;
300 300
301 if (!fsp)
302 return;
303
304 lport = fsp->lp; 301 lport = fsp->lp;
305 if ((fsp->req_flags & FC_SRB_READ) && 302 if ((fsp->req_flags & FC_SRB_READ) &&
306 (lport->lro_enabled) && (lport->tt.ddp_setup)) { 303 (lport->lro_enabled) && (lport->tt.ddp_setup)) {
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 0b165024a219..7ec8ce75007c 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1800,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
1800 u32 did; 1800 u32 did;
1801 1801
1802 job->reply->reply_payload_rcv_len = 0; 1802 job->reply->reply_payload_rcv_len = 0;
1803 rsp->resid_len = job->reply_payload.payload_len; 1803 if (rsp)
1804 rsp->resid_len = job->reply_payload.payload_len;
1804 1805
1805 mutex_lock(&lport->lp_mutex); 1806 mutex_lock(&lport->lp_mutex);
1806 1807
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 02300523b234..97923bb07765 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -623,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
623 623
624 tov = ntohl(plp->fl_csp.sp_e_d_tov); 624 tov = ntohl(plp->fl_csp.sp_e_d_tov);
625 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) 625 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
626 tov /= 1000; 626 tov /= 1000000;
627 if (tov > rdata->e_d_tov) 627 if (tov > rdata->e_d_tov)
628 rdata->e_d_tov = tov; 628 rdata->e_d_tov = tov;
629 csp_seq = ntohs(plp->fl_csp.sp_tot_seq); 629 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index db6856c138fc..4ad87fd74ddd 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -992,12 +992,10 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
992 if (r2t == NULL) { 992 if (r2t == NULL) {
993 if (kfifo_out(&tcp_task->r2tqueue, 993 if (kfifo_out(&tcp_task->r2tqueue,
994 (void *)&tcp_task->r2t, sizeof(void *)) != 994 (void *)&tcp_task->r2t, sizeof(void *)) !=
995 sizeof(void *)) { 995 sizeof(void *))
996 WARN_ONCE(1, "unexpected fifo state");
997 r2t = NULL; 996 r2t = NULL;
998 } 997 else
999 998 r2t = tcp_task->r2t;
1000 r2t = tcp_task->r2t;
1001 } 999 }
1002 spin_unlock_bh(&session->lock); 1000 spin_unlock_bh(&session->lock);
1003 } 1001 }
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 708ea3157b60..d9b8ca5116bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -3781,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
3781 compat_alloc_user_space(sizeof(struct megasas_iocpacket)); 3781 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
3782 int i; 3782 int i;
3783 int error = 0; 3783 int error = 0;
3784 compat_uptr_t ptr;
3784 3785
3785 if (clear_user(ioc, sizeof(*ioc))) 3786 if (clear_user(ioc, sizeof(*ioc)))
3786 return -EFAULT; 3787 return -EFAULT;
@@ -3793,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
3793 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) 3794 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
3794 return -EFAULT; 3795 return -EFAULT;
3795 3796
3796 for (i = 0; i < MAX_IOCTL_SGE; i++) { 3797 /*
3797 compat_uptr_t ptr; 3798 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
3799 * sense_len is not null, so prepare the 64bit value under
3800 * the same condition.
3801 */
3802 if (ioc->sense_len) {
3803 void __user **sense_ioc_ptr =
3804 (void __user **)(ioc->frame.raw + ioc->sense_off);
3805 compat_uptr_t *sense_cioc_ptr =
3806 (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
3807 if (get_user(ptr, sense_cioc_ptr) ||
3808 put_user(compat_ptr(ptr), sense_ioc_ptr))
3809 return -EFAULT;
3810 }
3798 3811
3812 for (i = 0; i < MAX_IOCTL_SGE; i++) {
3799 if (get_user(ptr, &cioc->sgl[i].iov_base) || 3813 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
3800 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || 3814 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
3801 copy_in_user(&ioc->sgl[i].iov_len, 3815 copy_in_user(&ioc->sgl[i].iov_len,
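The megaraid_sas change builds the 64-bit sense pointer from the 32-bit user value using the usual compat-ioctl idiom: read the compat_uptr_t with get_user(), widen it with compat_ptr(), and store the result with put_user() into the native-layout frame. A bare-bones sketch of just that conversion (widen_user_ptr and its parameters are hypothetical):

#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Copy one 32-bit user pointer field into its 64-bit counterpart. */
static int widen_user_ptr(void __user **dst64, compat_uptr_t __user *src32)
{
	compat_uptr_t ptr;

	if (get_user(ptr, src32) ||
	    put_user(compat_ptr(ptr), dst64))
		return -EFAULT;
	return 0;
}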
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f61fb8d01330..8bc6f53691e9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); 453extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 454extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); 455extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
456extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
457 456
458#endif /* _QLA_GBL_H */ 457#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ffd0efdff40e..6fc63b98818c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1917 struct rsp_que *rsp; 1917 struct rsp_que *rsp;
1918 struct device_reg_24xx __iomem *reg; 1918 struct device_reg_24xx __iomem *reg;
1919 struct scsi_qla_host *vha; 1919 struct scsi_qla_host *vha;
1920 unsigned long flags;
1920 1921
1921 rsp = (struct rsp_que *) dev_id; 1922 rsp = (struct rsp_que *) dev_id;
1922 if (!rsp) { 1923 if (!rsp) {
@@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1927 ha = rsp->hw; 1928 ha = rsp->hw;
1928 reg = &ha->iobase->isp24; 1929 reg = &ha->iobase->isp24;
1929 1930
1930 spin_lock_irq(&ha->hardware_lock); 1931 spin_lock_irqsave(&ha->hardware_lock, flags);
1931 1932
1932 vha = qla25xx_get_host(rsp); 1933 vha = pci_get_drvdata(ha->pdev);
1933 qla24xx_process_response_queue(vha, rsp); 1934 qla24xx_process_response_queue(vha, rsp);
1934 if (!ha->flags.disable_msix_handshake) { 1935 if (!ha->flags.disable_msix_handshake) {
1935 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1936 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1936 RD_REG_DWORD_RELAXED(&reg->hccr); 1937 RD_REG_DWORD_RELAXED(&reg->hccr);
1937 } 1938 }
1938 spin_unlock_irq(&ha->hardware_lock); 1939 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1939 1940
1940 return IRQ_HANDLED; 1941 return IRQ_HANDLED;
1941} 1942}
@@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1946 struct qla_hw_data *ha; 1947 struct qla_hw_data *ha;
1947 struct rsp_que *rsp; 1948 struct rsp_que *rsp;
1948 struct device_reg_24xx __iomem *reg; 1949 struct device_reg_24xx __iomem *reg;
1950 unsigned long flags;
1949 1951
1950 rsp = (struct rsp_que *) dev_id; 1952 rsp = (struct rsp_que *) dev_id;
1951 if (!rsp) { 1953 if (!rsp) {
@@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
1958 /* Clear the interrupt, if enabled, for this response queue */ 1960 /* Clear the interrupt, if enabled, for this response queue */
1959 if (rsp->options & ~BIT_6) { 1961 if (rsp->options & ~BIT_6) {
1960 reg = &ha->iobase->isp24; 1962 reg = &ha->iobase->isp24;
1961 spin_lock_irq(&ha->hardware_lock); 1963 spin_lock_irqsave(&ha->hardware_lock, flags);
1962 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1964 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1963 RD_REG_DWORD_RELAXED(&reg->hccr); 1965 RD_REG_DWORD_RELAXED(&reg->hccr);
1964 spin_unlock_irq(&ha->hardware_lock); 1966 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1965 } 1967 }
1966 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 1968 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1967 1969
@@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1979 uint32_t stat; 1981 uint32_t stat;
1980 uint32_t hccr; 1982 uint32_t hccr;
1981 uint16_t mb[4]; 1983 uint16_t mb[4];
1984 unsigned long flags;
1982 1985
1983 rsp = (struct rsp_que *) dev_id; 1986 rsp = (struct rsp_que *) dev_id;
1984 if (!rsp) { 1987 if (!rsp) {
@@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1990 reg = &ha->iobase->isp24; 1993 reg = &ha->iobase->isp24;
1991 status = 0; 1994 status = 0;
1992 1995
1993 spin_lock_irq(&ha->hardware_lock); 1996 spin_lock_irqsave(&ha->hardware_lock, flags);
1994 vha = pci_get_drvdata(ha->pdev); 1997 vha = pci_get_drvdata(ha->pdev);
1995 do { 1998 do {
1996 stat = RD_REG_DWORD(&reg->host_status); 1999 stat = RD_REG_DWORD(&reg->host_status);
@@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id)
2039 } 2042 }
2040 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2043 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2041 } while (0); 2044 } while (0);
2042 spin_unlock_irq(&ha->hardware_lock); 2045 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2043 2046
2044 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2047 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2045 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2048 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
2277 msix->rsp = rsp; 2280 msix->rsp = rsp;
2278 return ret; 2281 return ret;
2279} 2282}
2280
2281struct scsi_qla_host *
2282qla25xx_get_host(struct rsp_que *rsp)
2283{
2284 srb_t *sp;
2285 struct qla_hw_data *ha = rsp->hw;
2286 struct scsi_qla_host *vha = NULL;
2287 struct sts_entry_24xx *pkt;
2288 struct req_que *req;
2289 uint16_t que;
2290 uint32_t handle;
2291
2292 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2293 que = MSW(pkt->handle);
2294 handle = (uint32_t) LSW(pkt->handle);
2295 req = ha->req_q_map[que];
2296 if (handle < MAX_OUTSTANDING_COMMANDS) {
2297 sp = req->outstanding_cmds[handle];
2298 if (sp)
2299 return sp->fcport->vha;
2300 else
2301 goto base_que;
2302 }
2303base_que:
2304 vha = pci_get_drvdata(ha->pdev);
2305 return vha;
2306}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index b901aa267e7d..ff17dee28613 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -636,13 +636,15 @@ failed:
636 636
637static void qla_do_work(struct work_struct *work) 637static void qla_do_work(struct work_struct *work)
638{ 638{
639 unsigned long flags;
639 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); 640 struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
640 struct scsi_qla_host *vha; 641 struct scsi_qla_host *vha;
642 struct qla_hw_data *ha = rsp->hw;
641 643
642 spin_lock_irq(&rsp->hw->hardware_lock); 644 spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
643 vha = qla25xx_get_host(rsp); 645 vha = pci_get_drvdata(ha->pdev);
644 qla24xx_process_response_queue(vha, rsp); 646 qla24xx_process_response_queue(vha, rsp);
645 spin_unlock_irq(&rsp->hw->hardware_lock); 647 spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
646} 648}
647 649
648/* create response queue */ 650/* create response queue */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c3e37c8e7e26..e9b15c3746fa 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
83 83
84#define PASS_LIMIT 256 84#define PASS_LIMIT 256
85 85
86#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
87
88
86/* 89/*
87 * We default to IRQ0 for the "no irq" hack. Some 90 * We default to IRQ0 for the "no irq" hack. Some
88 * machine types want others as well - they're free 91 * machine types want others as well - they're free
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
1792 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; 1795 up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
1793 spin_unlock_irqrestore(&up->port.lock, flags); 1796 spin_unlock_irqrestore(&up->port.lock, flags);
1794 1797
1795 return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; 1798 return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
1796} 1799}
1797 1800
1798static unsigned int serial8250_get_mctrl(struct uart_port *port) 1801static unsigned int serial8250_get_mctrl(struct uart_port *port)
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
1850 spin_unlock_irqrestore(&up->port.lock, flags); 1853 spin_unlock_irqrestore(&up->port.lock, flags);
1851} 1854}
1852 1855
1853#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
1854
1855/* 1856/*
1856 * Wait for transmitter & holding register to empty 1857 * Wait for transmitter & holding register to empty
1857 */ 1858 */
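
The 8250 hunks move the BOTH_EMPTY mask above serial8250_tx_empty() and change that function to report an empty transmitter only when both the transmit shift register (TEMT) and the holding register (THRE) have drained, rather than TEMT alone. A small sketch of the check, assuming only the standard LSR bit definitions:

#include <linux/serial.h>
#include <linux/serial_reg.h>

#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)

/* Report TIOCSER_TEMT only when shifter and holding register are both empty. */
static unsigned int example_tx_empty(unsigned char lsr)
{
	return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
}
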
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5681ebed9c65..03dfd27c4bfb 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
494#endif 494#endif
495 break; 495 break;
496 case SSB_BUSTYPE_SDIO: 496 case SSB_BUSTYPE_SDIO:
497#ifdef CONFIG_SSB_SDIO 497#ifdef CONFIG_SSB_SDIOHOST
498 sdev->irq = bus->host_sdio->dev.irq;
499 dev->parent = &bus->host_sdio->dev; 498 dev->parent = &bus->host_sdio->dev;
500#endif 499#endif
501 break; 500 break;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6e8bcdfd23b4..a678186f218f 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
1312 void __user *addr = as->userurb; 1312 void __user *addr = as->userurb;
1313 unsigned int i; 1313 unsigned int i;
1314 1314
1315 if (as->userbuffer) 1315 if (as->userbuffer && urb->actual_length)
1316 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1316 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
1317 urb->transfer_buffer_length)) 1317 urb->actual_length))
1318 goto err_out; 1318 goto err_out;
1319 if (put_user(as->status, &userurb->status)) 1319 if (put_user(as->status, &userurb->status))
1320 goto err_out; 1320 goto err_out;
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg)
1334 } 1334 }
1335 } 1335 }
1336 1336
1337 free_async(as);
1338
1339 if (put_user(addr, (void __user * __user *)arg)) 1337 if (put_user(addr, (void __user * __user *)arg))
1340 return -EFAULT; 1338 return -EFAULT;
1341 return 0; 1339 return 0;
1342 1340
1343err_out: 1341err_out:
1344 free_async(as);
1345 return -EFAULT; 1342 return -EFAULT;
1346} 1343}
1347 1344
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps)
1371static int proc_reapurb(struct dev_state *ps, void __user *arg) 1368static int proc_reapurb(struct dev_state *ps, void __user *arg)
1372{ 1369{
1373 struct async *as = reap_as(ps); 1370 struct async *as = reap_as(ps);
1374 if (as) 1371 if (as) {
1375 return processcompl(as, (void __user * __user *)arg); 1372 int retval = processcompl(as, (void __user * __user *)arg);
1373 free_async(as);
1374 return retval;
1375 }
1376 if (signal_pending(current)) 1376 if (signal_pending(current))
1377 return -EINTR; 1377 return -EINTR;
1378 return -EIO; 1378 return -EIO;
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
1380 1380
1381static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) 1381static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
1382{ 1382{
1383 int retval;
1383 struct async *as; 1384 struct async *as;
1384 1385
1385 if (!(as = async_getcompleted(ps))) 1386 as = async_getcompleted(ps);
1386 return -EAGAIN; 1387 retval = -EAGAIN;
1387 return processcompl(as, (void __user * __user *)arg); 1388 if (as) {
1389 retval = processcompl(as, (void __user * __user *)arg);
1390 free_async(as);
1391 }
1392 return retval;
1388} 1393}
1389 1394
1390#ifdef CONFIG_COMPAT 1395#ifdef CONFIG_COMPAT
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1475 void __user *addr = as->userurb; 1480 void __user *addr = as->userurb;
1476 unsigned int i; 1481 unsigned int i;
1477 1482
1478 if (as->userbuffer) 1483 if (as->userbuffer && urb->actual_length)
1479 if (copy_to_user(as->userbuffer, urb->transfer_buffer, 1484 if (copy_to_user(as->userbuffer, urb->transfer_buffer,
1480 urb->transfer_buffer_length)) 1485 urb->actual_length))
1481 return -EFAULT; 1486 return -EFAULT;
1482 if (put_user(as->status, &userurb->status)) 1487 if (put_user(as->status, &userurb->status))
1483 return -EFAULT; 1488 return -EFAULT;
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1497 } 1502 }
1498 } 1503 }
1499 1504
1500 free_async(as);
1501 if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) 1505 if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
1502 return -EFAULT; 1506 return -EFAULT;
1503 return 0; 1507 return 0;
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
1506static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) 1510static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
1507{ 1511{
1508 struct async *as = reap_as(ps); 1512 struct async *as = reap_as(ps);
1509 if (as) 1513 if (as) {
1510 return processcompl_compat(as, (void __user * __user *)arg); 1514 int retval = processcompl_compat(as, (void __user * __user *)arg);
1515 free_async(as);
1516 return retval;
1517 }
1511 if (signal_pending(current)) 1518 if (signal_pending(current))
1512 return -EINTR; 1519 return -EINTR;
1513 return -EIO; 1520 return -EIO;
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
1515 1522
1516static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) 1523static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
1517{ 1524{
1525 int retval;
1518 struct async *as; 1526 struct async *as;
1519 1527
1520 if (!(as = async_getcompleted(ps))) 1528 retval = -EAGAIN;
1521 return -EAGAIN; 1529 as = async_getcompleted(ps);
1522 return processcompl_compat(as, (void __user * __user *)arg); 1530 if (as) {
1531 retval = processcompl_compat(as, (void __user * __user *)arg);
1532 free_async(as);
1533 }
1534 return retval;
1523} 1535}
1524 1536
1525 1537
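
The devio.c hunks make two related fixes: completions copy back only urb->actual_length bytes instead of the full transfer_buffer_length, and free_async() moves out of processcompl()/processcompl_compat() into the reap ioctl handlers, so the completed request is freed exactly once by its owner whether or not the copy-out succeeds. A sketch of the resulting ownership pattern, with illustrative names rather than the driver's real API:

#include <linux/compiler.h>
#include <linux/errno.h>

struct completed_req;				/* stands in for struct async */

int copy_completion_to_user(struct completed_req *req, void __user *arg);
struct completed_req *get_completed(void);	/* may return NULL */
void free_completed(struct completed_req *req);

static int example_reap_nonblock(void __user *arg)
{
	struct completed_req *req = get_completed();
	int retval = -EAGAIN;

	if (req) {
		retval = copy_completion_to_user(req, arg);
		free_completed(req);	/* freed here, not inside the copy helper */
	}
	return retval;
}
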
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 0a577d5694fd..d4f0db58a8ad 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -358,7 +358,7 @@ done:
358 * b15: bmType (0 == data) 358 * b15: bmType (0 == data)
359 */ 359 */
360 len = skb->len; 360 len = skb->len;
361 put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2)); 361 put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
362 362
363 /* add a zero-length EEM packet, if needed */ 363 /* add a zero-length EEM packet, if needed */
364 if (padlen) 364 if (padlen)
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port,
464 } 464 }
465 465
466 /* validate CRC */ 466 /* validate CRC */
467 crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
468 if (header & BIT(14)) { 467 if (header & BIT(14)) {
469 crc = get_unaligned_le32(skb->data + len 468 crc = get_unaligned_le32(skb->data + len
470 - ETH_FCS_LEN); 469 - ETH_FCS_LEN);
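
In the EEM data-packet header the low 14 bits carry the frame length, bit 14 is bmCRC and bit 15 is bmType (0 for data, per the context comment above). The wrap path stops asserting bmCRC on the frames it builds, and the unwrap path drops the redundant read of the trailing word that happened before the bmCRC test. A sketch of the header handling; the real_crc flag is purely illustrative:

#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Build a 16-bit EEM data header: length in b0-13, bmCRC in b14, bmType in b15. */
static void eem_put_data_header(void *hdr, u16 len, bool real_crc)
{
	u16 word = len & 0x3FFF;

	if (real_crc)
		word |= BIT(14);	/* the trailing 4 bytes are a computed FCS */
	put_unaligned_le16(word, hdr);
}

static bool eem_header_has_crc(u16 header)
{
	return header & BIT(14);
}
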
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a37640eba434..77fcd1b697e8 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -1041,7 +1041,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
1041 unsigned long rc; 1041 unsigned long rc;
1042 1042
1043 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 1043 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1044 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); 1044 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1045} 1045}
1046 1046
1047static int do_verify(struct fsg_common *common) 1047static int do_verify(struct fsg_common *common)
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 29dfb0277ffb..7dcdbda49cac 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1448,7 +1448,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
1448 unsigned long rc; 1448 unsigned long rc;
1449 1449
1450 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 1450 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1451 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); 1451 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1452} 1452}
1453 1453
1454static int do_verify(struct fsg_dev *fsg) 1454static int do_verify(struct fsg_dev *fsg)
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 429560100b10..76496f5d272c 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -29,7 +29,7 @@
29#if defined USB_ETH_RNDIS 29#if defined USB_ETH_RNDIS
30# undef USB_ETH_RNDIS 30# undef USB_ETH_RNDIS
31#endif 31#endif
32#ifdef CONFIG_USB_ETH_RNDIS 32#ifdef CONFIG_USB_G_MULTI_RNDIS
33# define USB_ETH_RNDIS y 33# define USB_ETH_RNDIS y
34#endif 34#endif
35 35
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index e220fb8091a3..8b45145b9136 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -26,6 +26,7 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/err.h>
29 30
30#include <linux/usb/ch9.h> 31#include <linux/usb/ch9.h>
31#include <linux/usb/gadget.h> 32#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 4b5dbd0127f5..5fc80a104150 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2582,6 +2582,7 @@ err:
2582 hsotg->gadget.dev.driver = NULL; 2582 hsotg->gadget.dev.driver = NULL;
2583 return ret; 2583 return ret;
2584} 2584}
2585EXPORT_SYMBOL(usb_gadget_register_driver);
2585 2586
2586int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) 2587int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2587{ 2588{
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c75d9270c752..19372673bf09 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
196 if (hostpc_reg) { 196 if (hostpc_reg) {
197 u32 t3; 197 u32 t3;
198 198
199 spin_unlock_irq(&ehci->lock);
199 msleep(5);/* 5ms for HCD enter low pwr mode */ 200 msleep(5);/* 5ms for HCD enter low pwr mode */
201 spin_lock_irq(&ehci->lock);
200 t3 = ehci_readl(ehci, hostpc_reg); 202 t3 = ehci_readl(ehci, hostpc_reg);
201 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); 203 ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
202 t3 = ehci_readl(ehci, hostpc_reg); 204 t3 = ehci_readl(ehci, hostpc_reg);
@@ -904,17 +906,18 @@ static int ehci_hub_control (
904 if ((temp & PORT_PE) == 0 906 if ((temp & PORT_PE) == 0
905 || (temp & PORT_RESET) != 0) 907 || (temp & PORT_RESET) != 0)
906 goto error; 908 goto error;
907 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); 909
908 /* After above check the port must be connected. 910 /* After above check the port must be connected.
909 * Set appropriate bit thus could put phy into low power 911 * Set appropriate bit thus could put phy into low power
910 * mode if we have hostpc feature 912 * mode if we have hostpc feature
911 */ 913 */
914 temp &= ~PORT_WKCONN_E;
915 temp |= PORT_WKDISC_E | PORT_WKOC_E;
916 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
912 if (hostpc_reg) { 917 if (hostpc_reg) {
913 temp &= ~PORT_WKCONN_E; 918 spin_unlock_irqrestore(&ehci->lock, flags);
914 temp |= (PORT_WKDISC_E | PORT_WKOC_E);
915 ehci_writel(ehci, temp | PORT_SUSPEND,
916 status_reg);
917 msleep(5);/* 5ms for HCD enter low pwr mode */ 919 msleep(5);/* 5ms for HCD enter low pwr mode */
920 spin_lock_irqsave(&ehci->lock, flags);
918 temp1 = ehci_readl(ehci, hostpc_reg); 921 temp1 = ehci_readl(ehci, hostpc_reg);
919 ehci_writel(ehci, temp1 | HOSTPC_PHCD, 922 ehci_writel(ehci, temp1 | HOSTPC_PHCD,
920 hostpc_reg); 923 hostpc_reg);
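
Both ehci-hub.c hunks follow the same rule: msleep() may sleep, so ehci->lock, which is held with interrupts disabled, has to be dropped for the duration of the 5 ms wait and re-acquired afterwards, after which any previously read port state should be re-read. A minimal sketch of the pattern, with a hypothetical structure in place of the EHCI one:

#include <linux/delay.h>
#include <linux/spinlock.h>

struct example_ehci {
	spinlock_t lock;	/* stands in for ehci->lock */
};

static void example_phy_low_power(struct example_ehci *ehci)
{
	unsigned long flags;

	spin_lock_irqsave(&ehci->lock, flags);
	/* ... program the port registers ... */

	spin_unlock_irqrestore(&ehci->lock, flags);
	msleep(5);		/* sleeping while holding a spinlock is not allowed */
	spin_lock_irqsave(&ehci->lock, flags);

	/* ... re-read hardware state before acting on it ... */
	spin_unlock_irqrestore(&ehci->lock, flags);
}
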
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index d224ab467a40..e1232890c78b 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
105 if (ep->td_base) 105 if (ep->td_base)
106 cpm_muram_free(cpm_muram_offset(ep->td_base)); 106 cpm_muram_free(cpm_muram_offset(ep->td_base));
107 107
108 if (ep->conf_frame_Q) { 108 if (kfifo_initialized(&ep->conf_frame_Q)) {
109 size = cq_howmany(&ep->conf_frame_Q); 109 size = cq_howmany(&ep->conf_frame_Q);
110 for (; size; size--) { 110 for (; size; size--) {
111 struct packet *pkt = cq_get(&ep->conf_frame_Q); 111 struct packet *pkt = cq_get(&ep->conf_frame_Q);
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
115 cq_delete(&ep->conf_frame_Q); 115 cq_delete(&ep->conf_frame_Q);
116 } 116 }
117 117
118 if (ep->empty_frame_Q) { 118 if (kfifo_initialized(&ep->empty_frame_Q)) {
119 size = cq_howmany(&ep->empty_frame_Q); 119 size = cq_howmany(&ep->empty_frame_Q);
120 for (; size; size--) { 120 for (; size; size--) {
121 struct packet *pkt = cq_get(&ep->empty_frame_Q); 121 struct packet *pkt = cq_get(&ep->empty_frame_Q);
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
125 cq_delete(&ep->empty_frame_Q); 125 cq_delete(&ep->empty_frame_Q);
126 } 126 }
127 127
128 if (ep->dummy_packets_Q) { 128 if (kfifo_initialized(&ep->dummy_packets_Q)) {
129 size = cq_howmany(&ep->dummy_packets_Q); 129 size = cq_howmany(&ep->dummy_packets_Q);
130 for (; size; size--) { 130 for (; size; size--) {
131 u8 *buff = cq_get(&ep->dummy_packets_Q); 131 u8 *buff = cq_get(&ep->dummy_packets_Q);
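
After the kfifo rework the endpoint's frame queues are embedded struct kfifo objects rather than pointers, so "was this queue ever set up?" becomes kfifo_initialized() instead of a NULL check. A minimal sketch under that assumption:

#include <linux/kfifo.h>

static void example_free_queue(struct kfifo *q)
{
	if (!kfifo_initialized(q))
		return;		/* never allocated, nothing to drain or free */
	/* ... drain remaining elements, then free the fifo's storage ... */
}
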
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 0ceec123ddfd..bee558aed427 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,9 @@
35#include <linux/usb.h> 35#include <linux/usb.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/mm.h>
38#include <linux/irq.h> 39#include <linux/irq.h>
40#include <asm/cacheflush.h>
39 41
40#include "../core/hcd.h" 42#include "../core/hcd.h"
41#include "r8a66597.h" 43#include "r8a66597.h"
@@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
820 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); 822 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
821} 823}
822 824
825static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
826 int status)
827__releases(r8a66597->lock)
828__acquires(r8a66597->lock)
829{
830 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
831 void *ptr;
832
833 for (ptr = urb->transfer_buffer;
834 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
835 ptr += PAGE_SIZE)
836 flush_dcache_page(virt_to_page(ptr));
837 }
838
839 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
840 spin_unlock(&r8a66597->lock);
841 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
842 spin_lock(&r8a66597->lock);
843}
844
823/* this function must be called with interrupt disabled */ 845/* this function must be called with interrupt disabled */
824static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) 846static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
825{ 847{
@@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
838 list_del(&td->queue); 860 list_del(&td->queue);
839 kfree(td); 861 kfree(td);
840 862
841 if (urb) { 863 if (urb)
842 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), 864 r8a66597_urb_done(r8a66597, urb, -ENODEV);
843 urb);
844 865
845 spin_unlock(&r8a66597->lock);
846 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
847 -ENODEV);
848 spin_lock(&r8a66597->lock);
849 }
850 break; 866 break;
851 } 867 }
852} 868}
@@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
1006/* this function must be called with interrupt disabled */ 1022/* this function must be called with interrupt disabled */
1007static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 1023static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1008 u16 syssts) 1024 u16 syssts)
1025__releases(r8a66597->lock)
1026__acquires(r8a66597->lock)
1009{ 1027{
1010 if (syssts == SE0) { 1028 if (syssts == SE0) {
1011 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1029 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1023 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); 1041 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1024 } 1042 }
1025 1043
1044 spin_unlock(&r8a66597->lock);
1026 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); 1045 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
1046 spin_lock(&r8a66597->lock);
1027} 1047}
1028 1048
1029/* this function must be called with interrupt disabled */ 1049/* this function must be called with interrupt disabled */
@@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
1283 if (usb_pipeisoc(urb->pipe)) 1303 if (usb_pipeisoc(urb->pipe))
1284 urb->start_frame = r8a66597_get_frame(hcd); 1304 urb->start_frame = r8a66597_get_frame(hcd);
1285 1305
1286 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); 1306 r8a66597_urb_done(r8a66597, urb, status);
1287 spin_unlock(&r8a66597->lock);
1288 usb_hcd_giveback_urb(hcd, urb, status);
1289 spin_lock(&r8a66597->lock);
1290 } 1307 }
1291 1308
1292 if (restart) { 1309 if (restart) {
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 0025847743f3..8b37a4b9839e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = {
3245 { USB_DEVICE(0x0711, 0x0902) }, 3245 { USB_DEVICE(0x0711, 0x0902) },
3246 { USB_DEVICE(0x0711, 0x0903) }, 3246 { USB_DEVICE(0x0711, 0x0903) },
3247 { USB_DEVICE(0x0711, 0x0918) }, 3247 { USB_DEVICE(0x0711, 0x0918) },
3248 { USB_DEVICE(0x0711, 0x0920) },
3248 { USB_DEVICE(0x182d, 0x021c) }, 3249 { USB_DEVICE(0x182d, 0x021c) },
3249 { USB_DEVICE(0x182d, 0x0269) }, 3250 { USB_DEVICE(0x182d, 0x0269) },
3250 { } 3251 { }
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index de56b3d743d7..3d2d3e549bd1 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -44,6 +44,7 @@ config ISP1301_OMAP
44config USB_ULPI 44config USB_ULPI
45 bool "Generic ULPI Transceiver Driver" 45 bool "Generic ULPI Transceiver Driver"
46 depends on ARM 46 depends on ARM
47 select USB_OTG_UTILS
47 help 48 help
48 Enable this to support ULPI connected USB OTG transceivers which 49 Enable this to support ULPI connected USB OTG transceivers which
49 are likely found on embedded boards. 50 are likely found on embedded boards.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 216f187582ab..7638828e7317 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -50,7 +50,7 @@
50 * Version Information 50 * Version Information
51 */ 51 */
52#define DRIVER_VERSION "v1.5.0" 52#define DRIVER_VERSION "v1.5.0"
53#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>" 53#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
54#define DRIVER_DESC "USB FTDI Serial Converters Driver" 54#define DRIVER_DESC "USB FTDI Serial Converters Driver"
55 55
56static int debug; 56static int debug;
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
145 145
146 146
147 147
148/*
149 * Device ID not listed? Test via module params product/vendor or
150 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
151 */
148static struct usb_device_id id_table_combined [] = { 152static struct usb_device_id id_table_combined [] = {
149 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 153 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
150 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 154 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
151 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 155 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
152 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, 157 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
153 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, 158 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, 159 { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = {
552 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) }, 557 { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
553 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) }, 558 { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
554 /* 559 /*
555 * Due to many user requests for multiple ELV devices we enable 560 * ELV devices:
556 * them by default.
557 */ 561 */
562 { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
563 { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
564 { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
565 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
566 { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
567 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
568 { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
569 { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
558 { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) }, 570 { USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
559 { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) }, 571 { USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
560 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) }, 572 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = {
571 { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) }, 583 { USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
572 { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) }, 584 { USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
573 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, 585 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
586 { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
574 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, 587 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
588 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
575 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, 589 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
576 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, 590 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
577 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, 591 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
578 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) }, 592 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
593 { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
594 { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
595 { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
596 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
579 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 597 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
580 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 598 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
581 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, 599 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = {
697 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 715 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
698 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 716 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
699 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, 717 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
718 { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
700 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, 719 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
701 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, 720 { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
702 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, 721 { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index da92b4952ffb..c8951aeed983 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,8 @@
38/* www.candapter.com Ewert Energy Systems CANdapter device */ 38/* www.candapter.com Ewert Energy Systems CANdapter device */
39#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ 39#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
40 40
41#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
42
41/* OOCDlink by Joern Kaipf <joernk@web.de> 43/* OOCDlink by Joern Kaipf <joernk@web.de>
42 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ 44 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
43#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ 45#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
@@ -161,22 +163,37 @@
161/* 163/*
162 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de). 164 * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
163 * All of these devices use FTDI's vendor ID (0x0403). 165 * All of these devices use FTDI's vendor ID (0x0403).
166 * Further IDs taken from ELV Windows .inf file.
164 * 167 *
165 * The previously included PID for the UO 100 module was incorrect. 168 * The previously included PID for the UO 100 module was incorrect.
166 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58). 169 * In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
167 * 170 *
168 * Armin Laeuger originally sent the PID for the UM 100 module. 171 * Armin Laeuger originally sent the PID for the UM 100 module.
169 */ 172 */
173#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
174#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
175#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
176#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */
177#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */
178#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */
179#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */
180#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */
170#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ 181#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
171#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ 182#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
172#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */ 183#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
184#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
185#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
186#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
187#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
173#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ 188#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
174#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ 189#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
175#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ 190#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
176#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ 191#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
177#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */ 192#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
178#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */ 193#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
194#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */
179#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ 195#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
196#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */
180#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */ 197#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
181#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */ 198#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
182#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */ 199#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
@@ -968,6 +985,7 @@
968#define PAPOUCH_VID 0x5050 /* Vendor ID */ 985#define PAPOUCH_VID 0x5050 /* Vendor ID */
969#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ 986#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
970#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ 987#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
988#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
971 989
972/* 990/*
973 * Marvell SheevaPlug 991 * Marvell SheevaPlug
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ac1b6449fb6a..3eb6143bb646 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = {
298 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 298 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 299 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
300 }, 300 },
301 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
301 302
302 { } 303 { }
303}; 304};
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c932f9053188..49575fba3756 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
941UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133, 941UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
942 "Microtech", 942 "Microtech",
943 "USB-SCSI-DB25", 943 "USB-SCSI-DB25",
944 US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init, 944 US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
945 US_FL_SCM_MULT_TARG ), 945 US_FL_SCM_MULT_TARG ),
946 946
947UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100, 947UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index eb12182b2059..d25df51bb0d2 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
161 return 0; 161 return 0;
162} 162}
163 163
164static void efifb_destroy(struct fb_info *info)
165{
166 if (info->screen_base)
167 iounmap(info->screen_base);
168 release_mem_region(info->aperture_base, info->aperture_size);
169 framebuffer_release(info);
170}
171
164static struct fb_ops efifb_ops = { 172static struct fb_ops efifb_ops = {
165 .owner = THIS_MODULE, 173 .owner = THIS_MODULE,
174 .fb_destroy = efifb_destroy,
166 .fb_setcolreg = efifb_setcolreg, 175 .fb_setcolreg = efifb_setcolreg,
167 .fb_fillrect = cfb_fillrect, 176 .fb_fillrect = cfb_fillrect,
168 .fb_copyarea = cfb_copyarea, 177 .fb_copyarea = cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
281 info->par = NULL; 290 info->par = NULL;
282 291
283 info->aperture_base = efifb_fix.smem_start; 292 info->aperture_base = efifb_fix.smem_start;
284 info->aperture_size = size_total; 293 info->aperture_size = size_remap;
285 294
286 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 295 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
287 if (!info->screen_base) { 296 if (!info->screen_base) {
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index c7b3f9df2317..2159e668751c 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -1,9 +1,8 @@
1/* 1/*
2 * Blackfin On-Chip Watchdog Driver 2 * Blackfin On-Chip Watchdog Driver
3 * Supports BF53[123]/BF53[467]/BF54[2489]/BF561
4 * 3 *
5 * Originally based on softdog.c 4 * Originally based on softdog.c
6 * Copyright 2006-2007 Analog Devices Inc. 5 * Copyright 2006-2010 Analog Devices Inc.
7 * Copyright 2006-2007 Michele d'Amico 6 * Copyright 2006-2007 Michele d'Amico
8 * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk> 7 * Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
9 * 8 *
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
137 */ 136 */
138static int bfin_wdt_set_timeout(unsigned long t) 137static int bfin_wdt_set_timeout(unsigned long t)
139{ 138{
140 u32 cnt; 139 u32 cnt, max_t, sclk;
141 unsigned long flags; 140 unsigned long flags;
142 141
143 stampit(); 142 sclk = get_sclk();
143 max_t = -1 / sclk;
144 cnt = t * sclk;
145 stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
144 146
145 cnt = t * get_sclk(); 147 if (t > max_t) {
146 if (cnt < get_sclk()) {
147 printk(KERN_WARNING PFX "timeout value is too large\n"); 148 printk(KERN_WARNING PFX "timeout value is too large\n");
148 return -EINVAL; 149 return -EINVAL;
149 } 150 }
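
The bfin_wdt change computes the largest representable timeout up front: the watchdog counter is a 32-bit value loaded with t * sclk, so anything above (u32)-1 / sclk seconds would wrap. Comparing the requested timeout against that bound replaces the old check, which multiplied first and then tested a value that might already have overflowed. A small sketch of the arithmetic:

#include <linux/errno.h>
#include <linux/types.h>

static int example_set_timeout(u32 sclk, unsigned long t, u32 *cnt)
{
	u32 max_t = (u32)-1 / sclk;	/* same bound as the driver's "-1 / sclk" */

	if (t > max_t)
		return -EINVAL;		/* t * sclk would not fit in 32 bits */

	*cnt = t * sclk;
	return 0;
}
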
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296a..7d6c2139891d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
84 84
85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) 85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
86{ 86{
87 char *options; 87 char *options, *tmp_options;
88 substring_t args[MAX_OPT_ARGS]; 88 substring_t args[MAX_OPT_ARGS];
89 char *p; 89 char *p;
90 int option = 0; 90 int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
102 if (!opts) 102 if (!opts)
103 return 0; 103 return 0;
104 104
105 options = kstrdup(opts, GFP_KERNEL); 105 tmp_options = kstrdup(opts, GFP_KERNEL);
106 if (!options) 106 if (!tmp_options) {
107 ret = -ENOMEM;
107 goto fail_option_alloc; 108 goto fail_option_alloc;
109 }
110 options = tmp_options;
108 111
109 while ((p = strsep(&options, ",")) != NULL) { 112 while ((p = strsep(&options, ",")) != NULL) {
110 int token; 113 int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
159 break; 162 break;
160 case Opt_cache: 163 case Opt_cache:
161 s = match_strdup(&args[0]); 164 s = match_strdup(&args[0]);
162 if (!s) 165 if (!s) {
163 goto fail_option_alloc; 166 ret = -ENOMEM;
167 P9_DPRINTK(P9_DEBUG_ERROR,
168 "problem allocating copy of cache arg\n");
169 goto free_and_return;
170 }
164 171
165 if (strcmp(s, "loose") == 0) 172 if (strcmp(s, "loose") == 0)
166 v9ses->cache = CACHE_LOOSE; 173 v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
173 180
174 case Opt_access: 181 case Opt_access:
175 s = match_strdup(&args[0]); 182 s = match_strdup(&args[0]);
176 if (!s) 183 if (!s) {
177 goto fail_option_alloc; 184 ret = -ENOMEM;
185 P9_DPRINTK(P9_DEBUG_ERROR,
186 "problem allocating copy of access arg\n");
187 goto free_and_return;
188 }
178 189
179 v9ses->flags &= ~V9FS_ACCESS_MASK; 190 v9ses->flags &= ~V9FS_ACCESS_MASK;
180 if (strcmp(s, "user") == 0) 191 if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
194 continue; 205 continue;
195 } 206 }
196 } 207 }
197 kfree(options);
198 return ret;
199 208
209free_and_return:
210 kfree(tmp_options);
200fail_option_alloc: 211fail_option_alloc:
201 P9_DPRINTK(P9_DEBUG_ERROR, 212 return ret;
202 "failed to allocate copy of option argument\n");
203 return -ENOMEM;
204} 213}
205 214
206/** 215/**
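
The v9fs option-parsing fix addresses a leak and the error returns: strsep() advances the cursor it is handed and finally sets it to NULL, so the old kfree(options) after the loop freed NULL and leaked the duplicated string. Keeping the original allocation in tmp_options lets every exit path free it, and the allocation-failure paths now return -ENOMEM explicitly. A sketch of the pattern:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_parse(const char *opts)
{
	char *tmp_options, *options, *p;
	int ret = 0;

	tmp_options = kstrdup(opts, GFP_KERNEL);
	if (!tmp_options)
		return -ENOMEM;
	options = tmp_options;	/* strsep() will consume this cursor */

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		/* ... match and handle the token in p ... */
	}

	kfree(tmp_options);	/* free the original allocation, not the cursor */
	return ret;
}
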
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e35865..ed835836e0dc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
60int v9fs_uflags2omode(int uflags, int extended); 60int v9fs_uflags2omode(int uflags, int extended);
61 61
62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); 62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
63void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a088..74a0461a9ac0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
257 return total; 257 return total;
258} 258}
259 259
260static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
261 int datasync)
262{
263 struct p9_fid *fid;
264 struct p9_wstat wstat;
265 int retval;
266
267 P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
268 dentry, datasync);
269
270 fid = filp->private_data;
271 v9fs_blank_wstat(&wstat);
272
273 retval = p9_client_wstat(fid, &wstat);
274 return retval;
275}
276
260static const struct file_operations v9fs_cached_file_operations = { 277static const struct file_operations v9fs_cached_file_operations = {
261 .llseek = generic_file_llseek, 278 .llseek = generic_file_llseek,
262 .read = do_sync_read, 279 .read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
266 .release = v9fs_dir_release, 283 .release = v9fs_dir_release,
267 .lock = v9fs_file_lock, 284 .lock = v9fs_file_lock,
268 .mmap = generic_file_readonly_mmap, 285 .mmap = generic_file_readonly_mmap,
286 .fsync = v9fs_file_fsync,
269}; 287};
270 288
271const struct file_operations v9fs_file_operations = { 289const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
276 .release = v9fs_dir_release, 294 .release = v9fs_dir_release,
277 .lock = v9fs_file_lock, 295 .lock = v9fs_file_lock,
278 .mmap = generic_file_readonly_mmap, 296 .mmap = generic_file_readonly_mmap,
297 .fsync = v9fs_file_fsync,
279}; 298};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9d03d1ebca6f..a407fa3388c0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
176 * 176 *
177 */ 177 */
178 178
179static void 179void
180v9fs_blank_wstat(struct p9_wstat *wstat) 180v9fs_blank_wstat(struct p9_wstat *wstat)
181{ 181{
182 wstat->type = ~0; 182 wstat->type = ~0;
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 0118d67221b2..3d283abf67d7 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -60,11 +60,6 @@ do { \
60 current->pid, __func__, ##args); \ 60 current->pid, __func__, ##args); \
61} while (0) 61} while (0)
62 62
63struct rehash_entry {
64 struct task_struct *task;
65 struct list_head list;
66};
67
68/* Unified info structure. This is pointed to by both the dentry and 63/* Unified info structure. This is pointed to by both the dentry and
69 inode structures. Each file in the filesystem has an instance of this 64 inode structures. Each file in the filesystem has an instance of this
70 structure. It holds a reference to the dentry, so dentries are never 65 structure. It holds a reference to the dentry, so dentries are never
@@ -81,7 +76,6 @@ struct autofs_info {
81 76
82 struct list_head active; 77 struct list_head active;
83 int active_count; 78 int active_count;
84 struct list_head rehash_list;
85 79
86 struct list_head expiring; 80 struct list_head expiring;
87 81
@@ -104,7 +98,6 @@ struct autofs_info {
104#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ 98#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
105#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */ 99#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
106#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ 100#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
107#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */
108 101
109struct autofs_wait_queue { 102struct autofs_wait_queue {
110 wait_queue_head_t queue; 103 wait_queue_head_t queue;
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 00bf8fcb245f..c8a80dffb455 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -544,10 +544,9 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
544 goto out; 544 goto out;
545 devid = new_encode_dev(path.mnt->mnt_sb->s_dev); 545 devid = new_encode_dev(path.mnt->mnt_sb->s_dev);
546 err = 0; 546 err = 0;
547 if (path.dentry->d_inode && 547 if (path.mnt->mnt_root == path.dentry) {
548 path.mnt->mnt_root == path.dentry) {
549 err = 1; 548 err = 1;
550 magic = path.dentry->d_inode->i_sb->s_magic; 549 magic = path.mnt->mnt_sb->s_magic;
551 } 550 }
552 } else { 551 } else {
553 dev_t dev = sbi->sb->s_dev; 552 dev_t dev = sbi->sb->s_dev;
@@ -560,10 +559,8 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
560 559
561 err = have_submounts(path.dentry); 560 err = have_submounts(path.dentry);
562 561
563 if (path.mnt->mnt_mountpoint != path.mnt->mnt_root) { 562 if (follow_down(&path))
564 if (follow_down(&path)) 563 magic = path.mnt->mnt_sb->s_magic;
565 magic = path.mnt->mnt_sb->s_magic;
566 }
567 } 564 }
568 565
569 param->ismountpoint.out.devid = devid; 566 param->ismountpoint.out.devid = devid;
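
The ismountpoint simplification above relies on the looked-up path already sitting on top of whatever is mounted there: the dentry equals path.mnt->mnt_root exactly when the path is the root of a mounted filesystem, and the magic can be read from that vfsmount's superblock, while follow_down() steps onto anything mounted on top for the submount case. A sketch of the first check, under those assumptions:

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>

/* Nonzero when the path names the root of the vfsmount it sits on. */
static int example_is_mount_root(const struct path *path, unsigned long *magic)
{
	if (path->mnt->mnt_root != path->dentry)
		return 0;
	*magic = path->mnt->mnt_sb->s_magic;
	return 1;
}
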
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 74bc9aa6df31..a796c9417fb1 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -279,7 +279,6 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
279 root->d_mounted--; 279 root->d_mounted--;
280 } 280 }
281 ino->flags |= AUTOFS_INF_EXPIRING; 281 ino->flags |= AUTOFS_INF_EXPIRING;
282 autofs4_add_expiring(root);
283 init_completion(&ino->expire_complete); 282 init_completion(&ino->expire_complete);
284 spin_unlock(&sbi->fs_lock); 283 spin_unlock(&sbi->fs_lock);
285 return root; 284 return root;
@@ -407,7 +406,6 @@ found:
407 expired, (int)expired->d_name.len, expired->d_name.name); 406 expired, (int)expired->d_name.len, expired->d_name.name);
408 ino = autofs4_dentry_ino(expired); 407 ino = autofs4_dentry_ino(expired);
409 ino->flags |= AUTOFS_INF_EXPIRING; 408 ino->flags |= AUTOFS_INF_EXPIRING;
410 autofs4_add_expiring(expired);
411 init_completion(&ino->expire_complete); 409 init_completion(&ino->expire_complete);
412 spin_unlock(&sbi->fs_lock); 410 spin_unlock(&sbi->fs_lock);
413 spin_lock(&dcache_lock); 411 spin_lock(&dcache_lock);
@@ -435,7 +433,7 @@ int autofs4_expire_wait(struct dentry *dentry)
435 433
436 DPRINTK("expire done status=%d", status); 434 DPRINTK("expire done status=%d", status);
437 435
438 if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode)) 436 if (d_unhashed(dentry))
439 return -EAGAIN; 437 return -EAGAIN;
440 438
441 return status; 439 return status;
@@ -475,7 +473,6 @@ int autofs4_expire_run(struct super_block *sb,
475 spin_lock(&sbi->fs_lock); 473 spin_lock(&sbi->fs_lock);
476 ino = autofs4_dentry_ino(dentry); 474 ino = autofs4_dentry_ino(dentry);
477 ino->flags &= ~AUTOFS_INF_EXPIRING; 475 ino->flags &= ~AUTOFS_INF_EXPIRING;
478 autofs4_del_expiring(dentry);
479 complete_all(&ino->expire_complete); 476 complete_all(&ino->expire_complete);
480 spin_unlock(&sbi->fs_lock); 477 spin_unlock(&sbi->fs_lock);
481 478
@@ -506,7 +503,6 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
506 ino->flags &= ~AUTOFS_INF_MOUNTPOINT; 503 ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
507 } 504 }
508 ino->flags &= ~AUTOFS_INF_EXPIRING; 505 ino->flags &= ~AUTOFS_INF_EXPIRING;
509 autofs4_del_expiring(dentry);
510 complete_all(&ino->expire_complete); 506 complete_all(&ino->expire_complete);
511 spin_unlock(&sbi->fs_lock); 507 spin_unlock(&sbi->fs_lock);
512 dput(dentry); 508 dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index d0a3de247458..821b2b955dac 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -49,7 +49,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
49 ino->dentry = NULL; 49 ino->dentry = NULL;
50 ino->size = 0; 50 ino->size = 0;
51 INIT_LIST_HEAD(&ino->active); 51 INIT_LIST_HEAD(&ino->active);
52 INIT_LIST_HEAD(&ino->rehash_list);
53 ino->active_count = 0; 52 ino->active_count = 0;
54 INIT_LIST_HEAD(&ino->expiring); 53 INIT_LIST_HEAD(&ino->expiring);
55 atomic_set(&ino->count, 0); 54 atomic_set(&ino->count, 0);
@@ -97,63 +96,6 @@ void autofs4_free_ino(struct autofs_info *ino)
97 kfree(ino); 96 kfree(ino);
98} 97}
99 98
100/*
101 * Deal with the infamous "Busy inodes after umount ..." message.
102 *
103 * Clean up the dentry tree. This happens with autofs if the user
104 * space program goes away due to a SIGKILL, SIGSEGV etc.
105 */
106static void autofs4_force_release(struct autofs_sb_info *sbi)
107{
108 struct dentry *this_parent = sbi->sb->s_root;
109 struct list_head *next;
110
111 if (!sbi->sb->s_root)
112 return;
113
114 spin_lock(&dcache_lock);
115repeat:
116 next = this_parent->d_subdirs.next;
117resume:
118 while (next != &this_parent->d_subdirs) {
119 struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
120
121 /* Negative dentry - don`t care */
122 if (!simple_positive(dentry)) {
123 next = next->next;
124 continue;
125 }
126
127 if (!list_empty(&dentry->d_subdirs)) {
128 this_parent = dentry;
129 goto repeat;
130 }
131
132 next = next->next;
133 spin_unlock(&dcache_lock);
134
135 DPRINTK("dentry %p %.*s",
136 dentry, (int)dentry->d_name.len, dentry->d_name.name);
137
138 dput(dentry);
139 spin_lock(&dcache_lock);
140 }
141
142 if (this_parent != sbi->sb->s_root) {
143 struct dentry *dentry = this_parent;
144
145 next = this_parent->d_u.d_child.next;
146 this_parent = this_parent->d_parent;
147 spin_unlock(&dcache_lock);
148 DPRINTK("parent dentry %p %.*s",
149 dentry, (int)dentry->d_name.len, dentry->d_name.name);
150 dput(dentry);
151 spin_lock(&dcache_lock);
152 goto resume;
153 }
154 spin_unlock(&dcache_lock);
155}
156
157void autofs4_kill_sb(struct super_block *sb) 99void autofs4_kill_sb(struct super_block *sb)
158{ 100{
159 struct autofs_sb_info *sbi = autofs4_sbi(sb); 101 struct autofs_sb_info *sbi = autofs4_sbi(sb);
@@ -170,15 +112,12 @@ void autofs4_kill_sb(struct super_block *sb)
170 /* Free wait queues, close pipe */ 112 /* Free wait queues, close pipe */
171 autofs4_catatonic_mode(sbi); 113 autofs4_catatonic_mode(sbi);
172 114
173 /* Clean up and release dangling references */
174 autofs4_force_release(sbi);
175
176 sb->s_fs_info = NULL; 115 sb->s_fs_info = NULL;
177 kfree(sbi); 116 kfree(sbi);
178 117
179out_kill_sb: 118out_kill_sb:
180 DPRINTK("shutting down"); 119 DPRINTK("shutting down");
181 kill_anon_super(sb); 120 kill_litter_super(sb);
182} 121}
183 122
184static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) 123static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
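
The inode.c hunks drop the hand-rolled autofs4_force_release() tree walk together with the rehash-list initialisation; kill_litter_super() takes over the cleanup, since it runs d_genocide() to drop the dentry references the filesystem still holds before calling kill_anon_super(). A minimal sketch of the resulting teardown shape:

#include <linux/fs.h>

static void example_kill_sb(struct super_block *sb)
{
	/* ... free filesystem-private state hung off sb->s_fs_info ... */
	kill_litter_super(sb);	/* d_genocide() pass, then kill_anon_super() */
}
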
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 30cc9ddf4b70..a015b49891df 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -104,99 +104,6 @@ static void autofs4_del_active(struct dentry *dentry)
104 return; 104 return;
105} 105}
106 106
107static void autofs4_add_rehash_entry(struct autofs_info *ino,
108 struct rehash_entry *entry)
109{
110 entry->task = current;
111 INIT_LIST_HEAD(&entry->list);
112 list_add(&entry->list, &ino->rehash_list);
113 return;
114}
115
116static void autofs4_remove_rehash_entry(struct autofs_info *ino)
117{
118 struct list_head *head = &ino->rehash_list;
119 struct rehash_entry *entry;
120 list_for_each_entry(entry, head, list) {
121 if (entry->task == current) {
122 list_del(&entry->list);
123 kfree(entry);
124 break;
125 }
126 }
127 return;
128}
129
130static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
131{
132 struct autofs_sb_info *sbi = ino->sbi;
133 struct rehash_entry *entry, *next;
134 struct list_head *head;
135
136 spin_lock(&sbi->fs_lock);
137 spin_lock(&sbi->lookup_lock);
138 if (!(ino->flags & AUTOFS_INF_REHASH)) {
139 spin_unlock(&sbi->lookup_lock);
140 spin_unlock(&sbi->fs_lock);
141 return;
142 }
143 ino->flags &= ~AUTOFS_INF_REHASH;
144 head = &ino->rehash_list;
145 list_for_each_entry_safe(entry, next, head, list) {
146 list_del(&entry->list);
147 kfree(entry);
148 }
149 spin_unlock(&sbi->lookup_lock);
150 spin_unlock(&sbi->fs_lock);
151 dput(ino->dentry);
152
153 return;
154}
155
156static void autofs4_revalidate_drop(struct dentry *dentry,
157 struct rehash_entry *entry)
158{
159 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
160 struct autofs_info *ino = autofs4_dentry_ino(dentry);
161 /*
162 * Add to the active list so we can pick this up in
163 * ->lookup(). Also add an entry to a rehash list so
164 * we know when there are no dentrys in flight so we
165 * know when we can rehash the dentry.
166 */
167 spin_lock(&sbi->lookup_lock);
168 if (list_empty(&ino->active))
169 list_add(&ino->active, &sbi->active_list);
170 autofs4_add_rehash_entry(ino, entry);
171 spin_unlock(&sbi->lookup_lock);
172 if (!(ino->flags & AUTOFS_INF_REHASH)) {
173 ino->flags |= AUTOFS_INF_REHASH;
174 dget(dentry);
175 spin_lock(&dentry->d_lock);
176 __d_drop(dentry);
177 spin_unlock(&dentry->d_lock);
178 }
179 return;
180}
181
182static void autofs4_revalidate_rehash(struct dentry *dentry)
183{
184 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
185 struct autofs_info *ino = autofs4_dentry_ino(dentry);
186 if (ino->flags & AUTOFS_INF_REHASH) {
187 spin_lock(&sbi->lookup_lock);
188 autofs4_remove_rehash_entry(ino);
189 if (list_empty(&ino->rehash_list)) {
190 spin_unlock(&sbi->lookup_lock);
191 ino->flags &= ~AUTOFS_INF_REHASH;
192 d_rehash(dentry);
193 dput(ino->dentry);
194 } else
195 spin_unlock(&sbi->lookup_lock);
196 }
197 return;
198}
199
200static unsigned int autofs4_need_mount(unsigned int flags) 107static unsigned int autofs4_need_mount(unsigned int flags)
201{ 108{
202 unsigned int res = 0; 109 unsigned int res = 0;
@@ -236,7 +143,7 @@ out:
236 return dcache_dir_open(inode, file); 143 return dcache_dir_open(inode, file);
237} 144}
238 145
239static int try_to_fill_dentry(struct dentry *dentry) 146static int try_to_fill_dentry(struct dentry *dentry, int flags)
240{ 147{
241 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 148 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
242 struct autofs_info *ino = autofs4_dentry_ino(dentry); 149 struct autofs_info *ino = autofs4_dentry_ino(dentry);
@@ -249,17 +156,55 @@ static int try_to_fill_dentry(struct dentry *dentry)
249 * Wait for a pending mount, triggering one if there 156 * Wait for a pending mount, triggering one if there
250 * isn't one already 157 * isn't one already
251 */ 158 */
252 DPRINTK("waiting for mount name=%.*s", 159 if (dentry->d_inode == NULL) {
253 dentry->d_name.len, dentry->d_name.name); 160 DPRINTK("waiting for mount name=%.*s",
161 dentry->d_name.len, dentry->d_name.name);
254 162
255 status = autofs4_wait(sbi, dentry, NFY_MOUNT); 163 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
256 164
257 DPRINTK("mount done status=%d", status); 165 DPRINTK("mount done status=%d", status);
258 166
259 /* Update expiry counter */ 167 /* Turn this into a real negative dentry? */
260 ino->last_used = jiffies; 168 if (status == -ENOENT) {
169 spin_lock(&sbi->fs_lock);
170 ino->flags &= ~AUTOFS_INF_PENDING;
171 spin_unlock(&sbi->fs_lock);
172 return status;
173 } else if (status) {
174 /* Return a negative dentry, but leave it "pending" */
175 return status;
176 }
177 /* Trigger mount for path component or follow link */
178 } else if (ino->flags & AUTOFS_INF_PENDING ||
179 autofs4_need_mount(flags) ||
180 current->link_count) {
181 DPRINTK("waiting for mount name=%.*s",
182 dentry->d_name.len, dentry->d_name.name);
261 183
262 return status; 184 spin_lock(&sbi->fs_lock);
185 ino->flags |= AUTOFS_INF_PENDING;
186 spin_unlock(&sbi->fs_lock);
187 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
188
189 DPRINTK("mount done status=%d", status);
190
191 if (status) {
192 spin_lock(&sbi->fs_lock);
193 ino->flags &= ~AUTOFS_INF_PENDING;
194 spin_unlock(&sbi->fs_lock);
195 return status;
196 }
197 }
198
199 /* Initialize expiry counter after successful mount */
200 if (ino)
201 ino->last_used = jiffies;
202
203 spin_lock(&sbi->fs_lock);
204 ino->flags &= ~AUTOFS_INF_PENDING;
205 spin_unlock(&sbi->fs_lock);
206
207 return 0;
263} 208}
264 209
265/* For autofs direct mounts the follow link triggers the mount */ 210/* For autofs direct mounts the follow link triggers the mount */
@@ -313,16 +258,10 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
313 */ 258 */
314 if (ino->flags & AUTOFS_INF_PENDING || 259 if (ino->flags & AUTOFS_INF_PENDING ||
315 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) { 260 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
316 ino->flags |= AUTOFS_INF_PENDING;
317 spin_unlock(&dcache_lock); 261 spin_unlock(&dcache_lock);
318 spin_unlock(&sbi->fs_lock); 262 spin_unlock(&sbi->fs_lock);
319 263
320 status = try_to_fill_dentry(dentry); 264 status = try_to_fill_dentry(dentry, 0);
321
322 spin_lock(&sbi->fs_lock);
323 ino->flags &= ~AUTOFS_INF_PENDING;
324 spin_unlock(&sbi->fs_lock);
325
326 if (status) 265 if (status)
327 goto out_error; 266 goto out_error;
328 267
@@ -361,47 +300,18 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
361{ 300{
362 struct inode *dir = dentry->d_parent->d_inode; 301 struct inode *dir = dentry->d_parent->d_inode;
363 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 302 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
364 struct autofs_info *ino = autofs4_dentry_ino(dentry); 303 int oz_mode = autofs4_oz_mode(sbi);
365 struct rehash_entry *entry;
366 int flags = nd ? nd->flags : 0; 304 int flags = nd ? nd->flags : 0;
367 unsigned int mutex_aquired; 305 int status = 1;
368 306
369 DPRINTK("name = %.*s oz_mode = %d",
370 dentry->d_name.len, dentry->d_name.name, oz_mode);
371
372 /* Daemon never causes a mount to trigger */
373 if (autofs4_oz_mode(sbi))
374 return 1;
375
376 entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
377 if (!entry)
378 return -ENOMEM;
379
380 mutex_aquired = mutex_trylock(&dir->i_mutex);
381
382 spin_lock(&sbi->fs_lock);
383 spin_lock(&dcache_lock);
384 /* Pending dentry */ 307 /* Pending dentry */
308 spin_lock(&sbi->fs_lock);
385 if (autofs4_ispending(dentry)) { 309 if (autofs4_ispending(dentry)) {
386 int status; 310 /* The daemon never causes a mount to trigger */
387
388 /*
389 * We can only unhash and send this to ->lookup() if
390 * the directory mutex is held over d_revalidate() and
391 * ->lookup(). This prevents the VFS from incorrectly
392 * seeing the dentry as non-existent.
393 */
394 ino->flags |= AUTOFS_INF_PENDING;
395 if (!mutex_aquired) {
396 autofs4_revalidate_drop(dentry, entry);
397 spin_unlock(&dcache_lock);
398 spin_unlock(&sbi->fs_lock);
399 return 0;
400 }
401 spin_unlock(&dcache_lock);
402 spin_unlock(&sbi->fs_lock); 311 spin_unlock(&sbi->fs_lock);
403 mutex_unlock(&dir->i_mutex); 312
404 kfree(entry); 313 if (oz_mode)
314 return 1;
405 315
406 /* 316 /*
407 * If the directory has gone away due to an expire 317 * If the directory has gone away due to an expire
@@ -415,82 +325,45 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
415 * A zero status is success otherwise we have a 325 * A zero status is success otherwise we have a
416 * negative error code. 326 * negative error code.
417 */ 327 */
418 status = try_to_fill_dentry(dentry); 328 status = try_to_fill_dentry(dentry, flags);
419
420 spin_lock(&sbi->fs_lock);
421 ino->flags &= ~AUTOFS_INF_PENDING;
422 spin_unlock(&sbi->fs_lock);
423
424 if (status == 0) 329 if (status == 0)
425 return 1; 330 return 1;
426 331
427 return status; 332 return status;
428 } 333 }
334 spin_unlock(&sbi->fs_lock);
335
336 /* Negative dentry.. invalidate if "old" */
337 if (dentry->d_inode == NULL)
338 return 0;
429 339
430 /* Check for a non-mountpoint directory with no contents */ 340 /* Check for a non-mountpoint directory with no contents */
341 spin_lock(&dcache_lock);
431 if (S_ISDIR(dentry->d_inode->i_mode) && 342 if (S_ISDIR(dentry->d_inode->i_mode) &&
432 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { 343 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
433 DPRINTK("dentry=%p %.*s, emptydir", 344 DPRINTK("dentry=%p %.*s, emptydir",
434 dentry, dentry->d_name.len, dentry->d_name.name); 345 dentry, dentry->d_name.len, dentry->d_name.name);
346 spin_unlock(&dcache_lock);
435 347
436 if (autofs4_need_mount(flags) || current->link_count) { 348 /* The daemon never causes a mount to trigger */
437 int status; 349 if (oz_mode)
438 350 return 1;
439 /*
440 * We can only unhash and send this to ->lookup() if
441 * the directory mutex is held over d_revalidate() and
442 * ->lookup(). This prevents the VFS from incorrectly
443 * seeing the dentry as non-existent.
444 */
445 ino->flags |= AUTOFS_INF_PENDING;
446 if (!mutex_aquired) {
447 autofs4_revalidate_drop(dentry, entry);
448 spin_unlock(&dcache_lock);
449 spin_unlock(&sbi->fs_lock);
450 return 0;
451 }
452 spin_unlock(&dcache_lock);
453 spin_unlock(&sbi->fs_lock);
454 mutex_unlock(&dir->i_mutex);
455 kfree(entry);
456
457 /*
458 * A zero status is success otherwise we have a
459 * negative error code.
460 */
461 status = try_to_fill_dentry(dentry);
462
463 spin_lock(&sbi->fs_lock);
464 ino->flags &= ~AUTOFS_INF_PENDING;
465 spin_unlock(&sbi->fs_lock);
466 351
467 if (status == 0) 352 /*
468 return 1; 353 * A zero status is success otherwise we have a
354 * negative error code.
355 */
356 status = try_to_fill_dentry(dentry, flags);
357 if (status == 0)
358 return 1;
469 359
470 return status; 360 return status;
471 }
472 } 361 }
473 spin_unlock(&dcache_lock); 362 spin_unlock(&dcache_lock);
474 spin_unlock(&sbi->fs_lock);
475
476 if (mutex_aquired)
477 mutex_unlock(&dir->i_mutex);
478
479 kfree(entry);
480 363
481 return 1; 364 return 1;
482} 365}
483 366
484static void autofs4_free_rehash_entrys(struct autofs_info *inf)
485{
486 struct list_head *head = &inf->rehash_list;
487 struct rehash_entry *entry, *next;
488 list_for_each_entry_safe(entry, next, head, list) {
489 list_del(&entry->list);
490 kfree(entry);
491 }
492}
493
494void autofs4_dentry_release(struct dentry *de) 367void autofs4_dentry_release(struct dentry *de)
495{ 368{
496 struct autofs_info *inf; 369 struct autofs_info *inf;
@@ -509,8 +382,6 @@ void autofs4_dentry_release(struct dentry *de)
509 list_del(&inf->active); 382 list_del(&inf->active);
510 if (!list_empty(&inf->expiring)) 383 if (!list_empty(&inf->expiring))
511 list_del(&inf->expiring); 384 list_del(&inf->expiring);
512 if (!list_empty(&inf->rehash_list))
513 autofs4_free_rehash_entrys(inf);
514 spin_unlock(&sbi->lookup_lock); 385 spin_unlock(&sbi->lookup_lock);
515 } 386 }
516 387
@@ -543,7 +414,6 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
543 const unsigned char *str = name->name; 414 const unsigned char *str = name->name;
544 struct list_head *p, *head; 415 struct list_head *p, *head;
545 416
546restart:
547 spin_lock(&dcache_lock); 417 spin_lock(&dcache_lock);
548 spin_lock(&sbi->lookup_lock); 418 spin_lock(&sbi->lookup_lock);
549 head = &sbi->active_list; 419 head = &sbi->active_list;
@@ -561,19 +431,6 @@ restart:
561 if (atomic_read(&active->d_count) == 0) 431 if (atomic_read(&active->d_count) == 0)
562 goto next; 432 goto next;
563 433
564 if (active->d_inode && IS_DEADDIR(active->d_inode)) {
565 if (!list_empty(&ino->rehash_list)) {
566 dget(active);
567 spin_unlock(&active->d_lock);
568 spin_unlock(&sbi->lookup_lock);
569 spin_unlock(&dcache_lock);
570 autofs4_remove_rehash_entrys(ino);
571 dput(active);
572 goto restart;
573 }
574 goto next;
575 }
576
577 qstr = &active->d_name; 434 qstr = &active->d_name;
578 435
579 if (active->d_name.hash != hash) 436 if (active->d_name.hash != hash)
@@ -586,11 +443,13 @@ restart:
586 if (memcmp(qstr->name, str, len)) 443 if (memcmp(qstr->name, str, len))
587 goto next; 444 goto next;
588 445
589 dget(active); 446 if (d_unhashed(active)) {
590 spin_unlock(&active->d_lock); 447 dget(active);
591 spin_unlock(&sbi->lookup_lock); 448 spin_unlock(&active->d_lock);
592 spin_unlock(&dcache_lock); 449 spin_unlock(&sbi->lookup_lock);
593 return active; 450 spin_unlock(&dcache_lock);
451 return active;
452 }
594next: 453next:
595 spin_unlock(&active->d_lock); 454 spin_unlock(&active->d_lock);
596 } 455 }
@@ -639,11 +498,13 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
639 if (memcmp(qstr->name, str, len)) 498 if (memcmp(qstr->name, str, len))
640 goto next; 499 goto next;
641 500
642 dget(expiring); 501 if (d_unhashed(expiring)) {
643 spin_unlock(&expiring->d_lock); 502 dget(expiring);
644 spin_unlock(&sbi->lookup_lock); 503 spin_unlock(&expiring->d_lock);
645 spin_unlock(&dcache_lock); 504 spin_unlock(&sbi->lookup_lock);
646 return expiring; 505 spin_unlock(&dcache_lock);
506 return expiring;
507 }
647next: 508next:
648 spin_unlock(&expiring->d_lock); 509 spin_unlock(&expiring->d_lock);
649 } 510 }
@@ -653,48 +514,6 @@ next:
653 return NULL; 514 return NULL;
654} 515}
655 516
656static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
657 struct dentry *dentry, int oz_mode)
658{
659 struct autofs_info *ino;
660
661 /*
662 * Mark the dentry incomplete but don't hash it. We do this
663 * to serialize our inode creation operations (symlink and
664 * mkdir) which prevents deadlock during the callback to
665 * the daemon. Subsequent user space lookups for the same
666 * dentry are placed on the wait queue while the daemon
 667 * itself is allowed passage unrestricted so the create
668 * operation itself can then hash the dentry. Finally,
669 * we check for the hashed dentry and return the newly
670 * hashed dentry.
671 */
672 dentry->d_op = &autofs4_root_dentry_operations;
673
674 /*
675 * And we need to ensure that the same dentry is used for
676 * all following lookup calls until it is hashed so that
677 * the dentry flags are persistent throughout the request.
678 */
679 ino = autofs4_init_ino(NULL, sbi, 0555);
680 if (!ino)
681 return ERR_PTR(-ENOMEM);
682
683 dentry->d_fsdata = ino;
684 ino->dentry = dentry;
685
686 /*
687 * Only set the mount pending flag for new dentrys not created
688 * by the daemon.
689 */
690 if (!oz_mode)
691 ino->flags |= AUTOFS_INF_PENDING;
692
693 d_instantiate(dentry, NULL);
694
695 return ino;
696}
697
698/* Lookups in the root directory */ 517/* Lookups in the root directory */
699static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 518static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
700{ 519{
@@ -702,7 +521,6 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
702 struct autofs_info *ino; 521 struct autofs_info *ino;
703 struct dentry *expiring, *active; 522 struct dentry *expiring, *active;
704 int oz_mode; 523 int oz_mode;
705 int status = 0;
706 524
707 DPRINTK("name = %.*s", 525 DPRINTK("name = %.*s",
708 dentry->d_name.len, dentry->d_name.name); 526 dentry->d_name.len, dentry->d_name.name);
@@ -717,26 +535,44 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
717 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", 535 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
718 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); 536 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
719 537
720 spin_lock(&sbi->fs_lock);
721 active = autofs4_lookup_active(dentry); 538 active = autofs4_lookup_active(dentry);
722 if (active) { 539 if (active) {
723 dentry = active; 540 dentry = active;
724 ino = autofs4_dentry_ino(dentry); 541 ino = autofs4_dentry_ino(dentry);
725 /* If this came from revalidate, rehash it */
726 autofs4_revalidate_rehash(dentry);
727 spin_unlock(&sbi->fs_lock);
728 } else { 542 } else {
729 spin_unlock(&sbi->fs_lock); 543 /*
730 ino = init_new_dentry(sbi, dentry, oz_mode); 544 * Mark the dentry incomplete but don't hash it. We do this
731 if (IS_ERR(ino)) 545 * to serialize our inode creation operations (symlink and
732 return (struct dentry *) ino; 546 * mkdir) which prevents deadlock during the callback to
733 } 547 * the daemon. Subsequent user space lookups for the same
548 * dentry are placed on the wait queue while the daemon
 549 * itself is allowed passage unrestricted so the create
550 * operation itself can then hash the dentry. Finally,
551 * we check for the hashed dentry and return the newly
552 * hashed dentry.
553 */
554 dentry->d_op = &autofs4_root_dentry_operations;
555
556 /*
557 * And we need to ensure that the same dentry is used for
558 * all following lookup calls until it is hashed so that
559 * the dentry flags are persistent throughout the request.
560 */
561 ino = autofs4_init_ino(NULL, sbi, 0555);
562 if (!ino)
563 return ERR_PTR(-ENOMEM);
734 564
735 autofs4_add_active(dentry); 565 dentry->d_fsdata = ino;
566 ino->dentry = dentry;
567
568 autofs4_add_active(dentry);
569
570 d_instantiate(dentry, NULL);
571 }
736 572
737 if (!oz_mode) { 573 if (!oz_mode) {
738 expiring = autofs4_lookup_expiring(dentry);
739 mutex_unlock(&dir->i_mutex); 574 mutex_unlock(&dir->i_mutex);
575 expiring = autofs4_lookup_expiring(dentry);
740 if (expiring) { 576 if (expiring) {
741 /* 577 /*
742 * If we are racing with expire the request might not 578 * If we are racing with expire the request might not
@@ -744,22 +580,23 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
744 * so it must have been successful, so just wait for it. 580 * so it must have been successful, so just wait for it.
745 */ 581 */
746 autofs4_expire_wait(expiring); 582 autofs4_expire_wait(expiring);
583 autofs4_del_expiring(expiring);
747 dput(expiring); 584 dput(expiring);
748 } 585 }
749 status = try_to_fill_dentry(dentry); 586
750 mutex_lock(&dir->i_mutex);
751 spin_lock(&sbi->fs_lock); 587 spin_lock(&sbi->fs_lock);
752 ino->flags &= ~AUTOFS_INF_PENDING; 588 ino->flags |= AUTOFS_INF_PENDING;
753 spin_unlock(&sbi->fs_lock); 589 spin_unlock(&sbi->fs_lock);
590 if (dentry->d_op && dentry->d_op->d_revalidate)
591 (dentry->d_op->d_revalidate)(dentry, nd);
592 mutex_lock(&dir->i_mutex);
754 } 593 }
755 594
756 autofs4_del_active(dentry);
757
758 /* 595 /*
759 * If we had a mount fail, check if we had to handle 596 * If we are still pending, check if we had to handle
760 * a signal. If so we can force a restart.. 597 * a signal. If so we can force a restart..
761 */ 598 */
762 if (status) { 599 if (ino->flags & AUTOFS_INF_PENDING) {
763 /* See if we were interrupted */ 600 /* See if we were interrupted */
764 if (signal_pending(current)) { 601 if (signal_pending(current)) {
765 sigset_t *sigset = &current->pending.signal; 602 sigset_t *sigset = &current->pending.signal;
@@ -771,46 +608,43 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
771 return ERR_PTR(-ERESTARTNOINTR); 608 return ERR_PTR(-ERESTARTNOINTR);
772 } 609 }
773 } 610 }
774 } 611 if (!oz_mode) {
775 612 spin_lock(&sbi->fs_lock);
776 /* 613 ino->flags &= ~AUTOFS_INF_PENDING;
777 * User space can (and has done in the past) remove and re-create 614 spin_unlock(&sbi->fs_lock);
778 * this directory during the callback. This can leave us with an
779 * unhashed dentry, but a successful mount! So we need to
780 * perform another cached lookup in case the dentry now exists.
781 */
782 if (!oz_mode && !have_submounts(dentry)) {
783 struct dentry *new;
784 new = d_lookup(dentry->d_parent, &dentry->d_name);
785 if (new) {
786 if (active)
787 dput(active);
788 return new;
789 } else {
790 if (!status)
791 status = -ENOENT;
792 } 615 }
793 } 616 }
794 617
795 /* 618 /*
796 * If we had a mount failure, return status to user space. 619 * If this dentry is unhashed, then we shouldn't honour this
797 * If the mount succeeded and we used a dentry from the active queue 620 * lookup. Returning ENOENT here doesn't do the right thing
798 * return it. 621 * for all system calls, but it should be OK for the operations
622 * we permit from an autofs.
799 */ 623 */
800 if (status) { 624 if (!oz_mode && d_unhashed(dentry)) {
801 dentry = ERR_PTR(status);
802 if (active)
803 dput(active);
804 return dentry;
805 } else {
806 /* 625 /*
807 * Valid successful mount, return active dentry or NULL 626 * A user space application can (and has done in the past)
808 * for a new dentry. 627 * remove and re-create this directory during the callback.
628 * This can leave us with an unhashed dentry, but a
629 * successful mount! So we need to perform another
630 * cached lookup in case the dentry now exists.
809 */ 631 */
632 struct dentry *parent = dentry->d_parent;
633 struct dentry *new = d_lookup(parent, &dentry->d_name);
634 if (new != NULL)
635 dentry = new;
636 else
637 dentry = ERR_PTR(-ENOENT);
638
810 if (active) 639 if (active)
811 return active; 640 dput(active);
641
642 return dentry;
812 } 643 }
813 644
645 if (active)
646 return active;
647
814 return NULL; 648 return NULL;
815} 649}
816 650
@@ -834,6 +668,8 @@ static int autofs4_dir_symlink(struct inode *dir,
834 if (!ino) 668 if (!ino)
835 return -ENOMEM; 669 return -ENOMEM;
836 670
671 autofs4_del_active(dentry);
672
837 ino->size = strlen(symname); 673 ino->size = strlen(symname);
838 cp = kmalloc(ino->size + 1, GFP_KERNEL); 674 cp = kmalloc(ino->size + 1, GFP_KERNEL);
839 if (!cp) { 675 if (!cp) {
@@ -910,6 +746,7 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
910 dir->i_mtime = CURRENT_TIME; 746 dir->i_mtime = CURRENT_TIME;
911 747
912 spin_lock(&dcache_lock); 748 spin_lock(&dcache_lock);
749 autofs4_add_expiring(dentry);
913 spin_lock(&dentry->d_lock); 750 spin_lock(&dentry->d_lock);
914 __d_drop(dentry); 751 __d_drop(dentry);
915 spin_unlock(&dentry->d_lock); 752 spin_unlock(&dentry->d_lock);
@@ -935,6 +772,7 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
935 spin_unlock(&dcache_lock); 772 spin_unlock(&dcache_lock);
936 return -ENOTEMPTY; 773 return -ENOTEMPTY;
937 } 774 }
775 autofs4_add_expiring(dentry);
938 spin_lock(&dentry->d_lock); 776 spin_lock(&dentry->d_lock);
939 __d_drop(dentry); 777 __d_drop(dentry);
940 spin_unlock(&dentry->d_lock); 778 spin_unlock(&dentry->d_lock);
@@ -972,6 +810,8 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
972 if (!ino) 810 if (!ino)
973 return -ENOMEM; 811 return -ENOMEM;
974 812
813 autofs4_del_active(dentry);
814
975 inode = autofs4_get_inode(dir->i_sb, ino); 815 inode = autofs4_get_inode(dir->i_sb, ino);
976 if (!inode) { 816 if (!inode) {
977 if (!dentry->d_fsdata) 817 if (!dentry->d_fsdata)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9d0809629967..6ed434ac037f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -720,13 +720,15 @@ again:
720 inode->i_ino, orig_offset); 720 inode->i_ino, orig_offset);
721 BUG_ON(ret); 721 BUG_ON(ret);
722 } 722 }
723 fi = btrfs_item_ptr(leaf, path->slots[0],
724 struct btrfs_file_extent_item);
725 if (del_nr == 0) { 723 if (del_nr == 0) {
724 fi = btrfs_item_ptr(leaf, path->slots[0],
725 struct btrfs_file_extent_item);
726 btrfs_set_file_extent_type(leaf, fi, 726 btrfs_set_file_extent_type(leaf, fi,
727 BTRFS_FILE_EXTENT_REG); 727 BTRFS_FILE_EXTENT_REG);
728 btrfs_mark_buffer_dirty(leaf); 728 btrfs_mark_buffer_dirty(leaf);
729 } else { 729 } else {
730 fi = btrfs_item_ptr(leaf, del_slot - 1,
731 struct btrfs_file_extent_item);
730 btrfs_set_file_extent_type(leaf, fi, 732 btrfs_set_file_extent_type(leaf, fi,
731 BTRFS_FILE_EXTENT_REG); 733 BTRFS_FILE_EXTENT_REG);
732 btrfs_set_file_extent_num_bytes(leaf, fi, 734 btrfs_set_file_extent_num_bytes(leaf, fi,
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 14ac4806e291..eeb4986ea7db 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -348,7 +348,17 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
348 dir = dget_parent(object->dentry); 348 dir = dget_parent(object->dentry);
349 349
350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); 350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
351 ret = cachefiles_bury_object(cache, dir, object->dentry); 351
352 /* we need to check that our parent is _still_ our parent - it may have
353 * been renamed */
354 if (dir == object->dentry->d_parent) {
355 ret = cachefiles_bury_object(cache, dir, object->dentry);
356 } else {
357 /* it got moved, presumably by cachefilesd culling it, so it's
358 * no longer in the key path and we can ignore it */
359 mutex_unlock(&dir->d_inode->i_mutex);
360 ret = 0;
361 }
352 362
353 dput(dir); 363 dput(dir);
354 _leave(" = %d", ret); 364 _leave(" = %d", ret);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7b2600b380d7..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
1Version 1.62
2------------
3Add sockopt=TCP_NODELAY mount option.
4
1Version 1.61 5Version 1.61
2------------ 6------------
3Fix append problem to Samba servers (files opened with O_APPEND could 7Fix append problem to Samba servers (files opened with O_APPEND could
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
113extern const struct export_operations cifs_export_ops; 113extern const struct export_operations cifs_export_ops;
114#endif /* EXPERIMENTAL */ 114#endif /* EXPERIMENTAL */
115 115
116#define CIFS_VERSION "1.61" 116#define CIFS_VERSION "1.62"
117#endif /* _CIFSFS_H */ 117#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
149 bool svlocal:1; /* local server or remote */ 149 bool svlocal:1; /* local server or remote */
150 bool noblocksnd; /* use blocking sendmsg */ 150 bool noblocksnd; /* use blocking sendmsg */
151 bool noautotune; /* do not autotune send buf sizes */ 151 bool noautotune; /* do not autotune send buf sizes */
152 bool tcp_nodelay;
152 atomic_t inFlight; /* number of requests on the wire to server */ 153 atomic_t inFlight; /* number of requests on the wire to server */
153#ifdef CONFIG_CIFS_STATS2 154#ifdef CONFIG_CIFS_STATS2
154 atomic_t inSend; /* requests trying to send */ 155 atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 3bbcaa716b3c..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ 98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
99 unsigned int rsize; 99 unsigned int rsize;
100 unsigned int wsize; 100 unsigned int wsize;
101 unsigned int sockopt; 101 bool sockopt_tcp_nodelay:1;
102 unsigned short int port; 102 unsigned short int port;
103 char *prepath; 103 char *prepath;
104}; 104};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
1142 simple_strtoul(value, &value, 0); 1142 simple_strtoul(value, &value, 0);
1143 } 1143 }
1144 } else if (strnicmp(data, "sockopt", 5) == 0) { 1144 } else if (strnicmp(data, "sockopt", 5) == 0) {
1145 if (value && *value) { 1145 if (!value || !*value) {
1146 vol->sockopt = 1146 cERROR(1, ("no socket option specified"));
1147 simple_strtoul(value, &value, 0); 1147 continue;
1148 } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
1149 vol->sockopt_tcp_nodelay = 1;
1148 } 1150 }
1149 } else if (strnicmp(data, "netbiosname", 4) == 0) { 1151 } else if (strnicmp(data, "netbiosname", 4) == 0) {
1150 if (!value || !*value || (*value == ' ')) { 1152 if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1514 1516
1515 tcp_ses->noblocksnd = volume_info->noblocksnd; 1517 tcp_ses->noblocksnd = volume_info->noblocksnd;
1516 tcp_ses->noautotune = volume_info->noautotune; 1518 tcp_ses->noautotune = volume_info->noautotune;
1519 tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
1517 atomic_set(&tcp_ses->inFlight, 0); 1520 atomic_set(&tcp_ses->inFlight, 0);
1518 init_waitqueue_head(&tcp_ses->response_q); 1521 init_waitqueue_head(&tcp_ses->response_q);
1519 init_waitqueue_head(&tcp_ses->request_q); 1522 init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
1764ipv4_connect(struct TCP_Server_Info *server) 1767ipv4_connect(struct TCP_Server_Info *server)
1765{ 1768{
1766 int rc = 0; 1769 int rc = 0;
1770 int val;
1767 bool connected = false; 1771 bool connected = false;
1768 __be16 orig_port = 0; 1772 __be16 orig_port = 0;
1769 struct socket *socket = server->ssocket; 1773 struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
1845 socket->sk->sk_rcvbuf = 140 * 1024; 1849 socket->sk->sk_rcvbuf = 140 * 1024;
1846 } 1850 }
1847 1851
1852 if (server->tcp_nodelay) {
1853 val = 1;
1854 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
1855 (char *)&val, sizeof(val));
1856 if (rc)
1857 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
1858 }
1859
1848 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", 1860 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
1849 socket->sk->sk_sndbuf, 1861 socket->sk->sk_sndbuf,
1850 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); 1862 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
1916ipv6_connect(struct TCP_Server_Info *server) 1928ipv6_connect(struct TCP_Server_Info *server)
1917{ 1929{
1918 int rc = 0; 1930 int rc = 0;
1931 int val;
1919 bool connected = false; 1932 bool connected = false;
1920 __be16 orig_port = 0; 1933 __be16 orig_port = 0;
1921 struct socket *socket = server->ssocket; 1934 struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
1987 */ 2000 */
1988 socket->sk->sk_rcvtimeo = 7 * HZ; 2001 socket->sk->sk_rcvtimeo = 7 * HZ;
1989 socket->sk->sk_sndtimeo = 5 * HZ; 2002 socket->sk->sk_sndtimeo = 5 * HZ;
2003
2004 if (server->tcp_nodelay) {
2005 val = 1;
2006 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
2007 (char *)&val, sizeof(val));
2008 if (rc)
2009 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
2010 }
2011
1990 server->ssocket = socket; 2012 server->ssocket = socket;
1991 2013
1992 return rc; 2014 return rc;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 057e1dae12ab..3d8f8a96f5a3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2289,9 +2289,9 @@ cifs_oplock_break(struct slow_work *work)
2289 if (inode && S_ISREG(inode->i_mode)) { 2289 if (inode && S_ISREG(inode->i_mode)) {
2290#ifdef CONFIG_CIFS_EXPERIMENTAL 2290#ifdef CONFIG_CIFS_EXPERIMENTAL
2291 if (cinode->clientCanCacheAll == 0) 2291 if (cinode->clientCanCacheAll == 0)
2292 break_lease(inode, FMODE_READ); 2292 break_lease(inode, O_RDONLY);
2293 else if (cinode->clientCanCacheRead == 0) 2293 else if (cinode->clientCanCacheRead == 0)
2294 break_lease(inode, FMODE_WRITE); 2294 break_lease(inode, O_WRONLY);
2295#endif 2295#endif
2296 rc = filemap_fdatawrite(inode->i_mapping); 2296 rc = filemap_fdatawrite(inode->i_mapping);
2297 if (cinode->clientCanCacheRead == 0) { 2297 if (cinode->clientCanCacheRead == 0) {
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cf18ee765590..e3fda978f481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1762 CIFS_MOUNT_MAP_SPECIAL_CHR); 1762 CIFS_MOUNT_MAP_SPECIAL_CHR);
1763 } 1763 }
1764 1764
1765 if (!rc) 1765 if (!rc) {
1766 rc = inode_setattr(inode, attrs); 1766 rc = inode_setattr(inode, attrs);
1767
1768 /* force revalidate when any of these times are set since some
1769 of the fs types (eg ext3, fat) do not have fine enough
1770 time granularity to match protocol, and we do not have a
1771 a way (yet) to query the server fs's time granularity (and
1772 whether it rounds times down).
1773 */
1774 if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
1775 cifsInode->time = 0;
1776 }
1767out: 1777out:
1768 kfree(args); 1778 kfree(args);
1769 kfree(full_path); 1779 kfree(full_path);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f84062f9a985..c343b14ba2d3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
77 77
78 cFYI(1, ("For %s", name->name)); 78 cFYI(1, ("For %s", name->name));
79 79
80 if (parent->d_op && parent->d_op->d_hash)
81 parent->d_op->d_hash(parent, name);
82 else
83 name->hash = full_name_hash(name->name, name->len);
84
80 dentry = d_lookup(parent, name); 85 dentry = d_lookup(parent, name);
81 if (dentry) { 86 if (dentry) {
82 /* FIXME: check for inode number changes? */ 87 /* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
666 min(len, max_len), nlt, 671 min(len, max_len), nlt,
667 cifs_sb->mnt_cifs_flags & 672 cifs_sb->mnt_cifs_flags &
668 CIFS_MOUNT_MAP_SPECIAL_CHR); 673 CIFS_MOUNT_MAP_SPECIAL_CHR);
674 pqst->len -= nls_nullsize(nlt);
669 } else { 675 } else {
670 pqst->name = filename; 676 pqst->name = filename;
671 pqst->len = len; 677 pqst->len = len;
672 } 678 }
673 pqst->hash = full_name_hash(pqst->name, pqst->len);
674/* cFYI(1, ("filldir on %s",pqst->name)); */
675 return rc; 679 return rc;
676} 680}
677 681
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7085a6275c4c..aaa9c1c5a5bd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
223 /* null user mount */ 223 /* null user mount */
224 *bcc_ptr = 0; 224 *bcc_ptr = 0;
225 *(bcc_ptr+1) = 0; 225 *(bcc_ptr+1) = 0;
226 } else { /* 300 should be long enough for any conceivable user name */ 226 } else {
227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, 227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
228 300, nls_cp); 228 MAX_USERNAME_SIZE, nls_cp);
229 } 229 }
230 bcc_ptr += 2 * bytes_ret; 230 bcc_ptr += 2 * bytes_ret;
231 bcc_ptr += 2; /* account for null termination */ 231 bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
246 /* copy user */ 246 /* copy user */
247 if (ses->userName == NULL) { 247 if (ses->userName == NULL) {
248 /* BB what about null user mounts - check that we do this BB */ 248 /* BB what about null user mounts - check that we do this BB */
249 } else { /* 300 should be long enough for any conceivable user name */ 249 } else {
250 strncpy(bcc_ptr, ses->userName, 300); 250 strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
251 } 251 }
252 /* BB improve check for overflow */ 252 bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
253 bcc_ptr += strnlen(ses->userName, 300);
254 *bcc_ptr = 0; 253 *bcc_ptr = 0;
255 bcc_ptr++; /* account for null termination */ 254 bcc_ptr++; /* account for null termination */
256 255
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c5c45de1a2ee..0ca9ec4a79c3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
301 u32 data; 301 u32 data;
302 void __user *dxferp; 302 void __user *dxferp;
303 int err; 303 int err;
304 int interface_id;
305
306 if (get_user(interface_id, &sgio32->interface_id))
307 return -EFAULT;
308 if (interface_id != 'S')
309 return sys_ioctl(fd, cmd, (unsigned long)sgio32);
304 310
305 if (get_user(iovec_count, &sgio32->iovec_count)) 311 if (get_user(iovec_count, &sgio32->iovec_count))
306 return -EFAULT; 312 return -EFAULT;
@@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF)
936COMPATIBLE_IOCTL(TIOCLINUX) 942COMPATIBLE_IOCTL(TIOCLINUX)
937COMPATIBLE_IOCTL(TIOCSBRK) 943COMPATIBLE_IOCTL(TIOCSBRK)
938COMPATIBLE_IOCTL(TIOCCBRK) 944COMPATIBLE_IOCTL(TIOCCBRK)
945COMPATIBLE_IOCTL(TIOCGSID)
939COMPATIBLE_IOCTL(TIOCGICOUNT) 946COMPATIBLE_IOCTL(TIOCGICOUNT)
940/* Little t */ 947/* Little t */
941COMPATIBLE_IOCTL(TIOCGETD) 948COMPATIBLE_IOCTL(TIOCGETD)
@@ -1038,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE)
1038#ifdef CONFIG_BLOCK 1045#ifdef CONFIG_BLOCK
1039/* loop */ 1046/* loop */
1040IGNORE_IOCTL(LOOP_CLR_FD) 1047IGNORE_IOCTL(LOOP_CLR_FD)
1048/* md calls this on random blockdevs */
1049IGNORE_IOCTL(RAID_VERSION)
1041/* SG stuff */ 1050/* SG stuff */
1042COMPATIBLE_IOCTL(SG_SET_TIMEOUT) 1051COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
1043COMPATIBLE_IOCTL(SG_GET_TIMEOUT) 1052COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
diff --git a/fs/dcache.c b/fs/dcache.c
index 953173a293a9..f1358e5c3a59 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -257,6 +257,7 @@ kill_it:
257 if (dentry) 257 if (dentry)
258 goto repeat; 258 goto repeat;
259} 259}
260EXPORT_SYMBOL(dput);
260 261
261/** 262/**
262 * d_invalidate - invalidate a dentry 263 * d_invalidate - invalidate a dentry
@@ -314,6 +315,7 @@ int d_invalidate(struct dentry * dentry)
314 spin_unlock(&dcache_lock); 315 spin_unlock(&dcache_lock);
315 return 0; 316 return 0;
316} 317}
318EXPORT_SYMBOL(d_invalidate);
317 319
318/* This should be called _only_ with dcache_lock held */ 320/* This should be called _only_ with dcache_lock held */
319 321
@@ -328,6 +330,7 @@ struct dentry * dget_locked(struct dentry *dentry)
328{ 330{
329 return __dget_locked(dentry); 331 return __dget_locked(dentry);
330} 332}
333EXPORT_SYMBOL(dget_locked);
331 334
332/** 335/**
333 * d_find_alias - grab a hashed alias of inode 336 * d_find_alias - grab a hashed alias of inode
@@ -384,6 +387,7 @@ struct dentry * d_find_alias(struct inode *inode)
384 } 387 }
385 return de; 388 return de;
386} 389}
390EXPORT_SYMBOL(d_find_alias);
387 391
388/* 392/*
389 * Try to kill dentries associated with this inode. 393 * Try to kill dentries associated with this inode.
@@ -408,6 +412,7 @@ restart:
408 } 412 }
409 spin_unlock(&dcache_lock); 413 spin_unlock(&dcache_lock);
410} 414}
415EXPORT_SYMBOL(d_prune_aliases);
411 416
412/* 417/*
413 * Throw away a dentry - free the inode, dput the parent. This requires that 418 * Throw away a dentry - free the inode, dput the parent. This requires that
@@ -610,6 +615,7 @@ void shrink_dcache_sb(struct super_block * sb)
610{ 615{
611 __shrink_dcache_sb(sb, NULL, 0); 616 __shrink_dcache_sb(sb, NULL, 0);
612} 617}
618EXPORT_SYMBOL(shrink_dcache_sb);
613 619
614/* 620/*
615 * destroy a single subtree of dentries for unmount 621 * destroy a single subtree of dentries for unmount
@@ -792,6 +798,7 @@ positive:
792 spin_unlock(&dcache_lock); 798 spin_unlock(&dcache_lock);
793 return 1; 799 return 1;
794} 800}
801EXPORT_SYMBOL(have_submounts);
795 802
796/* 803/*
797 * Search the dentry child list for the specified parent, 804 * Search the dentry child list for the specified parent,
@@ -876,6 +883,7 @@ void shrink_dcache_parent(struct dentry * parent)
876 while ((found = select_parent(parent)) != 0) 883 while ((found = select_parent(parent)) != 0)
877 __shrink_dcache_sb(sb, &found, 0); 884 __shrink_dcache_sb(sb, &found, 0);
878} 885}
886EXPORT_SYMBOL(shrink_dcache_parent);
879 887
880/* 888/*
881 * Scan `nr' dentries and return the number which remain. 889 * Scan `nr' dentries and return the number which remain.
@@ -968,6 +976,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
968 976
969 return dentry; 977 return dentry;
970} 978}
979EXPORT_SYMBOL(d_alloc);
971 980
972struct dentry *d_alloc_name(struct dentry *parent, const char *name) 981struct dentry *d_alloc_name(struct dentry *parent, const char *name)
973{ 982{
@@ -1012,6 +1021,7 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
1012 spin_unlock(&dcache_lock); 1021 spin_unlock(&dcache_lock);
1013 security_d_instantiate(entry, inode); 1022 security_d_instantiate(entry, inode);
1014} 1023}
1024EXPORT_SYMBOL(d_instantiate);
1015 1025
1016/** 1026/**
1017 * d_instantiate_unique - instantiate a non-aliased dentry 1027 * d_instantiate_unique - instantiate a non-aliased dentry
@@ -1108,6 +1118,7 @@ struct dentry * d_alloc_root(struct inode * root_inode)
1108 } 1118 }
1109 return res; 1119 return res;
1110} 1120}
1121EXPORT_SYMBOL(d_alloc_root);
1111 1122
1112static inline struct hlist_head *d_hash(struct dentry *parent, 1123static inline struct hlist_head *d_hash(struct dentry *parent,
1113 unsigned long hash) 1124 unsigned long hash)
@@ -1211,7 +1222,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1211 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); 1222 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1212 spin_unlock(&dcache_lock); 1223 spin_unlock(&dcache_lock);
1213 security_d_instantiate(new, inode); 1224 security_d_instantiate(new, inode);
1214 d_rehash(dentry);
1215 d_move(new, dentry); 1225 d_move(new, dentry);
1216 iput(inode); 1226 iput(inode);
1217 } else { 1227 } else {
@@ -1225,6 +1235,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1225 d_add(dentry, inode); 1235 d_add(dentry, inode);
1226 return new; 1236 return new;
1227} 1237}
1238EXPORT_SYMBOL(d_splice_alias);
1228 1239
1229/** 1240/**
1230 * d_add_ci - lookup or allocate new dentry with case-exact name 1241 * d_add_ci - lookup or allocate new dentry with case-exact name
@@ -1314,6 +1325,7 @@ err_out:
1314 iput(inode); 1325 iput(inode);
1315 return ERR_PTR(error); 1326 return ERR_PTR(error);
1316} 1327}
1328EXPORT_SYMBOL(d_add_ci);
1317 1329
1318/** 1330/**
1319 * d_lookup - search for a dentry 1331 * d_lookup - search for a dentry
@@ -1357,6 +1369,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
1357 } while (read_seqretry(&rename_lock, seq)); 1369 } while (read_seqretry(&rename_lock, seq));
1358 return dentry; 1370 return dentry;
1359} 1371}
1372EXPORT_SYMBOL(d_lookup);
1360 1373
1361struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) 1374struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
1362{ 1375{
@@ -1483,6 +1496,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
1483out: 1496out:
1484 return 0; 1497 return 0;
1485} 1498}
1499EXPORT_SYMBOL(d_validate);
1486 1500
1487/* 1501/*
1488 * When a file is deleted, we have two options: 1502 * When a file is deleted, we have two options:
@@ -1528,6 +1542,7 @@ void d_delete(struct dentry * dentry)
1528 1542
1529 fsnotify_nameremove(dentry, isdir); 1543 fsnotify_nameremove(dentry, isdir);
1530} 1544}
1545EXPORT_SYMBOL(d_delete);
1531 1546
1532static void __d_rehash(struct dentry * entry, struct hlist_head *list) 1547static void __d_rehash(struct dentry * entry, struct hlist_head *list)
1533{ 1548{
@@ -1556,6 +1571,7 @@ void d_rehash(struct dentry * entry)
1556 spin_unlock(&entry->d_lock); 1571 spin_unlock(&entry->d_lock);
1557 spin_unlock(&dcache_lock); 1572 spin_unlock(&dcache_lock);
1558} 1573}
1574EXPORT_SYMBOL(d_rehash);
1559 1575
1560/* 1576/*
1561 * When switching names, the actual string doesn't strictly have to 1577 * When switching names, the actual string doesn't strictly have to
@@ -1702,6 +1718,7 @@ void d_move(struct dentry * dentry, struct dentry * target)
1702 d_move_locked(dentry, target); 1718 d_move_locked(dentry, target);
1703 spin_unlock(&dcache_lock); 1719 spin_unlock(&dcache_lock);
1704} 1720}
1721EXPORT_SYMBOL(d_move);
1705 1722
1706/** 1723/**
1707 * d_ancestor - search for an ancestor 1724 * d_ancestor - search for an ancestor
@@ -1868,6 +1885,7 @@ shouldnt_be_hashed:
1868 spin_unlock(&dcache_lock); 1885 spin_unlock(&dcache_lock);
1869 BUG(); 1886 BUG();
1870} 1887}
1888EXPORT_SYMBOL_GPL(d_materialise_unique);
1871 1889
1872static int prepend(char **buffer, int *buflen, const char *str, int namelen) 1890static int prepend(char **buffer, int *buflen, const char *str, int namelen)
1873{ 1891{
@@ -2005,6 +2023,7 @@ char *d_path(const struct path *path, char *buf, int buflen)
2005 path_put(&root); 2023 path_put(&root);
2006 return res; 2024 return res;
2007} 2025}
2026EXPORT_SYMBOL(d_path);
2008 2027
2009/* 2028/*
2010 * Helper function for dentry_operations.d_dname() members 2029 * Helper function for dentry_operations.d_dname() members
@@ -2171,6 +2190,30 @@ int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2171 return result; 2190 return result;
2172} 2191}
2173 2192
2193int path_is_under(struct path *path1, struct path *path2)
2194{
2195 struct vfsmount *mnt = path1->mnt;
2196 struct dentry *dentry = path1->dentry;
2197 int res;
2198 spin_lock(&vfsmount_lock);
2199 if (mnt != path2->mnt) {
2200 for (;;) {
2201 if (mnt->mnt_parent == mnt) {
2202 spin_unlock(&vfsmount_lock);
2203 return 0;
2204 }
2205 if (mnt->mnt_parent == path2->mnt)
2206 break;
2207 mnt = mnt->mnt_parent;
2208 }
2209 dentry = mnt->mnt_mountpoint;
2210 }
2211 res = is_subdir(dentry, path2->dentry);
2212 spin_unlock(&vfsmount_lock);
2213 return res;
2214}
2215EXPORT_SYMBOL(path_is_under);
2216
2174void d_genocide(struct dentry *root) 2217void d_genocide(struct dentry *root)
2175{ 2218{
2176 struct dentry *this_parent = root; 2219 struct dentry *this_parent = root;
@@ -2228,6 +2271,7 @@ ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2228 } 2271 }
2229 return ino; 2272 return ino;
2230} 2273}
2274EXPORT_SYMBOL(find_inode_number);
2231 2275
2232static __initdata unsigned long dhash_entries; 2276static __initdata unsigned long dhash_entries;
2233static int __init set_dhash_entries(char *str) 2277static int __init set_dhash_entries(char *str)
@@ -2297,6 +2341,7 @@ static void __init dcache_init(void)
2297 2341
2298/* SLAB cache for __getname() consumers */ 2342/* SLAB cache for __getname() consumers */
2299struct kmem_cache *names_cachep __read_mostly; 2343struct kmem_cache *names_cachep __read_mostly;
2344EXPORT_SYMBOL(names_cachep);
2300 2345
2301EXPORT_SYMBOL(d_genocide); 2346EXPORT_SYMBOL(d_genocide);
2302 2347
@@ -2326,26 +2371,3 @@ void __init vfs_caches_init(unsigned long mempages)
2326 bdev_cache_init(); 2371 bdev_cache_init();
2327 chrdev_init(); 2372 chrdev_init();
2328} 2373}
2329
2330EXPORT_SYMBOL(d_alloc);
2331EXPORT_SYMBOL(d_alloc_root);
2332EXPORT_SYMBOL(d_delete);
2333EXPORT_SYMBOL(d_find_alias);
2334EXPORT_SYMBOL(d_instantiate);
2335EXPORT_SYMBOL(d_invalidate);
2336EXPORT_SYMBOL(d_lookup);
2337EXPORT_SYMBOL(d_move);
2338EXPORT_SYMBOL_GPL(d_materialise_unique);
2339EXPORT_SYMBOL(d_path);
2340EXPORT_SYMBOL(d_prune_aliases);
2341EXPORT_SYMBOL(d_rehash);
2342EXPORT_SYMBOL(d_splice_alias);
2343EXPORT_SYMBOL(d_add_ci);
2344EXPORT_SYMBOL(d_validate);
2345EXPORT_SYMBOL(dget_locked);
2346EXPORT_SYMBOL(dput);
2347EXPORT_SYMBOL(find_inode_number);
2348EXPORT_SYMBOL(have_submounts);
2349EXPORT_SYMBOL(names_cachep);
2350EXPORT_SYMBOL(shrink_dcache_parent);
2351EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/exec.c b/fs/exec.c
index 0790a107ff7e..cce6bbdbdbb1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
571 struct vm_area_struct *prev = NULL; 571 struct vm_area_struct *prev = NULL;
572 unsigned long vm_flags; 572 unsigned long vm_flags;
573 unsigned long stack_base; 573 unsigned long stack_base;
574 unsigned long stack_size;
575 unsigned long stack_expand;
576 unsigned long rlim_stack;
574 577
575#ifdef CONFIG_STACK_GROWSUP 578#ifdef CONFIG_STACK_GROWSUP
576 /* Limit stack size to 1GB */ 579 /* Limit stack size to 1GB */
@@ -627,10 +630,23 @@ int setup_arg_pages(struct linux_binprm *bprm,
627 goto out_unlock; 630 goto out_unlock;
628 } 631 }
629 632
633 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
634 stack_size = vma->vm_end - vma->vm_start;
635 /*
636 * Align this down to a page boundary as expand_stack
637 * will align it up.
638 */
639 rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
630#ifdef CONFIG_STACK_GROWSUP 640#ifdef CONFIG_STACK_GROWSUP
631 stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; 641 if (stack_size + stack_expand > rlim_stack)
642 stack_base = vma->vm_start + rlim_stack;
643 else
644 stack_base = vma->vm_end + stack_expand;
632#else 645#else
633 stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; 646 if (stack_size + stack_expand > rlim_stack)
647 stack_base = vma->vm_end - rlim_stack;
648 else
649 stack_base = vma->vm_start - stack_expand;
634#endif 650#endif
635 ret = expand_stack(vma, stack_base); 651 ret = expand_stack(vma, stack_base);
636 if (ret) 652 if (ret)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 9630583cef28..56eee3d796c2 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -116,11 +116,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
116 * devices or filesystem images. 116 * devices or filesystem images.
117 */ 117 */
118 memset(buf, 0, sizeof(buf)); 118 memset(buf, 0, sizeof(buf));
119 path.mnt = mnt->mnt_parent; 119 path.mnt = mnt;
120 path.dentry = mnt->mnt_mountpoint; 120 path.dentry = mnt->mnt_root;
121 path_get(&path);
122 cp = d_path(&path, buf, sizeof(buf)); 121 cp = d_path(&path, buf, sizeof(buf));
123 path_put(&path);
124 if (!IS_ERR(cp)) { 122 if (!IS_ERR(cp)) {
125 memcpy(sbi->s_es->s_last_mounted, cp, 123 memcpy(sbi->s_es->s_last_mounted, cp,
126 sizeof(sbi->s_es->s_last_mounted)); 124 sizeof(sbi->s_es->s_last_mounted));
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6d47379e794b..583e823307ae 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
541 *ptr++ = cpu_to_be64(bn++); 541 *ptr++ = cpu_to_be64(bn++);
542 break; 542 break;
543 } 543 }
544 } while (state != ALLOC_DATA); 544 } while ((state != ALLOC_DATA) || !dblock);
545 545
546 ip->i_height = height; 546 ip->i_height = height;
547 gfs2_add_inode_blocks(&ip->i_inode, alloced); 547 gfs2_add_inode_blocks(&ip->i_inode, alloced);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 8a102f731003..a86ed6381566 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -725,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
725 goto fail; 725 goto fail;
726 } 726 }
727 727
728 error = -EINVAL; 728 error = -EUSERS;
729 if (!gfs2_jindex_size(sdp)) { 729 if (!gfs2_jindex_size(sdp)) {
730 fs_err(sdp, "no journals!\n"); 730 fs_err(sdp, "no journals!\n");
731 goto fail_jindex; 731 goto fail_jindex;
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 84350e1be66d..4e64352d49de 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -976,122 +976,62 @@ out:
976} 976}
977 977
978/** 978/**
979 * gfs2_readlinki - return the contents of a symlink 979 * gfs2_follow_link - Follow a symbolic link
980 * @ip: the symlink's inode 980 * @dentry: The dentry of the link
981 * @buf: a pointer to the buffer to be filled 981 * @nd: Data that we pass to vfs_follow_link()
982 * @len: a pointer to the length of @buf
983 * 982 *
984 * If @buf is too small, a piece of memory is kmalloc()ed and needs 983 * This can handle symlinks of any size.
985 * to be freed by the caller.
986 * 984 *
987 * Returns: errno 985 * Returns: 0 on success or error code
988 */ 986 */
989 987
990static int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len) 988static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
991{ 989{
990 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
992 struct gfs2_holder i_gh; 991 struct gfs2_holder i_gh;
993 struct buffer_head *dibh; 992 struct buffer_head *dibh;
994 unsigned int x; 993 unsigned int x;
994 char *buf;
995 int error; 995 int error;
996 996
997 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); 997 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
998 error = gfs2_glock_nq(&i_gh); 998 error = gfs2_glock_nq(&i_gh);
999 if (error) { 999 if (error) {
1000 gfs2_holder_uninit(&i_gh); 1000 gfs2_holder_uninit(&i_gh);
1001 return error; 1001 nd_set_link(nd, ERR_PTR(error));
1002 return NULL;
1002 } 1003 }
1003 1004
1004 if (!ip->i_disksize) { 1005 if (!ip->i_disksize) {
1005 gfs2_consist_inode(ip); 1006 gfs2_consist_inode(ip);
1006 error = -EIO; 1007 buf = ERR_PTR(-EIO);
1007 goto out; 1008 goto out;
1008 } 1009 }
1009 1010
1010 error = gfs2_meta_inode_buffer(ip, &dibh); 1011 error = gfs2_meta_inode_buffer(ip, &dibh);
1011 if (error) 1012 if (error) {
1013 buf = ERR_PTR(error);
1012 goto out; 1014 goto out;
1013
1014 x = ip->i_disksize + 1;
1015 if (x > *len) {
1016 *buf = kmalloc(x, GFP_NOFS);
1017 if (!*buf) {
1018 error = -ENOMEM;
1019 goto out_brelse;
1020 }
1021 } 1015 }
1022 1016
1023 memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x); 1017 x = ip->i_disksize + 1;
1024 *len = x; 1018 buf = kmalloc(x, GFP_NOFS);
1025 1019 if (!buf)
1026out_brelse: 1020 buf = ERR_PTR(-ENOMEM);
1021 else
1022 memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1027 brelse(dibh); 1023 brelse(dibh);
1028out: 1024out:
1029 gfs2_glock_dq_uninit(&i_gh); 1025 gfs2_glock_dq_uninit(&i_gh);
1030 return error; 1026 nd_set_link(nd, buf);
1031} 1027 return NULL;
1032
1033/**
1034 * gfs2_readlink - Read the value of a symlink
1035 * @dentry: the symlink
1036 * @buf: the buffer to read the symlink data into
1037 * @size: the size of the buffer
1038 *
1039 * Returns: errno
1040 */
1041
1042static int gfs2_readlink(struct dentry *dentry, char __user *user_buf,
1043 int user_size)
1044{
1045 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
1046 char array[GFS2_FAST_NAME_SIZE], *buf = array;
1047 unsigned int len = GFS2_FAST_NAME_SIZE;
1048 int error;
1049
1050 error = gfs2_readlinki(ip, &buf, &len);
1051 if (error)
1052 return error;
1053
1054 if (user_size > len - 1)
1055 user_size = len - 1;
1056
1057 if (copy_to_user(user_buf, buf, user_size))
1058 error = -EFAULT;
1059 else
1060 error = user_size;
1061
1062 if (buf != array)
1063 kfree(buf);
1064
1065 return error;
1066} 1028}
1067 1029
1068/** 1030static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
1069 * gfs2_follow_link - Follow a symbolic link
1070 * @dentry: The dentry of the link
1071 * @nd: Data that we pass to vfs_follow_link()
1072 *
1073 * This can handle symlinks of any size. It is optimised for symlinks
1074 * under GFS2_FAST_NAME_SIZE.
1075 *
1076 * Returns: 0 on success or error code
1077 */
1078
1079static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
1080{ 1031{
1081 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 1032 char *s = nd_get_link(nd);
1082 char array[GFS2_FAST_NAME_SIZE], *buf = array; 1033 if (!IS_ERR(s))
1083 unsigned int len = GFS2_FAST_NAME_SIZE; 1034 kfree(s);
1084 int error;
1085
1086 error = gfs2_readlinki(ip, &buf, &len);
1087 if (!error) {
1088 error = vfs_follow_link(nd, buf);
1089 if (buf != array)
1090 kfree(buf);
1091 } else
1092 path_put(&nd->path);
1093
1094 return ERR_PTR(error);
1095} 1035}
1096 1036
1097/** 1037/**
@@ -1426,8 +1366,9 @@ const struct inode_operations gfs2_dir_iops = {
1426}; 1366};
1427 1367
1428const struct inode_operations gfs2_symlink_iops = { 1368const struct inode_operations gfs2_symlink_iops = {
1429 .readlink = gfs2_readlink, 1369 .readlink = generic_readlink,
1430 .follow_link = gfs2_follow_link, 1370 .follow_link = gfs2_follow_link,
1371 .put_link = gfs2_put_link,
1431 .permission = gfs2_permission, 1372 .permission = gfs2_permission,
1432 .setattr = gfs2_setattr, 1373 .setattr = gfs2_setattr,
1433 .getattr = gfs2_getattr, 1374 .getattr = gfs2_getattr,
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 1aa88c4e0964..6a2f04bf3df0 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -353,7 +353,7 @@ int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
353} 353}
354 354
355int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos, 355int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
356 unsigned len, char *buf) 356 unsigned len, const char *buf)
357{ 357{
358 struct buffer_head *bh; 358 struct buffer_head *bh;
359 char *data; 359 char *data;
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 940d6d150bee..67d9d36b3d5f 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -20,8 +20,8 @@ static int hpfs_hash_dentry(struct dentry *dentry, struct qstr *qstr)
20 20
21 if (l == 1) if (qstr->name[0]=='.') goto x; 21 if (l == 1) if (qstr->name[0]=='.') goto x;
22 if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x; 22 if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x;
23 hpfs_adjust_length((char *)qstr->name, &l); 23 hpfs_adjust_length(qstr->name, &l);
24 /*if (hpfs_chk_name((char *)qstr->name,&l))*/ 24 /*if (hpfs_chk_name(qstr->name,&l))*/
25 /*return -ENAMETOOLONG;*/ 25 /*return -ENAMETOOLONG;*/
26 /*return -ENOENT;*/ 26 /*return -ENOENT;*/
27 x: 27 x:
@@ -38,14 +38,16 @@ static int hpfs_compare_dentry(struct dentry *dentry, struct qstr *a, struct qst
38{ 38{
39 unsigned al=a->len; 39 unsigned al=a->len;
40 unsigned bl=b->len; 40 unsigned bl=b->len;
41 hpfs_adjust_length((char *)a->name, &al); 41 hpfs_adjust_length(a->name, &al);
42 /*hpfs_adjust_length((char *)b->name, &bl);*/ 42 /*hpfs_adjust_length(b->name, &bl);*/
43 /* 'a' is the qstr of an already existing dentry, so the name 43 /* 'a' is the qstr of an already existing dentry, so the name
44 * must be valid. 'b' must be validated first. 44 * must be valid. 'b' must be validated first.
45 */ 45 */
46 46
47 if (hpfs_chk_name((char *)b->name, &bl)) return 1; 47 if (hpfs_chk_name(b->name, &bl))
48 if (hpfs_compare_names(dentry->d_sb, (char *)a->name, al, (char *)b->name, bl, 0)) return 1; 48 return 1;
49 if (hpfs_compare_names(dentry->d_sb, a->name, al, b->name, bl, 0))
50 return 1;
49 return 0; 51 return 0;
50} 52}
51 53
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 8865c94f55f6..26e3964a4b8c 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -59,7 +59,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
59 struct hpfs_dirent *de; 59 struct hpfs_dirent *de;
60 int lc; 60 int lc;
61 long old_pos; 61 long old_pos;
62 char *tempname; 62 unsigned char *tempname;
63 int c1, c2 = 0; 63 int c1, c2 = 0;
64 int ret = 0; 64 int ret = 0;
65 65
@@ -158,11 +158,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
158 tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); 158 tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
159 if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) { 159 if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) {
160 filp->f_pos = old_pos; 160 filp->f_pos = old_pos;
161 if (tempname != (char *)de->name) kfree(tempname); 161 if (tempname != de->name) kfree(tempname);
162 hpfs_brelse4(&qbh); 162 hpfs_brelse4(&qbh);
163 goto out; 163 goto out;
164 } 164 }
165 if (tempname != (char *)de->name) kfree(tempname); 165 if (tempname != de->name) kfree(tempname);
166 hpfs_brelse4(&qbh); 166 hpfs_brelse4(&qbh);
167 } 167 }
168out: 168out:
@@ -187,7 +187,7 @@ out:
187 187
188struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 188struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
189{ 189{
190 const char *name = dentry->d_name.name; 190 const unsigned char *name = dentry->d_name.name;
191 unsigned len = dentry->d_name.len; 191 unsigned len = dentry->d_name.len;
192 struct quad_buffer_head qbh; 192 struct quad_buffer_head qbh;
193 struct hpfs_dirent *de; 193 struct hpfs_dirent *de;
@@ -197,7 +197,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
197 struct hpfs_inode_info *hpfs_result; 197 struct hpfs_inode_info *hpfs_result;
198 198
199 lock_kernel(); 199 lock_kernel();
200 if ((err = hpfs_chk_name((char *)name, &len))) { 200 if ((err = hpfs_chk_name(name, &len))) {
201 if (err == -ENAMETOOLONG) { 201 if (err == -ENAMETOOLONG) {
202 unlock_kernel(); 202 unlock_kernel();
203 return ERR_PTR(-ENAMETOOLONG); 203 return ERR_PTR(-ENAMETOOLONG);
@@ -209,7 +209,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
209 * '.' and '..' will never be passed here. 209 * '.' and '..' will never be passed here.
210 */ 210 */
211 211
212 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *) name, len, NULL, &qbh); 212 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh);
213 213
214 /* 214 /*
215 * This is not really a bailout, just means file not found. 215 * This is not really a bailout, just means file not found.
@@ -250,7 +250,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
250 hpfs_result = hpfs_i(result); 250 hpfs_result = hpfs_i(result);
251 if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; 251 if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;
252 252
253 hpfs_decide_conv(result, (char *)name, len); 253 hpfs_decide_conv(result, name, len);
254 254
255 if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { 255 if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) {
256 hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures"); 256 hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index fe83c2b7d2d8..9b2ffadfc8c4 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -158,7 +158,8 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
158 158
159/* Add an entry to dnode and don't care if it grows over 2048 bytes */ 159/* Add an entry to dnode and don't care if it grows over 2048 bytes */
160 160
161struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d, unsigned char *name, 161struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
162 const unsigned char *name,
162 unsigned namelen, secno down_ptr) 163 unsigned namelen, secno down_ptr)
163{ 164{
164 struct hpfs_dirent *de; 165 struct hpfs_dirent *de;
@@ -223,7 +224,7 @@ static void fix_up_ptrs(struct super_block *s, struct dnode *d)
223/* Add an entry to dnode and do dnode splitting if required */ 224/* Add an entry to dnode and do dnode splitting if required */
224 225
225static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno, 226static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
226 unsigned char *name, unsigned namelen, 227 const unsigned char *name, unsigned namelen,
227 struct hpfs_dirent *new_de, dnode_secno down_ptr) 228 struct hpfs_dirent *new_de, dnode_secno down_ptr)
228{ 229{
229 struct quad_buffer_head qbh, qbh1, qbh2; 230 struct quad_buffer_head qbh, qbh1, qbh2;
@@ -231,7 +232,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
231 dnode_secno adno, rdno; 232 dnode_secno adno, rdno;
232 struct hpfs_dirent *de; 233 struct hpfs_dirent *de;
233 struct hpfs_dirent nde; 234 struct hpfs_dirent nde;
234 char *nname; 235 unsigned char *nname;
235 int h; 236 int h;
236 int pos; 237 int pos;
237 struct buffer_head *bh; 238 struct buffer_head *bh;
@@ -305,7 +306,9 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
305 pos++; 306 pos++;
306 } 307 }
307 copy_de(new_de = &nde, de); 308 copy_de(new_de = &nde, de);
308 memcpy(name = nname, de->name, namelen = de->namelen); 309 memcpy(nname, de->name, de->namelen);
310 name = nname;
311 namelen = de->namelen;
309 for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4); 312 for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | pos, 4);
310 down_ptr = adno; 313 down_ptr = adno;
311 set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0); 314 set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
@@ -368,7 +371,8 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
368 * I hope, now it's finally bug-free. 371 * I hope, now it's finally bug-free.
369 */ 372 */
370 373
371int hpfs_add_dirent(struct inode *i, unsigned char *name, unsigned namelen, 374int hpfs_add_dirent(struct inode *i,
375 const unsigned char *name, unsigned namelen,
372 struct hpfs_dirent *new_de, int cdepth) 376 struct hpfs_dirent *new_de, int cdepth)
373{ 377{
374 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 378 struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
@@ -897,7 +901,8 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
897 901
898/* Find a dirent in tree */ 902/* Find a dirent in tree */
899 903
900struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno, char *name, unsigned len, 904struct hpfs_dirent *map_dirent(struct inode *inode, dnode_secno dno,
905 const unsigned char *name, unsigned len,
901 dnode_secno *dd, struct quad_buffer_head *qbh) 906 dnode_secno *dd, struct quad_buffer_head *qbh)
902{ 907{
903 struct dnode *dnode; 908 struct dnode *dnode;
@@ -988,8 +993,8 @@ void hpfs_remove_dtree(struct super_block *s, dnode_secno dno)
988struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno, 993struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
989 struct fnode *f, struct quad_buffer_head *qbh) 994 struct fnode *f, struct quad_buffer_head *qbh)
990{ 995{
991 char *name1; 996 unsigned char *name1;
992 char *name2; 997 unsigned char *name2;
993 int name1len, name2len; 998 int name1len, name2len;
994 struct dnode *d; 999 struct dnode *d;
995 dnode_secno dno, downd; 1000 dnode_secno dno, downd;
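A side note on the hpfs_add_to_dnode hunk above: once `name` is const-qualified it can no longer be memcpy()'s destination (that argument is a plain `void *`), so the old combined `memcpy(name = nname, ...)` had to be split — copy into the writable `nname` buffer first, then retarget the const pointer. A minimal sketch of the same pattern, with purely illustrative names:

/* Sketch only: splitting a copy-and-assign when the pointer is const. */
#include <string.h>

static void adopt_name(const unsigned char **name, unsigned *namelen,
		       unsigned char *scratch,
		       const unsigned char *src, unsigned srclen)
{
	/* memcpy(*name = scratch, src, srclen);  would now warn:
	 * the destination argument discards the const qualifier. */
	memcpy(scratch, src, srclen);	/* write through the non-const alias */
	*name = scratch;		/* then point the const view at it */
	*namelen = srclen;
}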
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index 547a8384571f..45e53d972b42 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -62,8 +62,8 @@ static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size)
62 return ret; 62 return ret;
63} 63}
64 64
65static void set_indirect_ea(struct super_block *s, int ano, secno a, char *data, 65static void set_indirect_ea(struct super_block *s, int ano, secno a,
66 int size) 66 const char *data, int size)
67{ 67{
68 hpfs_ea_write(s, a, ano, 0, size, data); 68 hpfs_ea_write(s, a, ano, 0, size, data);
69} 69}
@@ -186,7 +186,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
186 * This driver can't change sizes of eas ('cause I just don't need it). 186 * This driver can't change sizes of eas ('cause I just don't need it).
187 */ 187 */
188 188
189void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data, int size) 189void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
190 const char *data, int size)
190{ 191{
191 fnode_secno fno = inode->i_ino; 192 fnode_secno fno = inode->i_ino;
192 struct super_block *s = inode->i_sb; 193 struct super_block *s = inode->i_sb;
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 701ca54c0867..97bf738cd5d6 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -215,7 +215,7 @@ secno hpfs_bplus_lookup(struct super_block *, struct inode *, struct bplus_heade
215secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned); 215secno hpfs_add_sector_to_btree(struct super_block *, secno, int, unsigned);
216void hpfs_remove_btree(struct super_block *, struct bplus_header *); 216void hpfs_remove_btree(struct super_block *, struct bplus_header *);
217int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *); 217int hpfs_ea_read(struct super_block *, secno, int, unsigned, unsigned, char *);
218int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, char *); 218int hpfs_ea_write(struct super_block *, secno, int, unsigned, unsigned, const char *);
219void hpfs_ea_remove(struct super_block *, secno, int, unsigned); 219void hpfs_ea_remove(struct super_block *, secno, int, unsigned);
220void hpfs_truncate_btree(struct super_block *, secno, int, unsigned); 220void hpfs_truncate_btree(struct super_block *, secno, int, unsigned);
221void hpfs_remove_fnode(struct super_block *, fnode_secno fno); 221void hpfs_remove_fnode(struct super_block *, fnode_secno fno);
@@ -244,13 +244,17 @@ extern const struct file_operations hpfs_dir_ops;
244 244
245void hpfs_add_pos(struct inode *, loff_t *); 245void hpfs_add_pos(struct inode *, loff_t *);
246void hpfs_del_pos(struct inode *, loff_t *); 246void hpfs_del_pos(struct inode *, loff_t *);
247struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, unsigned char *, unsigned, secno); 247struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *,
248int hpfs_add_dirent(struct inode *, unsigned char *, unsigned, struct hpfs_dirent *, int); 248 const unsigned char *, unsigned, secno);
249int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned,
250 struct hpfs_dirent *, int);
249int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int); 251int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
250void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *); 252void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
251dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno); 253dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
252struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *); 254struct hpfs_dirent *map_pos_dirent(struct inode *, loff_t *, struct quad_buffer_head *);
253struct hpfs_dirent *map_dirent(struct inode *, dnode_secno, char *, unsigned, dnode_secno *, struct quad_buffer_head *); 255struct hpfs_dirent *map_dirent(struct inode *, dnode_secno,
256 const unsigned char *, unsigned, dnode_secno *,
257 struct quad_buffer_head *);
254void hpfs_remove_dtree(struct super_block *, dnode_secno); 258void hpfs_remove_dtree(struct super_block *, dnode_secno);
255struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *); 259struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct fnode *, struct quad_buffer_head *);
256 260
@@ -259,7 +263,8 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *, fnode_secno, struct f
259void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned); 263void hpfs_ea_ext_remove(struct super_block *, secno, int, unsigned);
260int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int); 264int hpfs_read_ea(struct super_block *, struct fnode *, char *, char *, int);
261char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *); 265char *hpfs_get_ea(struct super_block *, struct fnode *, char *, int *);
262void hpfs_set_ea(struct inode *, struct fnode *, char *, char *, int); 266void hpfs_set_ea(struct inode *, struct fnode *, const char *,
267 const char *, int);
263 268
264/* file.c */ 269/* file.c */
265 270
@@ -282,7 +287,7 @@ void hpfs_delete_inode(struct inode *);
282 287
283unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *); 288unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
284unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *); 289unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
285char *hpfs_load_code_page(struct super_block *, secno); 290unsigned char *hpfs_load_code_page(struct super_block *, secno);
286secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp); 291secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
287struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **); 292struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
288struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **); 293struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
@@ -292,12 +297,13 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino);
292/* name.c */ 297/* name.c */
293 298
294unsigned char hpfs_upcase(unsigned char *, unsigned char); 299unsigned char hpfs_upcase(unsigned char *, unsigned char);
295int hpfs_chk_name(unsigned char *, unsigned *); 300int hpfs_chk_name(const unsigned char *, unsigned *);
296char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int); 301unsigned char *hpfs_translate_name(struct super_block *, unsigned char *, unsigned, int, int);
297int hpfs_compare_names(struct super_block *, unsigned char *, unsigned, unsigned char *, unsigned, int); 302int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned,
298int hpfs_is_name_long(unsigned char *, unsigned); 303 const unsigned char *, unsigned, int);
299void hpfs_adjust_length(unsigned char *, unsigned *); 304int hpfs_is_name_long(const unsigned char *, unsigned);
300void hpfs_decide_conv(struct inode *, unsigned char *, unsigned); 305void hpfs_adjust_length(const unsigned char *, unsigned *);
306void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned);
301 307
302/* namei.c */ 308/* namei.c */
303 309
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index fe703ae46bc7..ff90affb94e1 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -46,7 +46,7 @@ void hpfs_read_inode(struct inode *i)
46 struct fnode *fnode; 46 struct fnode *fnode;
47 struct super_block *sb = i->i_sb; 47 struct super_block *sb = i->i_sb;
48 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 48 struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
49 unsigned char *ea; 49 void *ea;
50 int ea_size; 50 int ea_size;
51 51
52 if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) { 52 if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
@@ -112,7 +112,7 @@ void hpfs_read_inode(struct inode *i)
112 } 112 }
113 } 113 }
114 if (fnode->dirflag) { 114 if (fnode->dirflag) {
115 unsigned n_dnodes, n_subdirs; 115 int n_dnodes, n_subdirs;
116 i->i_mode |= S_IFDIR; 116 i->i_mode |= S_IFDIR;
117 i->i_op = &hpfs_dir_iops; 117 i->i_op = &hpfs_dir_iops;
118 i->i_fop = &hpfs_dir_ops; 118 i->i_fop = &hpfs_dir_ops;
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index c4724589b2eb..840d033ecee8 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -35,7 +35,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
35 * lowercasing table 35 * lowercasing table
36 */ 36 */
37 37
38char *hpfs_load_code_page(struct super_block *s, secno cps) 38unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
39{ 39{
40 struct buffer_head *bh; 40 struct buffer_head *bh;
41 secno cpds; 41 secno cpds;
@@ -71,7 +71,7 @@ char *hpfs_load_code_page(struct super_block *s, secno cps)
71 brelse(bh); 71 brelse(bh);
72 return NULL; 72 return NULL;
73 } 73 }
74 ptr = (char *)cpd + cpd->offs[cpi] + 6; 74 ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6;
75 if (!(cp_table = kmalloc(256, GFP_KERNEL))) { 75 if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
76 printk("HPFS: out of memory for code page table\n"); 76 printk("HPFS: out of memory for code page table\n");
77 brelse(bh); 77 brelse(bh);
@@ -217,7 +217,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
217 if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD))) 217 if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
218 if (hpfs_sb(s)->sb_chk) { 218 if (hpfs_sb(s)->sb_chk) {
219 unsigned p, pp = 0; 219 unsigned p, pp = 0;
220 unsigned char *d = (char *)dnode; 220 unsigned char *d = (unsigned char *)dnode;
221 int b = 0; 221 int b = 0;
222 if (dnode->magic != DNODE_MAGIC) { 222 if (dnode->magic != DNODE_MAGIC) {
223 hpfs_error(s, "bad magic on dnode %08x", secno); 223 hpfs_error(s, "bad magic on dnode %08x", secno);
diff --git a/fs/hpfs/name.c b/fs/hpfs/name.c
index 1f4a964384eb..f24736d7a439 100644
--- a/fs/hpfs/name.c
+++ b/fs/hpfs/name.c
@@ -8,16 +8,16 @@
8 8
9#include "hpfs_fn.h" 9#include "hpfs_fn.h"
10 10
11static char *text_postfix[]={ 11static const char *text_postfix[]={
12".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF", 12".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF",
13".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS", 13".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS",
14".RC", ".TEX", ".TXT", ".Y", ""}; 14".RC", ".TEX", ".TXT", ".Y", ""};
15 15
16static char *text_prefix[]={ 16static const char *text_prefix[]={
17"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ", 17"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ",
18"MAKEFILE", "READ.ME", "README", "TERMCAP", ""}; 18"MAKEFILE", "READ.ME", "README", "TERMCAP", ""};
19 19
20void hpfs_decide_conv(struct inode *inode, unsigned char *name, unsigned len) 20void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len)
21{ 21{
22 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); 22 struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
23 int i; 23 int i;
@@ -71,7 +71,7 @@ static inline unsigned char locase(unsigned char *dir, unsigned char a)
71 return dir[a]; 71 return dir[a];
72} 72}
73 73
74int hpfs_chk_name(unsigned char *name, unsigned *len) 74int hpfs_chk_name(const unsigned char *name, unsigned *len)
75{ 75{
76 int i; 76 int i;
77 if (*len > 254) return -ENAMETOOLONG; 77 if (*len > 254) return -ENAMETOOLONG;
@@ -83,10 +83,10 @@ int hpfs_chk_name(unsigned char *name, unsigned *len)
83 return 0; 83 return 0;
84} 84}
85 85
86char *hpfs_translate_name(struct super_block *s, unsigned char *from, 86unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from,
87 unsigned len, int lc, int lng) 87 unsigned len, int lc, int lng)
88{ 88{
89 char *to; 89 unsigned char *to;
90 int i; 90 int i;
91 if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) { 91 if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) {
92 printk("HPFS: Long name flag mismatch - name "); 92 printk("HPFS: Long name flag mismatch - name ");
@@ -103,8 +103,9 @@ char *hpfs_translate_name(struct super_block *s, unsigned char *from,
103 return to; 103 return to;
104} 104}
105 105
106int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1, 106int hpfs_compare_names(struct super_block *s,
107 unsigned char *n2, unsigned l2, int last) 107 const unsigned char *n1, unsigned l1,
108 const unsigned char *n2, unsigned l2, int last)
108{ 109{
109 unsigned l = l1 < l2 ? l1 : l2; 110 unsigned l = l1 < l2 ? l1 : l2;
110 unsigned i; 111 unsigned i;
@@ -120,7 +121,7 @@ int hpfs_compare_names(struct super_block *s, unsigned char *n1, unsigned l1,
120 return 0; 121 return 0;
121} 122}
122 123
123int hpfs_is_name_long(unsigned char *name, unsigned len) 124int hpfs_is_name_long(const unsigned char *name, unsigned len)
124{ 125{
125 int i,j; 126 int i,j;
126 for (i = 0; i < len && name[i] != '.'; i++) 127 for (i = 0; i < len && name[i] != '.'; i++)
@@ -134,7 +135,7 @@ int hpfs_is_name_long(unsigned char *name, unsigned len)
134 135
135/* OS/2 clears dots and spaces at the end of file name, so we have to */ 136/* OS/2 clears dots and spaces at the end of file name, so we have to */
136 137
137void hpfs_adjust_length(unsigned char *name, unsigned *len) 138void hpfs_adjust_length(const unsigned char *name, unsigned *len)
138{ 139{
139 if (!*len) return; 140 if (!*len) return;
140 if (*len == 1 && name[0] == '.') return; 141 if (*len == 1 && name[0] == '.') return;
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 82b9c4ba9ed0..11c2b4080f65 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -11,7 +11,7 @@
11 11
12static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) 12static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
13{ 13{
14 const char *name = dentry->d_name.name; 14 const unsigned char *name = dentry->d_name.name;
15 unsigned len = dentry->d_name.len; 15 unsigned len = dentry->d_name.len;
16 struct quad_buffer_head qbh0; 16 struct quad_buffer_head qbh0;
17 struct buffer_head *bh; 17 struct buffer_head *bh;
@@ -24,7 +24,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
24 int r; 24 int r;
25 struct hpfs_dirent dee; 25 struct hpfs_dirent dee;
26 int err; 26 int err;
27 if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; 27 if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
28 lock_kernel(); 28 lock_kernel();
29 err = -ENOSPC; 29 err = -ENOSPC;
30 fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh); 30 fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
@@ -62,7 +62,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
62 result->i_mode &= ~0222; 62 result->i_mode &= ~0222;
63 63
64 mutex_lock(&hpfs_i(dir)->i_mutex); 64 mutex_lock(&hpfs_i(dir)->i_mutex);
65 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 65 r = hpfs_add_dirent(dir, name, len, &dee, 0);
66 if (r == 1) 66 if (r == 1)
67 goto bail3; 67 goto bail3;
68 if (r == -1) { 68 if (r == -1) {
@@ -121,7 +121,7 @@ bail:
121 121
122static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) 122static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
123{ 123{
124 const char *name = dentry->d_name.name; 124 const unsigned char *name = dentry->d_name.name;
125 unsigned len = dentry->d_name.len; 125 unsigned len = dentry->d_name.len;
126 struct inode *result = NULL; 126 struct inode *result = NULL;
127 struct buffer_head *bh; 127 struct buffer_head *bh;
@@ -130,7 +130,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
130 int r; 130 int r;
131 struct hpfs_dirent dee; 131 struct hpfs_dirent dee;
132 int err; 132 int err;
133 if ((err = hpfs_chk_name((char *)name, &len))) 133 if ((err = hpfs_chk_name(name, &len)))
134 return err==-ENOENT ? -EINVAL : err; 134 return err==-ENOENT ? -EINVAL : err;
135 lock_kernel(); 135 lock_kernel();
136 err = -ENOSPC; 136 err = -ENOSPC;
@@ -155,7 +155,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
155 result->i_op = &hpfs_file_iops; 155 result->i_op = &hpfs_file_iops;
156 result->i_fop = &hpfs_file_ops; 156 result->i_fop = &hpfs_file_ops;
157 result->i_nlink = 1; 157 result->i_nlink = 1;
158 hpfs_decide_conv(result, (char *)name, len); 158 hpfs_decide_conv(result, name, len);
159 hpfs_i(result)->i_parent_dir = dir->i_ino; 159 hpfs_i(result)->i_parent_dir = dir->i_ino;
160 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date); 160 result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
161 result->i_ctime.tv_nsec = 0; 161 result->i_ctime.tv_nsec = 0;
@@ -170,7 +170,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
170 hpfs_i(result)->mmu_private = 0; 170 hpfs_i(result)->mmu_private = 0;
171 171
172 mutex_lock(&hpfs_i(dir)->i_mutex); 172 mutex_lock(&hpfs_i(dir)->i_mutex);
173 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 173 r = hpfs_add_dirent(dir, name, len, &dee, 0);
174 if (r == 1) 174 if (r == 1)
175 goto bail2; 175 goto bail2;
176 if (r == -1) { 176 if (r == -1) {
@@ -211,7 +211,7 @@ bail:
211 211
212static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) 212static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
213{ 213{
214 const char *name = dentry->d_name.name; 214 const unsigned char *name = dentry->d_name.name;
215 unsigned len = dentry->d_name.len; 215 unsigned len = dentry->d_name.len;
216 struct buffer_head *bh; 216 struct buffer_head *bh;
217 struct fnode *fnode; 217 struct fnode *fnode;
@@ -220,7 +220,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
220 struct hpfs_dirent dee; 220 struct hpfs_dirent dee;
221 struct inode *result = NULL; 221 struct inode *result = NULL;
222 int err; 222 int err;
223 if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; 223 if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
224 if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM; 224 if (hpfs_sb(dir->i_sb)->sb_eas < 2) return -EPERM;
225 if (!new_valid_dev(rdev)) 225 if (!new_valid_dev(rdev))
226 return -EINVAL; 226 return -EINVAL;
@@ -256,7 +256,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
256 init_special_inode(result, mode, rdev); 256 init_special_inode(result, mode, rdev);
257 257
258 mutex_lock(&hpfs_i(dir)->i_mutex); 258 mutex_lock(&hpfs_i(dir)->i_mutex);
259 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 259 r = hpfs_add_dirent(dir, name, len, &dee, 0);
260 if (r == 1) 260 if (r == 1)
261 goto bail2; 261 goto bail2;
262 if (r == -1) { 262 if (r == -1) {
@@ -289,7 +289,7 @@ bail:
289 289
290static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink) 290static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *symlink)
291{ 291{
292 const char *name = dentry->d_name.name; 292 const unsigned char *name = dentry->d_name.name;
293 unsigned len = dentry->d_name.len; 293 unsigned len = dentry->d_name.len;
294 struct buffer_head *bh; 294 struct buffer_head *bh;
295 struct fnode *fnode; 295 struct fnode *fnode;
@@ -298,7 +298,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
298 struct hpfs_dirent dee; 298 struct hpfs_dirent dee;
299 struct inode *result; 299 struct inode *result;
300 int err; 300 int err;
301 if ((err = hpfs_chk_name((char *)name, &len))) return err==-ENOENT ? -EINVAL : err; 301 if ((err = hpfs_chk_name(name, &len))) return err==-ENOENT ? -EINVAL : err;
302 lock_kernel(); 302 lock_kernel();
303 if (hpfs_sb(dir->i_sb)->sb_eas < 2) { 303 if (hpfs_sb(dir->i_sb)->sb_eas < 2) {
304 unlock_kernel(); 304 unlock_kernel();
@@ -335,7 +335,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
335 result->i_data.a_ops = &hpfs_symlink_aops; 335 result->i_data.a_ops = &hpfs_symlink_aops;
336 336
337 mutex_lock(&hpfs_i(dir)->i_mutex); 337 mutex_lock(&hpfs_i(dir)->i_mutex);
338 r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); 338 r = hpfs_add_dirent(dir, name, len, &dee, 0);
339 if (r == 1) 339 if (r == 1)
340 goto bail2; 340 goto bail2;
341 if (r == -1) { 341 if (r == -1) {
@@ -345,7 +345,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
345 fnode->len = len; 345 fnode->len = len;
346 memcpy(fnode->name, name, len > 15 ? 15 : len); 346 memcpy(fnode->name, name, len > 15 ? 15 : len);
347 fnode->up = dir->i_ino; 347 fnode->up = dir->i_ino;
348 hpfs_set_ea(result, fnode, "SYMLINK", (char *)symlink, strlen(symlink)); 348 hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink));
349 mark_buffer_dirty(bh); 349 mark_buffer_dirty(bh);
350 brelse(bh); 350 brelse(bh);
351 351
@@ -369,7 +369,7 @@ bail:
369 369
370static int hpfs_unlink(struct inode *dir, struct dentry *dentry) 370static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
371{ 371{
372 const char *name = dentry->d_name.name; 372 const unsigned char *name = dentry->d_name.name;
373 unsigned len = dentry->d_name.len; 373 unsigned len = dentry->d_name.len;
374 struct quad_buffer_head qbh; 374 struct quad_buffer_head qbh;
375 struct hpfs_dirent *de; 375 struct hpfs_dirent *de;
@@ -381,12 +381,12 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
381 int err; 381 int err;
382 382
383 lock_kernel(); 383 lock_kernel();
384 hpfs_adjust_length((char *)name, &len); 384 hpfs_adjust_length(name, &len);
385again: 385again:
386 mutex_lock(&hpfs_i(inode)->i_parent_mutex); 386 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
387 mutex_lock(&hpfs_i(dir)->i_mutex); 387 mutex_lock(&hpfs_i(dir)->i_mutex);
388 err = -ENOENT; 388 err = -ENOENT;
389 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 389 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
390 if (!de) 390 if (!de)
391 goto out; 391 goto out;
392 392
@@ -413,22 +413,25 @@ again:
413 413
414 mutex_unlock(&hpfs_i(dir)->i_mutex); 414 mutex_unlock(&hpfs_i(dir)->i_mutex);
415 mutex_unlock(&hpfs_i(inode)->i_parent_mutex); 415 mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
416 d_drop(dentry); 416 dentry_unhash(dentry);
417 spin_lock(&dentry->d_lock); 417 if (!d_unhashed(dentry)) {
418 if (atomic_read(&dentry->d_count) > 1 || 418 dput(dentry);
419 generic_permission(inode, MAY_WRITE, NULL) || 419 unlock_kernel();
420 return -ENOSPC;
421 }
422 if (generic_permission(inode, MAY_WRITE, NULL) ||
420 !S_ISREG(inode->i_mode) || 423 !S_ISREG(inode->i_mode) ||
421 get_write_access(inode)) { 424 get_write_access(inode)) {
422 spin_unlock(&dentry->d_lock);
423 d_rehash(dentry); 425 d_rehash(dentry);
426 dput(dentry);
424 } else { 427 } else {
425 struct iattr newattrs; 428 struct iattr newattrs;
426 spin_unlock(&dentry->d_lock);
427 /*printk("HPFS: truncating file before delete.\n");*/ 429 /*printk("HPFS: truncating file before delete.\n");*/
428 newattrs.ia_size = 0; 430 newattrs.ia_size = 0;
429 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; 431 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
430 err = notify_change(dentry, &newattrs); 432 err = notify_change(dentry, &newattrs);
431 put_write_access(inode); 433 put_write_access(inode);
434 dput(dentry);
432 if (!err) 435 if (!err)
433 goto again; 436 goto again;
434 } 437 }
@@ -451,7 +454,7 @@ out:
451 454
452static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) 455static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
453{ 456{
454 const char *name = dentry->d_name.name; 457 const unsigned char *name = dentry->d_name.name;
455 unsigned len = dentry->d_name.len; 458 unsigned len = dentry->d_name.len;
456 struct quad_buffer_head qbh; 459 struct quad_buffer_head qbh;
457 struct hpfs_dirent *de; 460 struct hpfs_dirent *de;
@@ -462,12 +465,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
462 int err; 465 int err;
463 int r; 466 int r;
464 467
465 hpfs_adjust_length((char *)name, &len); 468 hpfs_adjust_length(name, &len);
466 lock_kernel(); 469 lock_kernel();
467 mutex_lock(&hpfs_i(inode)->i_parent_mutex); 470 mutex_lock(&hpfs_i(inode)->i_parent_mutex);
468 mutex_lock(&hpfs_i(dir)->i_mutex); 471 mutex_lock(&hpfs_i(dir)->i_mutex);
469 err = -ENOENT; 472 err = -ENOENT;
470 de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); 473 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
471 if (!de) 474 if (!de)
472 goto out; 475 goto out;
473 476
@@ -546,10 +549,10 @@ const struct address_space_operations hpfs_symlink_aops = {
546static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, 549static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
547 struct inode *new_dir, struct dentry *new_dentry) 550 struct inode *new_dir, struct dentry *new_dentry)
548{ 551{
549 char *old_name = (char *)old_dentry->d_name.name; 552 const unsigned char *old_name = old_dentry->d_name.name;
550 int old_len = old_dentry->d_name.len; 553 unsigned old_len = old_dentry->d_name.len;
551 char *new_name = (char *)new_dentry->d_name.name; 554 const unsigned char *new_name = new_dentry->d_name.name;
552 int new_len = new_dentry->d_name.len; 555 unsigned new_len = new_dentry->d_name.len;
553 struct inode *i = old_dentry->d_inode; 556 struct inode *i = old_dentry->d_inode;
554 struct inode *new_inode = new_dentry->d_inode; 557 struct inode *new_inode = new_dentry->d_inode;
555 struct quad_buffer_head qbh, qbh1; 558 struct quad_buffer_head qbh, qbh1;
@@ -560,9 +563,9 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
560 struct buffer_head *bh; 563 struct buffer_head *bh;
561 struct fnode *fnode; 564 struct fnode *fnode;
562 int err; 565 int err;
563 if ((err = hpfs_chk_name((char *)new_name, &new_len))) return err; 566 if ((err = hpfs_chk_name(new_name, &new_len))) return err;
564 err = 0; 567 err = 0;
565 hpfs_adjust_length((char *)old_name, &old_len); 568 hpfs_adjust_length(old_name, &old_len);
566 569
567 lock_kernel(); 570 lock_kernel();
568 /* order doesn't matter, due to VFS exclusion */ 571 /* order doesn't matter, due to VFS exclusion */
@@ -579,7 +582,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
579 goto end1; 582 goto end1;
580 } 583 }
581 584
582 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) { 585 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
583 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed"); 586 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed");
584 err = -ENOENT; 587 err = -ENOENT;
585 goto end1; 588 goto end1;
@@ -590,7 +593,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
590 if (new_inode) { 593 if (new_inode) {
591 int r; 594 int r;
592 if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) { 595 if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 1)) != 2) {
593 if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, (char *)new_name, new_len, NULL, &qbh1))) { 596 if ((nde = map_dirent(new_dir, hpfs_i(new_dir)->i_dno, new_name, new_len, NULL, &qbh1))) {
594 clear_nlink(new_inode); 597 clear_nlink(new_inode);
595 copy_de(nde, &de); 598 copy_de(nde, &de);
596 memcpy(nde->name, new_name, new_len); 599 memcpy(nde->name, new_name, new_len);
@@ -618,7 +621,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
618 } 621 }
619 622
620 if (new_dir == old_dir) 623 if (new_dir == old_dir)
621 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, (char *)old_name, old_len, &dno, &qbh))) { 624 if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
622 hpfs_unlock_creation(i->i_sb); 625 hpfs_unlock_creation(i->i_sb);
623 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2"); 626 hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
624 err = -ENOENT; 627 err = -ENOENT;
@@ -648,7 +651,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
648 brelse(bh); 651 brelse(bh);
649 } 652 }
650 hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv; 653 hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv;
651 hpfs_decide_conv(i, (char *)new_name, new_len); 654 hpfs_decide_conv(i, new_name, new_len);
652end1: 655end1:
653 if (old_dir != new_dir) 656 if (old_dir != new_dir)
654 mutex_unlock(&hpfs_i(new_dir)->i_mutex); 657 mutex_unlock(&hpfs_i(new_dir)->i_mutex);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 7239efc690d8..2e4dfa8593da 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -718,7 +718,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
718 struct vfsmount *proc_mnt; 718 struct vfsmount *proc_mnt;
719 int err = -ENOENT; 719 int err = -ENOENT;
720 720
721 proc_mnt = do_kern_mount("proc", 0, "proc", NULL); 721 proc_mnt = mntget(current->nsproxy->pid_ns->proc_mnt);
722 if (IS_ERR(proc_mnt)) 722 if (IS_ERR(proc_mnt))
723 goto out; 723 goto out;
724 724
diff --git a/fs/internal.h b/fs/internal.h
index e96a1667d749..8a03a5447bdf 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -70,6 +70,8 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
70 70
71extern void __init mnt_init(void); 71extern void __init mnt_init(void);
72 72
73extern spinlock_t vfsmount_lock;
74
73/* 75/*
74 * fs_struct.c 76 * fs_struct.c
75 */ 77 */
diff --git a/fs/libfs.c b/fs/libfs.c
index 6e8d17e1dc4c..9e50bcf55857 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -338,28 +338,14 @@ int simple_readpage(struct file *file, struct page *page)
338 return 0; 338 return 0;
339} 339}
340 340
341int simple_prepare_write(struct file *file, struct page *page,
342 unsigned from, unsigned to)
343{
344 if (!PageUptodate(page)) {
345 if (to - from != PAGE_CACHE_SIZE)
346 zero_user_segments(page,
347 0, from,
348 to, PAGE_CACHE_SIZE);
349 }
350 return 0;
351}
352
353int simple_write_begin(struct file *file, struct address_space *mapping, 341int simple_write_begin(struct file *file, struct address_space *mapping,
354 loff_t pos, unsigned len, unsigned flags, 342 loff_t pos, unsigned len, unsigned flags,
355 struct page **pagep, void **fsdata) 343 struct page **pagep, void **fsdata)
356{ 344{
357 struct page *page; 345 struct page *page;
358 pgoff_t index; 346 pgoff_t index;
359 unsigned from;
360 347
361 index = pos >> PAGE_CACHE_SHIFT; 348 index = pos >> PAGE_CACHE_SHIFT;
362 from = pos & (PAGE_CACHE_SIZE - 1);
363 349
364 page = grab_cache_page_write_begin(mapping, index, flags); 350 page = grab_cache_page_write_begin(mapping, index, flags);
365 if (!page) 351 if (!page)
@@ -367,43 +353,59 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
367 353
368 *pagep = page; 354 *pagep = page;
369 355
370 return simple_prepare_write(file, page, from, from+len); 356 if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
371} 357 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
372
373static int simple_commit_write(struct file *file, struct page *page,
374 unsigned from, unsigned to)
375{
376 struct inode *inode = page->mapping->host;
377 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
378 358
379 if (!PageUptodate(page)) 359 zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
380 SetPageUptodate(page); 360 }
381 /*
382 * No need to use i_size_read() here, the i_size
383 * cannot change under us because we hold the i_mutex.
384 */
385 if (pos > inode->i_size)
386 i_size_write(inode, pos);
387 set_page_dirty(page);
388 return 0; 361 return 0;
389} 362}
390 363
364/**
365 * simple_write_end - .write_end helper for non-block-device FSes
366 * @available: See .write_end of address_space_operations
367 * @file: "
368 * @mapping: "
369 * @pos: "
370 * @len: "
371 * @copied: "
372 * @page: "
373 * @fsdata: "
374 *
375 * simple_write_end does the minimum needed for updating a page after writing is
376 * done. It has the same API signature as the .write_end of
377 * address_space_operations vector. So it can just be set onto .write_end for
378 * FSes that don't need any other processing. i_mutex is assumed to be held.
379 * Block based filesystems should use generic_write_end().
380 * NOTE: Even though i_size might get updated by this function, mark_inode_dirty
381 * is not called, so a filesystem that actually does store data in .write_inode
382 * should extend on what's done here with a call to mark_inode_dirty() in the
383 * case that i_size has changed.
384 */
391int simple_write_end(struct file *file, struct address_space *mapping, 385int simple_write_end(struct file *file, struct address_space *mapping,
392 loff_t pos, unsigned len, unsigned copied, 386 loff_t pos, unsigned len, unsigned copied,
393 struct page *page, void *fsdata) 387 struct page *page, void *fsdata)
394{ 388{
395 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 389 struct inode *inode = page->mapping->host;
390 loff_t last_pos = pos + copied;
396 391
397 /* zero the stale part of the page if we did a short copy */ 392 /* zero the stale part of the page if we did a short copy */
398 if (copied < len) { 393 if (copied < len) {
399 void *kaddr = kmap_atomic(page, KM_USER0); 394 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
400 memset(kaddr + from + copied, 0, len - copied); 395
401 flush_dcache_page(page); 396 zero_user(page, from + copied, len - copied);
402 kunmap_atomic(kaddr, KM_USER0);
403 } 397 }
404 398
405 simple_commit_write(file, page, from, from+copied); 399 if (!PageUptodate(page))
400 SetPageUptodate(page);
401 /*
402 * No need to use i_size_read() here, the i_size
403 * cannot change under us because we hold the i_mutex.
404 */
405 if (last_pos > inode->i_size)
406 i_size_write(inode, last_pos);
406 407
408 set_page_dirty(page);
407 unlock_page(page); 409 unlock_page(page);
408 page_cache_release(page); 410 page_cache_release(page);
409 411
@@ -853,7 +855,6 @@ EXPORT_SYMBOL(simple_getattr);
853EXPORT_SYMBOL(simple_link); 855EXPORT_SYMBOL(simple_link);
854EXPORT_SYMBOL(simple_lookup); 856EXPORT_SYMBOL(simple_lookup);
855EXPORT_SYMBOL(simple_pin_fs); 857EXPORT_SYMBOL(simple_pin_fs);
856EXPORT_UNUSED_SYMBOL(simple_prepare_write);
857EXPORT_SYMBOL(simple_readpage); 858EXPORT_SYMBOL(simple_readpage);
858EXPORT_SYMBOL(simple_release_fs); 859EXPORT_SYMBOL(simple_release_fs);
859EXPORT_SYMBOL(simple_rename); 860EXPORT_SYMBOL(simple_rename);
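As the new kerneldoc above spells out, simple_write_begin()/simple_write_end() are meant to be dropped straight into the address_space_operations of a filesystem that keeps its data only in the page cache. A minimal sketch of such a wiring (ramfs-style; the surrounding ops are illustrative and not part of this patch):

/* Sketch, assuming a ramfs-like in-memory filesystem of this era. */
static const struct address_space_operations example_aops = {
	.readpage	= simple_readpage,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,	/* updates i_size, dirties the page */
	.set_page_dirty	= __set_page_dirty_no_writeback,
};

Per the note in the comment, a filesystem that really persists inodes would also call mark_inode_dirty() when i_size grows, and block-based filesystems keep using block_write_begin()/generic_write_end() instead.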
diff --git a/fs/locks.c b/fs/locks.c
index a8794f233bc9..ae9ded026b7c 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1182,8 +1182,9 @@ int __break_lease(struct inode *inode, unsigned int mode)
1182 struct file_lock *fl; 1182 struct file_lock *fl;
1183 unsigned long break_time; 1183 unsigned long break_time;
1184 int i_have_this_lease = 0; 1184 int i_have_this_lease = 0;
1185 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1185 1186
1186 new_fl = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK); 1187 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1187 1188
1188 lock_kernel(); 1189 lock_kernel();
1189 1190
@@ -1197,7 +1198,7 @@ int __break_lease(struct inode *inode, unsigned int mode)
1197 if (fl->fl_owner == current->files) 1198 if (fl->fl_owner == current->files)
1198 i_have_this_lease = 1; 1199 i_have_this_lease = 1;
1199 1200
1200 if (mode & FMODE_WRITE) { 1201 if (want_write) {
1201 /* If we want write access, we have to revoke any lease. */ 1202 /* If we want write access, we have to revoke any lease. */
1202 future = F_UNLCK | F_INPROGRESS; 1203 future = F_UNLCK | F_INPROGRESS;
1203 } else if (flock->fl_type & F_INPROGRESS) { 1204 } else if (flock->fl_type & F_INPROGRESS) {
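The fs/locks.c change above matters because __break_lease() is handed open()-style access flags, not a file's f_mode (the may_open() hunk below makes the same switch): O_RDONLY is 0, O_WRONLY is 1, O_RDWR is 2, while FMODE_WRITE happens to be the bit value 2, so the old `mode & FMODE_WRITE` test missed O_WRONLY opens entirely. A small userspace-style illustration (FMODE_WRITE's numeric value is hard-coded here only for the comparison):

#include <stdio.h>
#include <fcntl.h>

#define OLD_FMODE_WRITE 0x2	/* kernel-internal bit, shown for illustration */

int main(void)
{
	int acc[] = { O_RDONLY, O_WRONLY, O_RDWR };
	const char *name[] = { "O_RDONLY", "O_WRONLY", "O_RDWR" };

	for (int i = 0; i < 3; i++)
		printf("%-8s old test: %d   new test: %d\n", name[i],
		       !!(acc[i] & OLD_FMODE_WRITE),		/* misses O_WRONLY */
		       (acc[i] & O_ACCMODE) != O_RDONLY);	/* catches both write modes */
	return 0;
}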
diff --git a/fs/namei.c b/fs/namei.c
index 865282f8e012..0741c69b3319 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -689,33 +689,20 @@ static __always_inline void follow_dotdot(struct nameidata *nd)
689 set_root(nd); 689 set_root(nd);
690 690
691 while(1) { 691 while(1) {
692 struct vfsmount *parent;
693 struct dentry *old = nd->path.dentry; 692 struct dentry *old = nd->path.dentry;
694 693
695 if (nd->path.dentry == nd->root.dentry && 694 if (nd->path.dentry == nd->root.dentry &&
696 nd->path.mnt == nd->root.mnt) { 695 nd->path.mnt == nd->root.mnt) {
697 break; 696 break;
698 } 697 }
699 spin_lock(&dcache_lock);
700 if (nd->path.dentry != nd->path.mnt->mnt_root) { 698 if (nd->path.dentry != nd->path.mnt->mnt_root) {
701 nd->path.dentry = dget(nd->path.dentry->d_parent); 699 /* rare case of legitimate dget_parent()... */
702 spin_unlock(&dcache_lock); 700 nd->path.dentry = dget_parent(nd->path.dentry);
703 dput(old); 701 dput(old);
704 break; 702 break;
705 } 703 }
706 spin_unlock(&dcache_lock); 704 if (!follow_up(&nd->path))
707 spin_lock(&vfsmount_lock);
708 parent = nd->path.mnt->mnt_parent;
709 if (parent == nd->path.mnt) {
710 spin_unlock(&vfsmount_lock);
711 break; 705 break;
712 }
713 mntget(parent);
714 nd->path.dentry = dget(nd->path.mnt->mnt_mountpoint);
715 spin_unlock(&vfsmount_lock);
716 dput(old);
717 mntput(nd->path.mnt);
718 nd->path.mnt = parent;
719 } 706 }
720 follow_mount(&nd->path); 707 follow_mount(&nd->path);
721} 708}
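The simplified follow_dotdot() above leans on two existing helpers instead of open-coding the locking: dget_parent() takes a properly refcounted parent reference under its own locking, and follow_up() performs the same mount-crossing dance that the removed vfsmount_lock block did by hand. For reference, an approximate sketch of the era's follow_up() (abridged; treat the details as a reading aid, not a quote):

/* Approximate sketch of follow_up() circa 2.6.33. */
int follow_up(struct path *path)
{
	struct vfsmount *parent;
	struct dentry *mountpoint;

	spin_lock(&vfsmount_lock);
	parent = path->mnt->mnt_parent;
	if (parent == path->mnt) {		/* already at the topmost mount */
		spin_unlock(&vfsmount_lock);
		return 0;
	}
	mntget(parent);
	mountpoint = dget(path->mnt->mnt_mountpoint);
	spin_unlock(&vfsmount_lock);
	dput(path->dentry);
	path->dentry = mountpoint;		/* step onto the covering mountpoint */
	mntput(path->mnt);
	path->mnt = parent;
	return 1;
}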
@@ -823,6 +810,17 @@ fail:
823} 810}
824 811
825/* 812/*
813 * This is a temporary kludge to deal with "automount" symlinks; proper
814 * solution is to trigger them on follow_mount(), so that do_lookup()
815 * would DTRT. To be killed before 2.6.34-final.
816 */
817static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
818{
819 return inode && unlikely(inode->i_op->follow_link) &&
820 ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
821}
822
823/*
826 * Name resolution. 824 * Name resolution.
827 * This is the basic name resolution function, turning a pathname into 825 * This is the basic name resolution function, turning a pathname into
828 * the final dentry. We expect 'base' to be positive and a directory. 826 * the final dentry. We expect 'base' to be positive and a directory.
@@ -942,8 +940,7 @@ last_component:
942 if (err) 940 if (err)
943 break; 941 break;
944 inode = next.dentry->d_inode; 942 inode = next.dentry->d_inode;
945 if ((lookup_flags & LOOKUP_FOLLOW) 943 if (follow_on_final(inode, lookup_flags)) {
946 && inode && inode->i_op->follow_link) {
947 err = do_follow_link(&next, nd); 944 err = do_follow_link(&next, nd);
948 if (err) 945 if (err)
949 goto return_err; 946 goto return_err;
@@ -1493,7 +1490,7 @@ int may_open(struct path *path, int acc_mode, int flag)
1493 * An append-only file must be opened in append mode for writing. 1490 * An append-only file must be opened in append mode for writing.
1494 */ 1491 */
1495 if (IS_APPEND(inode)) { 1492 if (IS_APPEND(inode)) {
1496 if ((flag & FMODE_WRITE) && !(flag & O_APPEND)) 1493 if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
1497 return -EPERM; 1494 return -EPERM;
1498 if (flag & O_TRUNC) 1495 if (flag & O_TRUNC)
1499 return -EPERM; 1496 return -EPERM;
@@ -1537,7 +1534,7 @@ static int handle_truncate(struct path *path)
1537 * what get passed to sys_open(). 1534 * what get passed to sys_open().
1538 */ 1535 */
1539static int __open_namei_create(struct nameidata *nd, struct path *path, 1536static int __open_namei_create(struct nameidata *nd, struct path *path,
1540 int flag, int mode) 1537 int open_flag, int mode)
1541{ 1538{
1542 int error; 1539 int error;
1543 struct dentry *dir = nd->path.dentry; 1540 struct dentry *dir = nd->path.dentry;
@@ -1555,7 +1552,7 @@ out_unlock:
1555 if (error) 1552 if (error)
1556 return error; 1553 return error;
1557 /* Don't check for write permission, don't truncate */ 1554 /* Don't check for write permission, don't truncate */
1558 return may_open(&nd->path, 0, flag & ~O_TRUNC); 1555 return may_open(&nd->path, 0, open_flag & ~O_TRUNC);
1559} 1556}
1560 1557
1561/* 1558/*
@@ -1726,7 +1723,7 @@ do_last:
1726 error = mnt_want_write(nd.path.mnt); 1723 error = mnt_want_write(nd.path.mnt);
1727 if (error) 1724 if (error)
1728 goto exit_mutex_unlock; 1725 goto exit_mutex_unlock;
1729 error = __open_namei_create(&nd, &path, flag, mode); 1726 error = __open_namei_create(&nd, &path, open_flag, mode);
1730 if (error) { 1727 if (error) {
1731 mnt_drop_write(nd.path.mnt); 1728 mnt_drop_write(nd.path.mnt);
1732 goto exit; 1729 goto exit;
@@ -1788,7 +1785,7 @@ ok:
1788 if (error) 1785 if (error)
1789 goto exit; 1786 goto exit;
1790 } 1787 }
1791 error = may_open(&nd.path, acc_mode, flag); 1788 error = may_open(&nd.path, acc_mode, open_flag);
1792 if (error) { 1789 if (error) {
1793 if (will_truncate) 1790 if (will_truncate)
1794 mnt_drop_write(nd.path.mnt); 1791 mnt_drop_write(nd.path.mnt);
@@ -2265,8 +2262,11 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
2265 error = -EBUSY; 2262 error = -EBUSY;
2266 else { 2263 else {
2267 error = security_inode_unlink(dir, dentry); 2264 error = security_inode_unlink(dir, dentry);
2268 if (!error) 2265 if (!error) {
2269 error = dir->i_op->unlink(dir, dentry); 2266 error = dir->i_op->unlink(dir, dentry);
2267 if (!error)
2268 dentry->d_inode->i_flags |= S_DEAD;
2269 }
2270 } 2270 }
2271 mutex_unlock(&dentry->d_inode->i_mutex); 2271 mutex_unlock(&dentry->d_inode->i_mutex);
2272 2272
@@ -2619,6 +2619,8 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
2619 else 2619 else
2620 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); 2620 error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
2621 if (!error) { 2621 if (!error) {
2622 if (target)
2623 target->i_flags |= S_DEAD;
2622 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) 2624 if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
2623 d_move(old_dentry, new_dentry); 2625 d_move(old_dentry, new_dentry);
2624 } 2626 }
diff --git a/fs/namespace.c b/fs/namespace.c
index c768f733c8d6..8174c8ab5c70 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -573,7 +573,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
573 mnt->mnt_master = old; 573 mnt->mnt_master = old;
574 CLEAR_MNT_SHARED(mnt); 574 CLEAR_MNT_SHARED(mnt);
575 } else if (!(flag & CL_PRIVATE)) { 575 } else if (!(flag & CL_PRIVATE)) {
576 if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old)) 576 if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
577 list_add(&mnt->mnt_share, &old->mnt_share); 577 list_add(&mnt->mnt_share, &old->mnt_share);
578 if (IS_MNT_SLAVE(old)) 578 if (IS_MNT_SLAVE(old))
579 list_add(&mnt->mnt_slave, &old->mnt_slave); 579 list_add(&mnt->mnt_slave, &old->mnt_slave);
@@ -737,6 +737,21 @@ static void m_stop(struct seq_file *m, void *v)
737 up_read(&namespace_sem); 737 up_read(&namespace_sem);
738} 738}
739 739
740int mnt_had_events(struct proc_mounts *p)
741{
742 struct mnt_namespace *ns = p->ns;
743 int res = 0;
744
745 spin_lock(&vfsmount_lock);
746 if (p->event != ns->event) {
747 p->event = ns->event;
748 res = 1;
749 }
750 spin_unlock(&vfsmount_lock);
751
752 return res;
753}
754
740struct proc_fs_info { 755struct proc_fs_info {
741 int flag; 756 int flag;
742 const char *str; 757 const char *str;
@@ -1121,8 +1136,15 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1121{ 1136{
1122 struct path path; 1137 struct path path;
1123 int retval; 1138 int retval;
1139 int lookup_flags = 0;
1124 1140
1125 retval = user_path(name, &path); 1141 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1142 return -EINVAL;
1143
1144 if (!(flags & UMOUNT_NOFOLLOW))
1145 lookup_flags |= LOOKUP_FOLLOW;
1146
1147 retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
1126 if (retval) 1148 if (retval)
1127 goto out; 1149 goto out;
1128 retval = -EINVAL; 1150 retval = -EINVAL;
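Two user-visible effects fall out of the umount() hunk above: previously-ignored garbage in the flags argument now fails with -EINVAL, and the new UMOUNT_NOFOLLOW flag stops the path lookup from chasing a trailing symlink, which helps when unmounting user-controlled locations. A userspace sketch of the intended use, assuming the 0x00000008 value the flag is given in the headers:

#include <stdio.h>
#include <sys/mount.h>

#ifndef UMOUNT_NOFOLLOW
#define UMOUNT_NOFOLLOW 0x00000008	/* don't dereference a trailing symlink */
#endif

int main(void)
{
	/* /mnt/untrusted is only an example path. */
	if (umount2("/mnt/untrusted", UMOUNT_NOFOLLOW) < 0)
		perror("umount2");	/* older kernels silently ignored unknown flags */
	return 0;
}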
@@ -1246,6 +1268,21 @@ void drop_collected_mounts(struct vfsmount *mnt)
1246 release_mounts(&umount_list); 1268 release_mounts(&umount_list);
1247} 1269}
1248 1270
1271int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
1272 struct vfsmount *root)
1273{
1274 struct vfsmount *mnt;
1275 int res = f(root, arg);
1276 if (res)
1277 return res;
1278 list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
1279 res = f(mnt, arg);
1280 if (res)
1281 return res;
1282 }
1283 return 0;
1284}
1285
1249static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end) 1286static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
1250{ 1287{
1251 struct vfsmount *p; 1288 struct vfsmount *p;
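iterate_mounts(), added above, simply applies a caller-supplied callback to a mount and to everything on its mnt_list, stopping at the first non-zero return. A hedged usage sketch (the callback and caller names are made up for illustration):

/* Illustrative caller: count the mounts in a collected tree. */
static int count_one(struct vfsmount *mnt, void *arg)
{
	(*(int *)arg)++;
	return 0;			/* returning non-zero would abort the walk */
}

static int count_mounts(struct vfsmount *tree_root)
{
	int n = 0;

	iterate_mounts(count_one, &n, tree_root);
	return n;
}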
@@ -1538,7 +1575,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
1538 err = do_remount_sb(sb, flags, data, 0); 1575 err = do_remount_sb(sb, flags, data, 0);
1539 if (!err) { 1576 if (!err) {
1540 spin_lock(&vfsmount_lock); 1577 spin_lock(&vfsmount_lock);
1541 mnt_flags |= path->mnt->mnt_flags & MNT_PNODE_MASK; 1578 mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
1542 path->mnt->mnt_flags = mnt_flags; 1579 path->mnt->mnt_flags = mnt_flags;
1543 spin_unlock(&vfsmount_lock); 1580 spin_unlock(&vfsmount_lock);
1544 } 1581 }
@@ -1671,7 +1708,7 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
1671{ 1708{
1672 int err; 1709 int err;
1673 1710
1674 mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD); 1711 mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
1675 1712
1676 down_write(&namespace_sem); 1713 down_write(&namespace_sem);
1677 /* Something was mounted here while we slept */ 1714 /* Something was mounted here while we slept */
@@ -2314,17 +2351,13 @@ void __init mnt_init(void)
2314 2351
2315void put_mnt_ns(struct mnt_namespace *ns) 2352void put_mnt_ns(struct mnt_namespace *ns)
2316{ 2353{
2317 struct vfsmount *root;
2318 LIST_HEAD(umount_list); 2354 LIST_HEAD(umount_list);
2319 2355
2320 if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock)) 2356 if (!atomic_dec_and_test(&ns->count))
2321 return; 2357 return;
2322 root = ns->root;
2323 ns->root = NULL;
2324 spin_unlock(&vfsmount_lock);
2325 down_write(&namespace_sem); 2358 down_write(&namespace_sem);
2326 spin_lock(&vfsmount_lock); 2359 spin_lock(&vfsmount_lock);
2327 umount_tree(root, 0, &umount_list); 2360 umount_tree(ns->root, 0, &umount_list);
2328 spin_unlock(&vfsmount_lock); 2361 spin_unlock(&vfsmount_lock);
2329 up_write(&namespace_sem); 2362 up_write(&namespace_sem);
2330 release_mounts(&umount_list); 2363 release_mounts(&umount_list);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e1d415e97849..0d289823e856 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
342 data->res.fattr = &data->fattr; 342 data->res.fattr = &data->fattr;
343 data->res.eof = 0; 343 data->res.eof = 0;
344 data->res.count = bytes; 344 data->res.count = bytes;
345 nfs_fattr_init(&data->fattr);
345 msg.rpc_argp = &data->args; 346 msg.rpc_argp = &data->args;
346 msg.rpc_resp = &data->res; 347 msg.rpc_resp = &data->res;
347 348
@@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
575 data->res.count = 0; 576 data->res.count = 0;
576 data->res.fattr = &data->fattr; 577 data->res.fattr = &data->fattr;
577 data->res.verf = &data->verf; 578 data->res.verf = &data->verf;
579 nfs_fattr_init(&data->fattr);
578 580
579 NFS_PROTO(data->inode)->commit_setup(data, &msg); 581 NFS_PROTO(data->inode)->commit_setup(data, &msg);
580 582
@@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
766 data->res.fattr = &data->fattr; 768 data->res.fattr = &data->fattr;
767 data->res.count = bytes; 769 data->res.count = bytes;
768 data->res.verf = &data->verf; 770 data->res.verf = &data->verf;
771 nfs_fattr_init(&data->fattr);
769 772
770 task_setup_data.task = &data->task; 773 task_setup_data.task = &data->task;
771 task_setup_data.callback_data = data; 774 task_setup_data.callback_data = data;
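The three nfs_fattr_init() calls added in fs/nfs/direct.c make sure each RPC starts from a clean attribute structure instead of whatever the previous use of the preallocated nfs_fattr left behind. For reference, an approximate sketch of what that initializer does in this era (abridged; exact fields may differ):

/* Approximate sketch of fs/nfs/inode.c:nfs_fattr_init() circa 2.6.33. */
void nfs_fattr_init(struct nfs_fattr *fattr)
{
	fattr->valid = 0;			/* no attributes received yet */
	fattr->time_start = jiffies;
	fattr->gencount = nfs_inc_attr_generation_counter();
}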
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index fa588006588d..237874f1af23 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
354 */ 354 */
355int nfs_fscache_release_page(struct page *page, gfp_t gfp) 355int nfs_fscache_release_page(struct page *page, gfp_t gfp)
356{ 356{
357 struct nfs_inode *nfsi = NFS_I(page->mapping->host);
358 struct fscache_cookie *cookie = nfsi->fscache;
359
360 BUG_ON(!cookie);
361
362 if (PageFsCache(page)) { 357 if (PageFsCache(page)) {
358 struct nfs_inode *nfsi = NFS_I(page->mapping->host);
359 struct fscache_cookie *cookie = nfsi->fscache;
360
361 BUG_ON(!cookie);
363 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", 362 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
364 cookie, page, nfsi); 363 cookie, page, nfsi);
365 364
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f141bde7756a..7570573bdb30 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -574,14 +574,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
574 nfs_revalidate_inode(server, inode); 574 nfs_revalidate_inode(server, inode);
575} 575}
576 576
577static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred) 577static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred)
578{ 578{
579 struct nfs_open_context *ctx; 579 struct nfs_open_context *ctx;
580 580
581 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 581 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
582 if (ctx != NULL) { 582 if (ctx != NULL) {
583 ctx->path.dentry = dget(dentry); 583 ctx->path = *path;
584 ctx->path.mnt = mntget(mnt); 584 path_get(&ctx->path);
585 ctx->cred = get_rpccred(cred); 585 ctx->cred = get_rpccred(cred);
586 ctx->state = NULL; 586 ctx->state = NULL;
587 ctx->lockowner = current->files; 587 ctx->lockowner = current->files;
@@ -686,7 +686,7 @@ int nfs_open(struct inode *inode, struct file *filp)
686 cred = rpc_lookup_cred(); 686 cred = rpc_lookup_cred();
687 if (IS_ERR(cred)) 687 if (IS_ERR(cred))
688 return PTR_ERR(cred); 688 return PTR_ERR(cred);
689 ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred); 689 ctx = alloc_nfs_open_context(&filp->f_path, cred);
690 put_rpccred(cred); 690 put_rpccred(cred);
691 if (ctx == NULL) 691 if (ctx == NULL)
692 return -ENOMEM; 692 return -ENOMEM;
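The alloc_nfs_open_context() change above (and the matching nfs4proc.c hunks further down) replaces the per-member dget()/mntget() calls with a struct copy plus path_get(), which pins both halves of the path in one step. For reference, a sketch of the fs/namei.c helpers this code now relies on:

/* Sketch of the era's path reference helpers. */
void path_get(struct path *path)
{
	mntget(path->mnt);
	dget(path->dentry);
}

void path_put(struct path *path)
{
	dput(path->dentry);
	mntput(path->mnt);
}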
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 0adefc40cc89..59047f8d7d72 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -120,7 +120,7 @@ static struct {
120 { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, 120 { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
121 { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, 121 { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
122 { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, 122 { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
123 { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, 123 { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
124}; 124};
125 125
126struct mountres { 126struct mountres {
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5e078b222b4e..7bc2da8efd4a 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -699,7 +699,7 @@ static struct {
699 { NFSERR_BAD_COOKIE, -EBADCOOKIE }, 699 { NFSERR_BAD_COOKIE, -EBADCOOKIE },
700 { NFSERR_NOTSUPP, -ENOTSUPP }, 700 { NFSERR_NOTSUPP, -ENOTSUPP },
701 { NFSERR_TOOSMALL, -ETOOSMALL }, 701 { NFSERR_TOOSMALL, -ETOOSMALL },
702 { NFSERR_SERVERFAULT, -ESERVERFAULT }, 702 { NFSERR_SERVERFAULT, -EREMOTEIO },
703 { NFSERR_BADTYPE, -EBADTYPE }, 703 { NFSERR_BADTYPE, -EBADTYPE },
704 { NFSERR_JUKEBOX, -EJUKEBOX }, 704 { NFSERR_JUKEBOX, -EJUKEBOX },
705 { -1, -EIO } 705 { -1, -EIO }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 375f0fae2c6a..84d83be25a98 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -724,8 +724,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
724 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); 724 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
725 if (p->o_arg.seqid == NULL) 725 if (p->o_arg.seqid == NULL)
726 goto err_free; 726 goto err_free;
727 p->path.mnt = mntget(path->mnt); 727 path_get(path);
728 p->path.dentry = dget(path->dentry); 728 p->path = *path;
729 p->dir = parent; 729 p->dir = parent;
730 p->owner = sp; 730 p->owner = sp;
731 atomic_inc(&sp->so_count); 731 atomic_inc(&sp->so_count);
@@ -1944,8 +1944,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
1944 calldata->res.seqid = calldata->arg.seqid; 1944 calldata->res.seqid = calldata->arg.seqid;
1945 calldata->res.server = server; 1945 calldata->res.server = server;
1946 calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE; 1946 calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
1947 calldata->path.mnt = mntget(path->mnt); 1947 path_get(path);
1948 calldata->path.dentry = dget(path->dentry); 1948 calldata->path = *path;
1949 1949
1950 msg.rpc_argp = &calldata->arg, 1950 msg.rpc_argp = &calldata->arg,
1951 msg.rpc_resp = &calldata->res, 1951 msg.rpc_resp = &calldata->res,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e437fd6a819f..5cd5184b56db 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr,
4631 * If the server returns different values for sessionID, slotID or 4631 * If the server returns different values for sessionID, slotID or
4632 * sequence number, the server is looney tunes. 4632 * sequence number, the server is looney tunes.
4633 */ 4633 */
4634 status = -ESERVERFAULT; 4634 status = -EREMOTEIO;
4635 4635
4636 if (memcmp(id.data, res->sr_session->sess_id.data, 4636 if (memcmp(id.data, res->sr_session->sess_id.data,
4637 NFS4_MAX_SESSIONID_LEN)) { 4637 NFS4_MAX_SESSIONID_LEN)) {
@@ -5774,7 +5774,7 @@ static struct {
5774 { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, 5774 { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
5775 { NFS4ERR_NOTSUPP, -ENOTSUPP }, 5775 { NFS4ERR_NOTSUPP, -ENOTSUPP },
5776 { NFS4ERR_TOOSMALL, -ETOOSMALL }, 5776 { NFS4ERR_TOOSMALL, -ETOOSMALL },
5777 { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, 5777 { NFS4ERR_SERVERFAULT, -EREMOTEIO },
5778 { NFS4ERR_BADTYPE, -EBADTYPE }, 5778 { NFS4ERR_BADTYPE, -EBADTYPE },
5779 { NFS4ERR_LOCKED, -EAGAIN }, 5779 { NFS4ERR_LOCKED, -EAGAIN },
5780 { NFS4ERR_SYMLINK, -ELOOP }, 5780 { NFS4ERR_SYMLINK, -ELOOP },
@@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat)
5801 } 5801 }
5802 if (stat <= 10000 || stat > 10100) { 5802 if (stat <= 10000 || stat > 10100) {
5803 /* The server is looney tunes. */ 5803 /* The server is looney tunes. */
5804 return -ESERVERFAULT; 5804 return -EREMOTEIO;
5805 } 5805 }
5806 /* If we cannot translate the error, the recovery routines should 5806 /* If we cannot translate the error, the recovery routines should
5807 * handle it. 5807 * handle it.
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7b54b8bb101f..d63d964a0392 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1598,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1598 struct nfs_page *req; 1598 struct nfs_page *req;
1599 int ret; 1599 int ret;
1600 1600
1601 if (PageFsCache(page)) 1601 nfs_fscache_release_page(page, GFP_KERNEL);
1602 nfs_fscache_release_page(page, GFP_KERNEL);
1603 1602
1604 req = nfs_find_and_lock_request(page); 1603 req = nfs_find_and_lock_request(page);
1605 ret = PTR_ERR(req); 1604 ret = PTR_ERR(req);
diff --git a/fs/nfsctl.c b/fs/nfsctl.c
index d3854d94b7cf..bf9cbd242ddd 100644
--- a/fs/nfsctl.c
+++ b/fs/nfsctl.c
@@ -36,10 +36,9 @@ static struct file *do_open(char *name, int flags)
36 return ERR_PTR(error); 36 return ERR_PTR(error);
37 37
38 if (flags == O_RDWR) 38 if (flags == O_RDWR)
39 error = may_open(&nd.path, MAY_READ|MAY_WRITE, 39 error = may_open(&nd.path, MAY_READ|MAY_WRITE, flags);
40 FMODE_READ|FMODE_WRITE);
41 else 40 else
42 error = may_open(&nd.path, MAY_WRITE, FMODE_WRITE); 41 error = may_open(&nd.path, MAY_WRITE, flags);
43 42
44 if (!error) 43 if (!error)
45 return dentry_open(nd.path.dentry, nd.path.mnt, flags, 44 return dentry_open(nd.path.dentry, nd.path.mnt, flags,
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c487810a2366..a0c4016413f1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
1316 1316
1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) 1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
1318{ 1318{
1319 struct svc_export *exp;
1320 u32 fsidv[2]; 1319 u32 fsidv[2];
1321 1320
1322 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); 1321 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
1323 1322
1324 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); 1323 return rqst_exp_find(rqstp, FSID_NUM, fsidv);
1325 /*
1326 * We shouldn't have accepting an nfsv4 request at all if we
1327 * don't have a pseudoexport!:
1328 */
1329 if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
1330 exp = ERR_PTR(-ESERVERFAULT);
1331 return exp;
1332} 1324}
1333 1325
1334/* 1326/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a8587e90fd5a..bbf72d8f9fc0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2121,9 +2121,15 @@ out_acl:
2121 * and this is the root of a cross-mounted filesystem. 2121 * and this is the root of a cross-mounted filesystem.
2122 */ 2122 */
2123 if (ignore_crossmnt == 0 && 2123 if (ignore_crossmnt == 0 &&
2124 exp->ex_path.mnt->mnt_root->d_inode == dentry->d_inode) { 2124 dentry == exp->ex_path.mnt->mnt_root) {
2125 err = vfs_getattr(exp->ex_path.mnt->mnt_parent, 2125 struct path path = exp->ex_path;
2126 exp->ex_path.mnt->mnt_mountpoint, &stat); 2126 path_get(&path);
2127 while (follow_up(&path)) {
2128 if (path.dentry != path.mnt->mnt_root)
2129 break;
2130 }
2131 err = vfs_getattr(path.mnt, path.dentry, &stat);
2132 path_put(&path);
2127 if (err) 2133 if (err)
2128 goto out_nfserr; 2134 goto out_nfserr;
2129 } 2135 }
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 97d79eff6b7f..15dc2deaac5f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -361,7 +361,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
361 * If we are changing the size of the file, then 361 * If we are changing the size of the file, then
362 * we need to break all leases. 362 * we need to break all leases.
363 */ 363 */
364 host_err = break_lease(inode, FMODE_WRITE | O_NONBLOCK); 364 host_err = break_lease(inode, O_WRONLY | O_NONBLOCK);
365 if (host_err == -EWOULDBLOCK) 365 if (host_err == -EWOULDBLOCK)
366 host_err = -ETIMEDOUT; 366 host_err = -ETIMEDOUT;
367 if (host_err) /* ENOMEM or EWOULDBLOCK */ 367 if (host_err) /* ENOMEM or EWOULDBLOCK */
@@ -734,7 +734,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
734 * Check to see if there are any leases on this file. 734 * Check to see if there are any leases on this file.
735 * This may block while leases are broken. 735 * This may block while leases are broken.
736 */ 736 */
737 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? FMODE_WRITE : 0)); 737 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
738 if (host_err == -EWOULDBLOCK) 738 if (host_err == -EWOULDBLOCK)
739 host_err = -ETIMEDOUT; 739 host_err = -ETIMEDOUT;
740 if (host_err) /* NOMEM or WOULDBLOCK */ 740 if (host_err) /* NOMEM or WOULDBLOCK */
@@ -752,7 +752,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
752 flags, current_cred()); 752 flags, current_cred());
753 if (IS_ERR(*filp)) 753 if (IS_ERR(*filp))
754 host_err = PTR_ERR(*filp); 754 host_err = PTR_ERR(*filp);
755 host_err = ima_file_check(*filp, access); 755 else
756 host_err = ima_file_check(*filp, access);
756out_nfserr: 757out_nfserr:
757 err = nfserrno(host_err); 758 err = nfserrno(host_err);
758out: 759out:
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 76d803e060a9..0092840492ee 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -224,7 +224,7 @@ fail:
224 * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller. 224 * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
225 */ 225 */
226static int 226static int
227nilfs_match(int len, const char * const name, struct nilfs_dir_entry *de) 227nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
228{ 228{
229 if (len != de->name_len) 229 if (len != de->name_len)
230 return 0; 230 return 0;
@@ -349,11 +349,11 @@ done:
349 * Entry is guaranteed to be valid. 349 * Entry is guaranteed to be valid.
350 */ 350 */
351struct nilfs_dir_entry * 351struct nilfs_dir_entry *
352nilfs_find_entry(struct inode *dir, struct dentry *dentry, 352nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
353 struct page **res_page) 353 struct page **res_page)
354{ 354{
355 const char *name = dentry->d_name.name; 355 const unsigned char *name = qstr->name;
356 int namelen = dentry->d_name.len; 356 int namelen = qstr->len;
357 unsigned reclen = NILFS_DIR_REC_LEN(namelen); 357 unsigned reclen = NILFS_DIR_REC_LEN(namelen);
358 unsigned long start, n; 358 unsigned long start, n;
359 unsigned long npages = dir_pages(dir); 359 unsigned long npages = dir_pages(dir);
@@ -424,13 +424,13 @@ struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
424 return de; 424 return de;
425} 425}
426 426
427ino_t nilfs_inode_by_name(struct inode *dir, struct dentry *dentry) 427ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)
428{ 428{
429 ino_t res = 0; 429 ino_t res = 0;
430 struct nilfs_dir_entry *de; 430 struct nilfs_dir_entry *de;
431 struct page *page; 431 struct page *page;
432 432
433 de = nilfs_find_entry(dir, dentry, &page); 433 de = nilfs_find_entry(dir, qstr, &page);
434 if (de) { 434 if (de) {
435 res = le64_to_cpu(de->inode); 435 res = le64_to_cpu(de->inode);
436 kunmap(page); 436 kunmap(page);
@@ -465,7 +465,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
465int nilfs_add_link(struct dentry *dentry, struct inode *inode) 465int nilfs_add_link(struct dentry *dentry, struct inode *inode)
466{ 466{
467 struct inode *dir = dentry->d_parent->d_inode; 467 struct inode *dir = dentry->d_parent->d_inode;
468 const char *name = dentry->d_name.name; 468 const unsigned char *name = dentry->d_name.name;
469 int namelen = dentry->d_name.len; 469 int namelen = dentry->d_name.len;
470 unsigned chunk_size = nilfs_chunk_size(dir); 470 unsigned chunk_size = nilfs_chunk_size(dir);
471 unsigned reclen = NILFS_DIR_REC_LEN(namelen); 471 unsigned reclen = NILFS_DIR_REC_LEN(namelen);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 07ba838ef089..ad6ed2cf19b4 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -67,7 +67,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
67 if (dentry->d_name.len > NILFS_NAME_LEN) 67 if (dentry->d_name.len > NILFS_NAME_LEN)
68 return ERR_PTR(-ENAMETOOLONG); 68 return ERR_PTR(-ENAMETOOLONG);
69 69
70 ino = nilfs_inode_by_name(dir, dentry); 70 ino = nilfs_inode_by_name(dir, &dentry->d_name);
71 inode = NULL; 71 inode = NULL;
72 if (ino) { 72 if (ino) {
73 inode = nilfs_iget(dir->i_sb, ino); 73 inode = nilfs_iget(dir->i_sb, ino);
@@ -81,10 +81,7 @@ struct dentry *nilfs_get_parent(struct dentry *child)
81{ 81{
82 unsigned long ino; 82 unsigned long ino;
83 struct inode *inode; 83 struct inode *inode;
84 struct dentry dotdot; 84 struct qstr dotdot = {.name = "..", .len = 2};
85
86 dotdot.d_name.name = "..";
87 dotdot.d_name.len = 2;
88 85
89 ino = nilfs_inode_by_name(child->d_inode, &dotdot); 86 ino = nilfs_inode_by_name(child->d_inode, &dotdot);
90 if (!ino) 87 if (!ino)
@@ -296,7 +293,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
296 int err; 293 int err;
297 294
298 err = -ENOENT; 295 err = -ENOENT;
299 de = nilfs_find_entry(dir, dentry, &page); 296 de = nilfs_find_entry(dir, &dentry->d_name, &page);
300 if (!de) 297 if (!de)
301 goto out; 298 goto out;
302 299
@@ -389,7 +386,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
389 return err; 386 return err;
390 387
391 err = -ENOENT; 388 err = -ENOENT;
392 old_de = nilfs_find_entry(old_dir, old_dentry, &old_page); 389 old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
393 if (!old_de) 390 if (!old_de)
394 goto out; 391 goto out;
395 392
@@ -409,7 +406,7 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
409 goto out_dir; 406 goto out_dir;
410 407
411 err = -ENOENT; 408 err = -ENOENT;
412 new_de = nilfs_find_entry(new_dir, new_dentry, &new_page); 409 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
413 if (!new_de) 410 if (!new_de)
414 goto out_dir; 411 goto out_dir;
415 inc_nlink(old_inode); 412 inc_nlink(old_inode);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 4da6f67e9a91..8723e5bfd071 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -217,10 +217,10 @@ static inline int nilfs_init_acl(struct inode *inode, struct inode *dir)
217 217
218/* dir.c */ 218/* dir.c */
219extern int nilfs_add_link(struct dentry *, struct inode *); 219extern int nilfs_add_link(struct dentry *, struct inode *);
220extern ino_t nilfs_inode_by_name(struct inode *, struct dentry *); 220extern ino_t nilfs_inode_by_name(struct inode *, const struct qstr *);
221extern int nilfs_make_empty(struct inode *, struct inode *); 221extern int nilfs_make_empty(struct inode *, struct inode *);
222extern struct nilfs_dir_entry * 222extern struct nilfs_dir_entry *
223nilfs_find_entry(struct inode *, struct dentry *, struct page **); 223nilfs_find_entry(struct inode *, const struct qstr *, struct page **);
224extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *); 224extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
225extern int nilfs_empty_dir(struct inode *); 225extern int nilfs_empty_dir(struct inode *);
226extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **); 226extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3dae4a13f6e4..7e9df11260f4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@ bail:
599 return ret; 599 return ret;
600} 600}
601 601
602/* 602/*
603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're 603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
604 * particularly interested in the aio/dio case. Like the core uses 604 * particularly interested in the aio/dio case. Like the core uses
605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from 605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
670 670
671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, 671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
672 inode->i_sb->s_bdev, iov, offset, 672 inode->i_sb->s_bdev, iov, offset,
673 nr_segs, 673 nr_segs,
674 ocfs2_direct_IO_get_blocks, 674 ocfs2_direct_IO_get_blocks,
675 ocfs2_dio_end_io); 675 ocfs2_dio_end_io);
676 676
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d43d34a1dd31..21c808f752d8 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
368 } 368 }
369 ocfs2_metadata_cache_io_unlock(ci); 369 ocfs2_metadata_cache_io_unlock(ci);
370 370
371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
372 (unsigned long long)block, nr, 372 (unsigned long long)block, nr,
373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", 373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
374 flags); 374 flags);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index eda5b8bcddd5..5c9890006708 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
78 78
79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; 79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
80 80
81/* Only sets a new threshold if there are no active regions. 81/* Only sets a new threshold if there are no active regions.
82 * 82 *
83 * No locking or otherwise interesting code is required for reading 83 * No locking or otherwise interesting code is required for reading
84 * o2hb_dead_threshold as it can't change once regions are active and 84 * o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
170 170
171 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " 171 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
172 "milliseconds\n", reg->hr_dev_name, 172 "milliseconds\n", reg->hr_dev_name,
173 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); 173 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
174 o2quo_disk_timeout(); 174 o2quo_disk_timeout();
175} 175}
176 176
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
624 "seq %llu last %llu changed %u equal %u\n", 624 "seq %llu last %llu changed %u equal %u\n",
625 slot->ds_node_num, (long long)slot->ds_last_generation, 625 slot->ds_node_num, (long long)slot->ds_last_generation,
626 le32_to_cpu(hb_block->hb_cksum), 626 le32_to_cpu(hb_block->hb_cksum),
627 (unsigned long long)le64_to_cpu(hb_block->hb_seq), 627 (unsigned long long)le64_to_cpu(hb_block->hb_seq),
628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, 628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
629 slot->ds_equal_samples); 629 slot->ds_equal_samples);
630 630
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 334f231a422c..d8d0c65ac03c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
485 } 485 }
486 486
487 if (was_valid && !valid) { 487 if (was_valid && !valid) {
488 printk(KERN_INFO "o2net: no longer connected to " 488 printk(KERN_NOTICE "o2net: no longer connected to "
489 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); 489 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
490 o2net_complete_nodes_nsw(nn); 490 o2net_complete_nodes_nsw(nn);
491 } 491 }
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
493 if (!was_valid && valid) { 493 if (!was_valid && valid) {
494 o2quo_conn_up(o2net_num_from_nn(nn)); 494 o2quo_conn_up(o2net_num_from_nn(nn));
495 cancel_delayed_work(&nn->nn_connect_expired); 495 cancel_delayed_work(&nn->nn_connect_expired);
496 printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", 496 printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
497 o2nm_this_node() > sc->sc_node->nd_num ? 497 o2nm_this_node() > sc->sc_node->nd_num ?
498 "connected to" : "accepted connection from", 498 "connected to" : "accepted connection from",
499 SC_NODEF_ARGS(sc)); 499 SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
930 cond_resched(); 930 cond_resched();
931 continue; 931 continue;
932 } 932 }
933 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT 933 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
934 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); 934 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
935 o2net_ensure_shutdown(nn, sc, 0); 935 o2net_ensure_shutdown(nn, sc, 0);
936 break; 936 break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
1476 1476
1477 do_gettimeofday(&now); 1477 do_gettimeofday(&now);
1478 1478
1479 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " 1479 printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
1480 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), 1480 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
1481 o2net_idle_timeout() / 1000, 1481 o2net_idle_timeout() / 1000,
1482 o2net_idle_timeout() % 1000); 1482 o2net_idle_timeout() % 1000);
1483 mlog(ML_NOTICE, "here are some times that might help debug the " 1483 mlog(ML_NOTICE, "here are some times that might help debug the "
1484 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " 1484 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
1485 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", 1485 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
1486 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, 1486 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
1487 now.tv_sec, (long) now.tv_usec, 1487 now.tv_sec, (long) now.tv_usec,
1488 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, 1488 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
1489 sc->sc_tv_advance_start.tv_sec, 1489 sc->sc_tv_advance_start.tv_sec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..96fa7ebc530c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -32,10 +32,10 @@
32 * on their number */ 32 * on their number */
33#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) 33#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
34 34
35/* 35/*
36 * This version number represents quite a lot, unfortunately. It not 36 * This version number represents quite a lot, unfortunately. It not
37 * only represents the raw network message protocol on the wire but also 37 * only represents the raw network message protocol on the wire but also
38 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
39 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
40 * 40 *
41 * With version 11, we separate out the filesystem locking portion. The 41 * With version 11, we separate out the filesystem locking portion. The
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index b5786a787fab..3cfa114aa391 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
95 mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ 95 mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
96} while (0) 96} while (0)
97 97
98#define DLM_LKSB_UNUSED1 0x01 98#define DLM_LKSB_UNUSED1 0x01
99#define DLM_LKSB_PUT_LVB 0x02 99#define DLM_LKSB_PUT_LVB 0x02
100#define DLM_LKSB_GET_LVB 0x04 100#define DLM_LKSB_GET_LVB 0x04
101#define DLM_LKSB_UNUSED2 0x08 101#define DLM_LKSB_UNUSED2 0x08
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 01cf8cc3d286..dccc439fa087 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
123 dlm_lock_put(lock); 123 dlm_lock_put(lock);
124 /* free up the reserved bast that we are cancelling. 124 /* free up the reserved bast that we are cancelling.
125 * guaranteed that this will not be the last reserved 125 * guaranteed that this will not be the last reserved
126 * ast because *both* an ast and a bast were reserved 126 * ast because *both* an ast and a bast were reserved
127 * to get to this point. the res->spinlock will not be 127 * to get to this point. the res->spinlock will not be
128 * taken here */ 128 * taken here */
129 dlm_lockres_release_ast(dlm, res); 129 dlm_lockres_release_ast(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ca96bce50e18..f283bce776b4 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
396 /* instead of logging the same network error over 396 /* instead of logging the same network error over
397 * and over, sleep here and wait for the heartbeat 397 * and over, sleep here and wait for the heartbeat
398 * to notice the node is dead. times out after 5s. */ 398 * to notice the node is dead. times out after 5s. */
399 dlm_wait_for_node_death(dlm, res->owner, 399 dlm_wait_for_node_death(dlm, res->owner,
400 DLM_NODE_DEATH_WAIT_MAX); 400 DLM_NODE_DEATH_WAIT_MAX);
401 ret = DLM_RECOVERING; 401 ret = DLM_RECOVERING;
402 mlog(0, "node %u died so returning DLM_RECOVERING " 402 mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 42b0bad7a612..0cd24cf54396 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
102 assert_spin_locked(&res->spinlock); 102 assert_spin_locked(&res->spinlock);
103 103
104 stringify_lockname(res->lockname.name, res->lockname.len, 104 stringify_lockname(res->lockname.name, res->lockname.len,
105 buf, sizeof(buf) - 1); 105 buf, sizeof(buf));
106 printk("lockres: %s, owner=%u, state=%u\n", 106 printk("lockres: %s, owner=%u, state=%u\n",
107 buf, res->owner, res->state); 107 buf, res->owner, res->state);
108 printk(" last used: %lu, refcnt: %u, on purge list: %s\n", 108 printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0334000676d3..988c9055fd4e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
816 } 816 }
817 817
818 /* Once the dlm ctxt is marked as leaving then we don't want 818 /* Once the dlm ctxt is marked as leaving then we don't want
819 * to be put in someone's domain map. 819 * to be put in someone's domain map.
820 * Also, explicitly disallow joining at certain troublesome 820 * Also, explicitly disallow joining at certain troublesome
821 * times (ie. during recovery). */ 821 * times (ie. during recovery). */
822 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { 822 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 437698e9465f..733337772671 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
269 } 269 }
270 dlm_revert_pending_lock(res, lock); 270 dlm_revert_pending_lock(res, lock);
271 dlm_lock_put(lock); 271 dlm_lock_put(lock);
272 } else if (dlm_is_recovery_lock(res->lockname.name, 272 } else if (dlm_is_recovery_lock(res->lockname.name,
273 res->lockname.len)) { 273 res->lockname.len)) {
274 /* special case for the $RECOVERY lock. 274 /* special case for the $RECOVERY lock.
275 * there will never be an AST delivered to put 275 * there will never be an AST delivered to put
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 03ccf9a7b1f4..a659606dcb95 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
366 struct dlm_master_list_entry *mle; 366 struct dlm_master_list_entry *mle;
367 367
368 assert_spin_locked(&dlm->spinlock); 368 assert_spin_locked(&dlm->spinlock);
369 369
370 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 370 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
371 if (node_up) 371 if (node_up)
372 dlm_mle_node_up(dlm, mle, NULL, idx); 372 dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
833 __dlm_insert_mle(dlm, mle); 833 __dlm_insert_mle(dlm, mle);
834 834
835 /* still holding the dlm spinlock, check the recovery map 835 /* still holding the dlm spinlock, check the recovery map
836 * to see if there are any nodes that still need to be 836 * to see if there are any nodes that still need to be
837 * considered. these will not appear in the mle nodemap 837 * considered. these will not appear in the mle nodemap
838 * but they might own this lockres. wait on them. */ 838 * but they might own this lockres. wait on them. */
839 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 839 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
883 msleep(500); 883 msleep(500);
884 } 884 }
885 continue; 885 continue;
886 } 886 }
887 887
888 dlm_kick_recovery_thread(dlm); 888 dlm_kick_recovery_thread(dlm);
889 msleep(1000); 889 msleep(1000);
@@ -939,8 +939,8 @@ wait:
939 res->lockname.name, blocked); 939 res->lockname.name, blocked);
940 if (++tries > 20) { 940 if (++tries > 20) {
941 mlog(ML_ERROR, "%s:%.*s: spinning on " 941 mlog(ML_ERROR, "%s:%.*s: spinning on "
942 "dlm_wait_for_lock_mastery, blocked=%d\n", 942 "dlm_wait_for_lock_mastery, blocked=%d\n",
943 dlm->name, res->lockname.len, 943 dlm->name, res->lockname.len,
944 res->lockname.name, blocked); 944 res->lockname.name, blocked);
945 dlm_print_one_lock_resource(res); 945 dlm_print_one_lock_resource(res);
946 dlm_print_one_mle(mle); 946 dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
1029 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1029 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1030 b = (mle->type == DLM_MLE_BLOCK); 1030 b = (mle->type == DLM_MLE_BLOCK);
1031 if ((*blocked && !b) || (!*blocked && b)) { 1031 if ((*blocked && !b) || (!*blocked && b)) {
1032 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1032 mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1033 dlm->name, res->lockname.len, res->lockname.name, 1033 dlm->name, res->lockname.len, res->lockname.name,
1034 *blocked, b); 1034 *blocked, b);
1035 *blocked = b; 1035 *blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
1602 } 1602 }
1603 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1603 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1604 dlm->node_num, res->lockname.len, res->lockname.name); 1604 dlm->node_num, res->lockname.len, res->lockname.name);
1605 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1605 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1606 DLM_ASSERT_MASTER_MLE_CLEANUP); 1606 DLM_ASSERT_MASTER_MLE_CLEANUP);
1607 if (ret < 0) { 1607 if (ret < 0) {
1608 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1608 mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
1701 1701
1702 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1702 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1703 mlog(0, "%.*s: node %u create mles on other " 1703 mlog(0, "%.*s: node %u create mles on other "
1704 "nodes and requests a re-assert\n", 1704 "nodes and requests a re-assert\n",
1705 namelen, lockname, to); 1705 namelen, lockname, to);
1706 reassert = 1; 1706 reassert = 1;
1707 } 1707 }
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1812 spin_unlock(&dlm->master_lock); 1812 spin_unlock(&dlm->master_lock);
1813 spin_unlock(&dlm->spinlock); 1813 spin_unlock(&dlm->spinlock);
1814 goto done; 1814 goto done;
1815 } 1815 }
1816 } 1816 }
1817 } 1817 }
1818 spin_unlock(&dlm->master_lock); 1818 spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
1883 int extra_ref = 0; 1883 int extra_ref = 0;
1884 int nn = -1; 1884 int nn = -1;
1885 int rr, err = 0; 1885 int rr, err = 0;
1886 1886
1887 spin_lock(&mle->spinlock); 1887 spin_lock(&mle->spinlock);
1888 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1888 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1889 extra_ref = 1; 1889 extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
1891 /* MASTER mle: if any bits set in the response map 1891 /* MASTER mle: if any bits set in the response map
1892 * then the calling node needs to re-assert to clear 1892 * then the calling node needs to re-assert to clear
1893 * up nodes that this node contacted */ 1893 * up nodes that this node contacted */
1894 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1894 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1895 nn+1)) < O2NM_MAX_NODES) { 1895 nn+1)) < O2NM_MAX_NODES) {
1896 if (nn != dlm->node_num && nn != assert->node_idx) 1896 if (nn != dlm->node_num && nn != assert->node_idx)
1897 master_request = 1; 1897 master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
2002 __dlm_print_one_lock_resource(res); 2002 __dlm_print_one_lock_resource(res);
2003 spin_unlock(&res->spinlock); 2003 spin_unlock(&res->spinlock);
2004 spin_unlock(&dlm->spinlock); 2004 spin_unlock(&dlm->spinlock);
2005 *ret_data = (void *)res; 2005 *ret_data = (void *)res;
2006 dlm_put(dlm); 2006 dlm_put(dlm);
2007 return -EINVAL; 2007 return -EINVAL;
2008} 2008}
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2040 item->u.am.request_from = request_from; 2040 item->u.am.request_from = request_from;
2041 item->u.am.flags = flags; 2041 item->u.am.flags = flags;
2042 2042
2043 if (ignore_higher) 2043 if (ignore_higher)
2044 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2044 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2045 res->lockname.name); 2045 res->lockname.name);
2046 2046
2047 spin_lock(&dlm->work_lock); 2047 spin_lock(&dlm->work_lock);
2048 list_add_tail(&item->list, &dlm->work_list); 2048 list_add_tail(&item->list, &dlm->work_list);
2049 spin_unlock(&dlm->work_lock); 2049 spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
2133 * think that $RECOVERY is currently mastered by a dead node. If so, 2133 * think that $RECOVERY is currently mastered by a dead node. If so,
2134 * we wait a short time to allow that node to get notified by its own 2134 * we wait a short time to allow that node to get notified by its own
2135 * heartbeat stack, then check again. All $RECOVERY lock resources 2135 * heartbeat stack, then check again. All $RECOVERY lock resources
 2136 * mastered by dead nodes are purged when the heartbeat callback is 2136 * mastered by dead nodes are purged when the heartbeat callback is
2137 * fired, so we can know for sure that it is safe to continue once 2137 * fired, so we can know for sure that it is safe to continue once
2138 * the node returns a live node or no node. */ 2138 * the node returns a live node or no node. */
2139static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2139static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2174 ret = -EAGAIN; 2174 ret = -EAGAIN;
2175 } 2175 }
2176 spin_unlock(&dlm->spinlock); 2176 spin_unlock(&dlm->spinlock);
2177 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2177 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2178 master); 2178 master);
2179 break; 2179 break;
2180 } 2180 }
@@ -2602,7 +2602,7 @@ fail:
2602 2602
2603 mlog(0, "%s:%.*s: timed out during migration\n", 2603 mlog(0, "%s:%.*s: timed out during migration\n",
2604 dlm->name, res->lockname.len, res->lockname.name); 2604 dlm->name, res->lockname.len, res->lockname.name);
2605 /* avoid hang during shutdown when migrating lockres 2605 /* avoid hang during shutdown when migrating lockres
2606 * to a node which also goes down */ 2606 * to a node which also goes down */
2607 if (dlm_is_node_dead(dlm, target)) { 2607 if (dlm_is_node_dead(dlm, target)) {
2608 mlog(0, "%s:%.*s: expected migration " 2608 mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2738 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2738 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2739 spin_unlock(&res->spinlock); 2739 spin_unlock(&res->spinlock);
2740 2740
2741 /* target has died, so make the caller break out of the 2741 /* target has died, so make the caller break out of the
2742 * wait_event, but caller must recheck the domain_map */ 2742 * wait_event, but caller must recheck the domain_map */
2743 spin_lock(&dlm->spinlock); 2743 spin_lock(&dlm->spinlock);
2744 if (!test_bit(mig_target, dlm->domain_map)) 2744 if (!test_bit(mig_target, dlm->domain_map))
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 2f9e4e19a4f2..344bcf90cbf4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1050 if (lock->ml.node == dead_node) { 1050 if (lock->ml.node == dead_node) {
1051 mlog(0, "AHA! there was " 1051 mlog(0, "AHA! there was "
1052 "a $RECOVERY lock for dead " 1052 "a $RECOVERY lock for dead "
1053 "node %u (%s)!\n", 1053 "node %u (%s)!\n",
1054 dead_node, dlm->name); 1054 dead_node, dlm->name);
1055 list_del_init(&lock->list); 1055 list_del_init(&lock->list);
1056 dlm_lock_put(lock); 1056 dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1164 mres->master = master; 1164 mres->master = master;
1165} 1165}
1166 1166
1167static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1168 struct dlm_migratable_lockres *mres,
1169 int queue)
1170{
1171 if (!lock->lksb)
1172 return;
1173
1174 /* Ignore lvb in all locks in the blocked list */
1175 if (queue == DLM_BLOCKED_LIST)
1176 return;
1177
1178 /* Only consider lvbs in locks with granted EX or PR lock levels */
1179 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1180 return;
1181
1182 if (dlm_lvb_is_empty(mres->lvb)) {
1183 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1184 return;
1185 }
1186
1187 /* Ensure the lvb copied for migration matches in other valid locks */
1188 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1189 return;
1190
1191 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1192 "node=%u\n",
1193 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1194 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1195 lock->lockres->lockname.len, lock->lockres->lockname.name,
1196 lock->ml.node);
1197 dlm_print_one_lock_resource(lock->lockres);
1198 BUG();
1199}
1167 1200
1168/* returns 1 if this lock fills the network structure, 1201/* returns 1 if this lock fills the network structure,
1169 * 0 otherwise */ 1202 * 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
1181 ml->list = queue; 1214 ml->list = queue;
1182 if (lock->lksb) { 1215 if (lock->lksb) {
1183 ml->flags = lock->lksb->flags; 1216 ml->flags = lock->lksb->flags;
1184 /* send our current lvb */ 1217 dlm_prepare_lvb_for_migration(lock, mres, queue);
1185 if (ml->type == LKM_EXMODE ||
1186 ml->type == LKM_PRMODE) {
1187 /* if it is already set, this had better be a PR
1188 * and it has to match */
1189 if (!dlm_lvb_is_empty(mres->lvb) &&
1190 (ml->type == LKM_EXMODE ||
1191 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1192 mlog(ML_ERROR, "mismatched lvbs!\n");
1193 dlm_print_one_lock_resource(lock->lockres);
1194 BUG();
1195 }
1196 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1197 }
1198 } 1218 }
1199 ml->node = lock->ml.node; 1219 ml->node = lock->ml.node;
1200 mres->num_locks++; 1220 mres->num_locks++;
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1730 struct dlm_lock *lock = NULL; 1750 struct dlm_lock *lock = NULL;
1731 u8 from = O2NM_MAX_NODES; 1751 u8 from = O2NM_MAX_NODES;
1732 unsigned int added = 0; 1752 unsigned int added = 0;
1753 __be64 c;
1733 1754
1734 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1755 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1735 for (i=0; i<mres->num_locks; i++) { 1756 for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1777 /* lock is always created locally first, and 1798 /* lock is always created locally first, and
1778 * destroyed locally last. it must be on the list */ 1799 * destroyed locally last. it must be on the list */
1779 if (!lock) { 1800 if (!lock) {
1780 __be64 c = ml->cookie; 1801 c = ml->cookie;
1781 mlog(ML_ERROR, "could not find local lock " 1802 mlog(ML_ERROR, "Could not find local lock "
1782 "with cookie %u:%llu!\n", 1803 "with cookie %u:%llu, node %u, "
1804 "list %u, flags 0x%x, type %d, "
1805 "conv %d, highest blocked %d\n",
1783 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1806 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1784 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 1807 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1808 ml->node, ml->list, ml->flags, ml->type,
1809 ml->convert_type, ml->highest_blocked);
1810 __dlm_print_one_lock_resource(res);
1811 BUG();
1812 }
1813
1814 if (lock->ml.node != ml->node) {
1815 c = lock->ml.cookie;
1816 mlog(ML_ERROR, "Mismatched node# in lock "
1817 "cookie %u:%llu, name %.*s, node %u\n",
1818 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1819 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1820 res->lockname.len, res->lockname.name,
1821 lock->ml.node);
1822 c = ml->cookie;
1823 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1824 "node %u, list %u, flags 0x%x, type %d, "
1825 "conv %d, highest blocked %d\n",
1826 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1827 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1828 ml->node, ml->list, ml->flags, ml->type,
1829 ml->convert_type, ml->highest_blocked);
1785 __dlm_print_one_lock_resource(res); 1830 __dlm_print_one_lock_resource(res);
1786 BUG(); 1831 BUG();
1787 } 1832 }
1788 BUG_ON(lock->ml.node != ml->node);
1789 1833
1790 if (tmpq != queue) { 1834 if (tmpq != queue) {
1791 mlog(0, "lock was on %u instead of %u for %.*s\n", 1835 c = ml->cookie;
1792 j, ml->list, res->lockname.len, res->lockname.name); 1836 mlog(0, "Lock cookie %u:%llu was on list %u "
1837 "instead of list %u for %.*s\n",
1838 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1839 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1840 j, ml->list, res->lockname.len,
1841 res->lockname.name);
1842 __dlm_print_one_lock_resource(res);
1793 spin_unlock(&res->spinlock); 1843 spin_unlock(&res->spinlock);
1794 continue; 1844 continue;
1795 } 1845 }
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1839 * the lvb. */ 1889 * the lvb. */
1840 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1890 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1841 } else { 1891 } else {
1842 /* otherwise, the node is sending its 1892 /* otherwise, the node is sending its
1843 * most recent valid lvb info */ 1893 * most recent valid lvb info */
1844 BUG_ON(ml->type != LKM_EXMODE && 1894 BUG_ON(ml->type != LKM_EXMODE &&
1845 ml->type != LKM_PRMODE); 1895 ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
1886 spin_lock(&res->spinlock); 1936 spin_lock(&res->spinlock);
1887 list_for_each_entry(lock, queue, list) { 1937 list_for_each_entry(lock, queue, list) {
1888 if (lock->ml.cookie == ml->cookie) { 1938 if (lock->ml.cookie == ml->cookie) {
1889 __be64 c = lock->ml.cookie; 1939 c = lock->ml.cookie;
1890 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1940 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1891 "exists on this lockres!\n", dlm->name, 1941 "exists on this lockres!\n", dlm->name,
1892 res->lockname.len, res->lockname.name, 1942 res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2114 assert_spin_locked(&res->spinlock); 2164 assert_spin_locked(&res->spinlock);
2115 2165
2116 if (res->owner == dlm->node_num) 2166 if (res->owner == dlm->node_num)
2117 /* if this node owned the lockres, and if the dead node 2167 /* if this node owned the lockres, and if the dead node
2118 * had an EX when he died, blank out the lvb */ 2168 * had an EX when he died, blank out the lvb */
2119 search_node = dead_node; 2169 search_node = dead_node;
2120 else { 2170 else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2152 2202
2153 /* this node is the lockres master: 2203 /* this node is the lockres master:
2154 * 1) remove any stale locks for the dead node 2204 * 1) remove any stale locks for the dead node
2155 * 2) if the dead node had an EX when he died, blank out the lvb 2205 * 2) if the dead node had an EX when he died, blank out the lvb
2156 */ 2206 */
2157 assert_spin_locked(&dlm->spinlock); 2207 assert_spin_locked(&dlm->spinlock);
2158 assert_spin_locked(&res->spinlock); 2208 assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2193 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2243 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2194 "dropping ref from lockres\n", dlm->name, 2244 "dropping ref from lockres\n", dlm->name,
2195 res->lockname.len, res->lockname.name, freed, dead_node); 2245 res->lockname.len, res->lockname.name, freed, dead_node);
2196 BUG_ON(!test_bit(dead_node, res->refmap)); 2246 if(!test_bit(dead_node, res->refmap)) {
2247 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2248 "but ref was not set\n", dlm->name,
2249 res->lockname.len, res->lockname.name, freed, dead_node);
2250 __dlm_print_one_lock_resource(res);
2251 }
2197 dlm_lockres_clear_refmap_bit(dead_node, res); 2252 dlm_lockres_clear_refmap_bit(dead_node, res);
2198 } else if (test_bit(dead_node, res->refmap)) { 2253 } else if (test_bit(dead_node, res->refmap)) {
2199 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2254 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2260 } 2315 }
2261 spin_unlock(&res->spinlock); 2316 spin_unlock(&res->spinlock);
2262 continue; 2317 continue;
2263 } 2318 }
2264 spin_lock(&res->spinlock); 2319 spin_lock(&res->spinlock);
2265 /* zero the lvb if necessary */ 2320 /* zero the lvb if necessary */
2266 dlm_revalidate_lvb(dlm, res, dead_node); 2321 dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2411 * this function on each node racing to become the recovery 2466 * this function on each node racing to become the recovery
2412 * master will not stop attempting this until either: 2467 * master will not stop attempting this until either:
2413 * a) this node gets the EX (and becomes the recovery master), 2468 * a) this node gets the EX (and becomes the recovery master),
2414 * or b) dlm->reco.new_master gets set to some nodenum 2469 * or b) dlm->reco.new_master gets set to some nodenum
2415 * != O2NM_INVALID_NODE_NUM (another node will do the reco). 2470 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2416 * so each time a recovery master is needed, the entire cluster 2471 * so each time a recovery master is needed, the entire cluster
2417 * will sync at this point. if the new master dies, that will 2472 * will sync at this point. if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2424 2479
2425 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2480 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2426 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2481 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2427again: 2482again:
2428 memset(&lksb, 0, sizeof(lksb)); 2483 memset(&lksb, 0, sizeof(lksb));
2429 2484
2430 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2485 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
2437 if (ret == DLM_NORMAL) { 2492 if (ret == DLM_NORMAL) {
2438 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2493 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2439 dlm->name, dlm->node_num); 2494 dlm->name, dlm->node_num);
2440 2495
2441 /* got the EX lock. check to see if another node 2496 /* got the EX lock. check to see if another node
2442 * just became the reco master */ 2497 * just became the reco master */
2443 if (dlm_reco_master_ready(dlm)) { 2498 if (dlm_reco_master_ready(dlm)) {
2444 mlog(0, "%s: got reco EX lock, but %u will " 2499 mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
2451 /* see if recovery was already finished elsewhere */ 2506 /* see if recovery was already finished elsewhere */
2452 spin_lock(&dlm->spinlock); 2507 spin_lock(&dlm->spinlock);
2453 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2508 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2454 status = -EINVAL; 2509 status = -EINVAL;
2455 mlog(0, "%s: got reco EX lock, but " 2510 mlog(0, "%s: got reco EX lock, but "
2456 "node got recovered already\n", dlm->name); 2511 "node got recovered already\n", dlm->name);
2457 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { 2512 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2458 mlog(ML_ERROR, "%s: new master is %u " 2513 mlog(ML_ERROR, "%s: new master is %u "
2459 "but no dead node!\n", 2514 "but no dead node!\n",
2460 dlm->name, dlm->reco.new_master); 2515 dlm->name, dlm->reco.new_master);
2461 BUG(); 2516 BUG();
2462 } 2517 }
@@ -2468,7 +2523,7 @@ again:
2468 * set the master and send the messages to begin recovery */ 2523 * set the master and send the messages to begin recovery */
2469 if (!status) { 2524 if (!status) {
2470 mlog(0, "%s: dead=%u, this=%u, sending " 2525 mlog(0, "%s: dead=%u, this=%u, sending "
2471 "begin_reco now\n", dlm->name, 2526 "begin_reco now\n", dlm->name,
2472 dlm->reco.dead_node, dlm->node_num); 2527 dlm->reco.dead_node, dlm->node_num);
2473 status = dlm_send_begin_reco_message(dlm, 2528 status = dlm_send_begin_reco_message(dlm,
2474 dlm->reco.dead_node); 2529 dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
2501 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", 2556 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2502 dlm->name, dlm->node_num); 2557 dlm->name, dlm->node_num);
2503 /* another node is master. wait on 2558 /* another node is master. wait on
2504 * reco.new_master != O2NM_INVALID_NODE_NUM 2559 * reco.new_master != O2NM_INVALID_NODE_NUM
2505 * for at most one second */ 2560 * for at most one second */
2506 wait_event_timeout(dlm->dlm_reco_thread_wq, 2561 wait_event_timeout(dlm->dlm_reco_thread_wq,
2507 dlm_reco_master_ready(dlm), 2562 dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
2589 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2644 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2590 ret = 0; 2645 ret = 0;
2591 } 2646 }
2592 if (ret == -EAGAIN) { 2647
2648 /*
2649 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2650 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2651 * We are handling both for compatibility reasons.
2652 */
2653 if (ret == -EAGAIN || ret == EAGAIN) {
2593 mlog(0, "%s: trying to start recovery of node " 2654 mlog(0, "%s: trying to start recovery of node "
2594 "%u, but node %u is waiting for last recovery " 2655 "%u, but node %u is waiting for last recovery "
2595 "to complete, backoff for a bit\n", dlm->name, 2656 "to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
2599 } 2660 }
2600 if (ret < 0) { 2661 if (ret < 0) {
2601 struct dlm_lock_resource *res; 2662 struct dlm_lock_resource *res;
2602 /* this is now a serious problem, possibly ENOMEM 2663 /* this is now a serious problem, possibly ENOMEM
2603 * in the network stack. must retry */ 2664 * in the network stack. must retry */
2604 mlog_errno(ret); 2665 mlog_errno(ret);
2605 mlog(ML_ERROR, "begin reco of dlm %s to node %u " 2666 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
2612 } else { 2673 } else {
2613 mlog(ML_ERROR, "recovery lock not found\n"); 2674 mlog(ML_ERROR, "recovery lock not found\n");
2614 } 2675 }
2615 /* sleep for a bit in hopes that we can avoid 2676 /* sleep for a bit in hopes that we can avoid
2616 * another ENOMEM */ 2677 * another ENOMEM */
2617 msleep(100); 2678 msleep(100);
2618 goto retry; 2679 goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2664 } 2725 }
2665 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { 2726 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2666 mlog(ML_NOTICE, "%s: dead_node previously set to %u, " 2727 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2667 "node %u changing it to %u\n", dlm->name, 2728 "node %u changing it to %u\n", dlm->name,
2668 dlm->reco.dead_node, br->node_idx, br->dead_node); 2729 dlm->reco.dead_node, br->node_idx, br->dead_node);
2669 } 2730 }
2670 dlm_set_reco_master(dlm, br->node_idx); 2731 dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
2730 if (ret < 0) { 2791 if (ret < 0) {
2731 mlog_errno(ret); 2792 mlog_errno(ret);
2732 if (dlm_is_host_down(ret)) { 2793 if (dlm_is_host_down(ret)) {
2733 /* this has no effect on this recovery 2794 /* this has no effect on this recovery
2734 * session, so set the status to zero to 2795 * session, so set the status to zero to
2735 * finish out the last recovery */ 2796 * finish out the last recovery */
2736 mlog(ML_ERROR, "node %u went down after this " 2797 mlog(ML_ERROR, "node %u went down after this "
2737 "node finished recovery.\n", nodenum); 2798 "node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2768 mlog(0, "%s: node %u finalizing recovery stage%d of " 2829 mlog(0, "%s: node %u finalizing recovery stage%d of "
2769 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, 2830 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2770 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); 2831 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2771 2832
2772 spin_lock(&dlm->spinlock); 2833 spin_lock(&dlm->spinlock);
2773 2834
2774 if (dlm->reco.new_master != fr->node_idx) { 2835 if (dlm->reco.new_master != fr->node_idx) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 00f53b2aea76..49e29ecd0201 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
190 actions &= ~(DLM_UNLOCK_REMOVE_LOCK| 190 actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
191 DLM_UNLOCK_REGRANT_LOCK| 191 DLM_UNLOCK_REGRANT_LOCK|
192 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 192 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
193 } else if (status == DLM_RECOVERING || 193 } else if (status == DLM_RECOVERING ||
194 status == DLM_MIGRATING || 194 status == DLM_MIGRATING ||
195 status == DLM_FORWARD) { 195 status == DLM_FORWARD) {
196 /* must clear the actions because this unlock 196 /* must clear the actions because this unlock
197 * is about to be retried. cannot free or do 197 * is about to be retried. cannot free or do
@@ -661,14 +661,14 @@ retry:
661 if (call_ast) { 661 if (call_ast) {
662 mlog(0, "calling unlockast(%p, %d)\n", data, status); 662 mlog(0, "calling unlockast(%p, %d)\n", data, status);
663 if (is_master) { 663 if (is_master) {
664 /* it is possible that there is one last bast 664 /* it is possible that there is one last bast
665 * pending. make sure it is flushed, then 665 * pending. make sure it is flushed, then
666 * call the unlockast. 666 * call the unlockast.
667 * not an issue if this is a mastered remotely, 667 * not an issue if this is a mastered remotely,
668 * since this lock has been removed from the 668 * since this lock has been removed from the
669 * lockres queues and cannot be found. */ 669 * lockres queues and cannot be found. */
670 dlm_kick_thread(dlm, NULL); 670 dlm_kick_thread(dlm, NULL);
671 wait_event(dlm->ast_wq, 671 wait_event(dlm->ast_wq,
672 dlm_lock_basts_flushed(dlm, lock)); 672 dlm_lock_basts_flushed(dlm, lock));
673 } 673 }
674 (*unlockast)(data, status); 674 (*unlockast)(data, status);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c5e4a49e3a12..e044019cb3b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
875 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH); 875 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
876 876
877 lockres->l_level = lockres->l_requested; 877 lockres->l_level = lockres->l_requested;
878
879 /*
880 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
881 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
882 * downconverting the lock before the upconvert has fully completed.
883 */
884 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
885
878 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 886 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
879 887
880 mlog_exit_void(); 888 mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
907 915
908 assert_spin_locked(&lockres->l_lock); 916 assert_spin_locked(&lockres->l_lock);
909 917
910 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
911
912 if (level > lockres->l_blocking) { 918 if (level > lockres->l_blocking) {
913 /* only schedule a downconvert if we haven't already scheduled 919 /* only schedule a downconvert if we haven't already scheduled
914 * one that goes low enough to satisfy the level we're 920 * one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
921 lockres->l_blocking = level; 927 lockres->l_blocking = level;
922 } 928 }
923 929
930 if (needs_downconvert)
931 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
932
924 mlog_exit(needs_downconvert); 933 mlog_exit(needs_downconvert);
925 return needs_downconvert; 934 return needs_downconvert;
926} 935}
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1133 mlog_entry_void(); 1142 mlog_entry_void();
1134 spin_lock_irqsave(&lockres->l_lock, flags); 1143 spin_lock_irqsave(&lockres->l_lock, flags);
1135 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 1144 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1145 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1136 if (convert) 1146 if (convert)
1137 lockres->l_action = OCFS2_AST_INVALID; 1147 lockres->l_action = OCFS2_AST_INVALID;
1138 else 1148 else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1323again: 1333again:
1324 wait = 0; 1334 wait = 0;
1325 1335
1336 spin_lock_irqsave(&lockres->l_lock, flags);
1337
1326 if (catch_signals && signal_pending(current)) { 1338 if (catch_signals && signal_pending(current)) {
1327 ret = -ERESTARTSYS; 1339 ret = -ERESTARTSYS;
1328 goto out; 1340 goto unlock;
1329 } 1341 }
1330 1342
1331 spin_lock_irqsave(&lockres->l_lock, flags);
1332
1333 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING, 1343 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1334 "Cluster lock called on freeing lockres %s! flags " 1344 "Cluster lock called on freeing lockres %s! flags "
1335 "0x%lx\n", lockres->l_name, lockres->l_flags); 1345 "0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
1346 goto unlock; 1356 goto unlock;
1347 } 1357 }
1348 1358
1359 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1360 /*
1361 * We've upconverted. If the lock now has a level we can
1362 * work with, we take it. If, however, the lock is not at the
1363 * required level, we go thru the full cycle. One way this could
1364 * happen is if a process requesting an upconvert to PR is
1365 * closely followed by another requesting upconvert to an EX.
1366 * If the process requesting EX lands here, we want it to
1367 * continue attempting to upconvert and let the process
1368 * requesting PR take the lock.
1369 * If multiple processes request upconvert to PR, the first one
1370 * here will take the lock. The others will have to go thru the
1371 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1372 * downconvert request.
1373 */
1374 if (level <= lockres->l_level)
1375 goto update_holders;
1376 }
1377
1349 if (lockres->l_flags & OCFS2_LOCK_BLOCKED && 1378 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1350 !ocfs2_may_continue_on_blocked_lock(lockres, level)) { 1379 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1351 /* is the lock currently blocked on behalf of 1380 /* is the lock currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
1416 goto again; 1445 goto again;
1417 } 1446 }
1418 1447
1448update_holders:
1419 /* Ok, if we get here then we're good to go. */ 1449 /* Ok, if we get here then we're good to go. */
1420 ocfs2_inc_holders(lockres, level); 1450 ocfs2_inc_holders(lockres, level);
1421 1451
1422 ret = 0; 1452 ret = 0;
1423unlock: 1453unlock:
1454 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1455
1424 spin_unlock_irqrestore(&lockres->l_lock, flags); 1456 spin_unlock_irqrestore(&lockres->l_lock, flags);
1425out: 1457out:
1426 /* 1458 /*
@@ -3155,7 +3187,7 @@ out:
3155/* Mark the lockres as being dropped. It will no longer be 3187/* Mark the lockres as being dropped. It will no longer be
3156 * queued if blocking, but we still may have to wait on it 3188 * queued if blocking, but we still may have to wait on it
3157 * being dequeued from the downconvert thread before we can consider 3189 * being dequeued from the downconvert thread before we can consider
3158 * it safe to drop. 3190 * it safe to drop.
3159 * 3191 *
3160 * You can *not* attempt to call cluster_lock on this lockres anymore. */ 3192 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3161void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) 3193void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3352 unsigned long flags; 3384 unsigned long flags;
3353 int blocking; 3385 int blocking;
3354 int new_level; 3386 int new_level;
3387 int level;
3355 int ret = 0; 3388 int ret = 0;
3356 int set_lvb = 0; 3389 int set_lvb = 0;
3357 unsigned int gen; 3390 unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3360 3393
3361 spin_lock_irqsave(&lockres->l_lock, flags); 3394 spin_lock_irqsave(&lockres->l_lock, flags);
3362 3395
3363 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
3364
3365recheck: 3396recheck:
3397 /*
3398 * Is it still blocking? If not, we have no more work to do.
3399 */
3400 if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3401 BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3402 spin_unlock_irqrestore(&lockres->l_lock, flags);
3403 ret = 0;
3404 goto leave;
3405 }
3406
3366 if (lockres->l_flags & OCFS2_LOCK_BUSY) { 3407 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3367 /* XXX 3408 /* XXX
3368 * This is a *big* race. The OCFS2_LOCK_PENDING flag 3409 * This is a *big* race. The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
3401 goto leave; 3442 goto leave;
3402 } 3443 }
3403 3444
3445 /*
3446 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3447 * set when the ast is received for an upconvert just before the
3448 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3449 * on the heels of the ast, we want to delay the downconvert just
3450 * enough to allow the up requestor to do its task. Because this
3451 * lock is in the blocked queue, the lock will be downconverted
3452 * as soon as the requestor is done with the lock.
3453 */
3454 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3455 goto leave_requeue;
3456
3457 /*
3458 * How can we block and yet be at NL? We were trying to upconvert
3459 * from NL and got canceled. The code comes back here, and now
3460 * we notice and clear BLOCKING.
3461 */
3462 if (lockres->l_level == DLM_LOCK_NL) {
3463 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3464 lockres->l_blocking = DLM_LOCK_NL;
3465 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3466 spin_unlock_irqrestore(&lockres->l_lock, flags);
3467 goto leave;
3468 }
3469
3404 /* if we're blocking an exclusive and we have *any* holders, 3470 /* if we're blocking an exclusive and we have *any* holders,
3405 * then requeue. */ 3471 * then requeue. */
3406 if ((lockres->l_blocking == DLM_LOCK_EX) 3472 if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
3438 * may sleep, so we save off a copy of what we're blocking as 3504 * may sleep, so we save off a copy of what we're blocking as
3439 * it may change while we're not holding the spin lock. */ 3505 * it may change while we're not holding the spin lock. */
3440 blocking = lockres->l_blocking; 3506 blocking = lockres->l_blocking;
3507 level = lockres->l_level;
3441 spin_unlock_irqrestore(&lockres->l_lock, flags); 3508 spin_unlock_irqrestore(&lockres->l_lock, flags);
3442 3509
3443 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking); 3510 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
3446 goto leave; 3513 goto leave;
3447 3514
3448 spin_lock_irqsave(&lockres->l_lock, flags); 3515 spin_lock_irqsave(&lockres->l_lock, flags);
3449 if (blocking != lockres->l_blocking) { 3516 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3450 /* If this changed underneath us, then we can't drop 3517 /* If this changed underneath us, then we can't drop
3451 * it just yet. */ 3518 * it just yet. */
3452 goto recheck; 3519 goto recheck;
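
The dlmglue.c changes above hinge on one ordering rule: OCFS2_LOCK_UPCONVERT_FINISHING is raised under the lockres spinlock before OCFS2_LOCK_BUSY is cleared, so the downconvert thread can never observe BUSY dropping without also seeing the new flag, and ocfs2_unblock_lock() requeues instead of downconverting. Below is a minimal user-space sketch of that ordering only; the flag values, struct layout and pthread mutex are illustrative stand-ins, not the kernel code.

    #include <pthread.h>

    #define LK_BUSY                  0x1UL
    #define LK_UPCONVERT_FINISHING   0x2UL

    struct lockres {
            pthread_mutex_t lock;           /* stand-in for lockres->l_lock */
            unsigned long flags;
    };

    /* ast completion path: publish UPCONVERT_FINISHING before dropping BUSY */
    static void ast_finish_upconvert(struct lockres *res)
    {
            pthread_mutex_lock(&res->lock);
            res->flags |= LK_UPCONVERT_FINISHING;
            res->flags &= ~LK_BUSY;
            pthread_mutex_unlock(&res->lock);
    }

    /* downconvert path: back off while an upconvert is being finished */
    static int should_requeue_downconvert(struct lockres *res)
    {
            int requeue;

            pthread_mutex_lock(&res->lock);
            requeue = !!(res->flags & LK_UPCONVERT_FINISHING);
            pthread_mutex_unlock(&res->lock);
            return requeue;
    }

    int main(void)
    {
            struct lockres res;

            pthread_mutex_init(&res.lock, NULL);
            res.flags = LK_BUSY;

            ast_finish_upconvert(&res);
            return should_requeue_downconvert(&res) ? 0 : 1;    /* exits 0 */
    }

The fast path added to __ocfs2_cluster_lock() (the update_holders label) is the consumer side of the same flag: if the lock already sits at an adequate level, the waiter takes it directly instead of issuing another DLM request.
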
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 15713cbb865c..19ad145d2af3 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
239 mlog(0, "Encoding parent: blkno: %llu, generation: %u\n", 239 mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
240 (unsigned long long)blkno, generation); 240 (unsigned long long)blkno, generation);
241 } 241 }
242 242
243 *max_len = len; 243 *max_len = len;
244 244
245bail: 245bail:
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index d35a27f4523e..5328529e7fd2 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
192 emi->ei_clusters += ins->ei_clusters; 192 emi->ei_clusters += ins->ei_clusters;
193 return 1; 193 return 1;
194 } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && 194 } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
195 (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && 195 (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
196 ins->ei_flags == emi->ei_flags) { 196 ins->ei_flags == emi->ei_flags) {
197 emi->ei_phys = ins->ei_phys; 197 emi->ei_phys = ins->ei_phys;
198 emi->ei_cpos = ins->ei_cpos; 198 emi->ei_cpos = ins->ei_cpos;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 06ccf6a86d35..558ce0312421 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
749 int ret; 749 int ret;
750 750
751 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */ 751 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
752 /* ugh. in prepare/commit_write, if from==to==start of block, we 752 /* ugh. in prepare/commit_write, if from==to==start of block, we
753 ** skip the prepare. make sure we never send an offset for the start 753 ** skip the prepare. make sure we never send an offset for the start
754 ** of a block 754 ** of a block
755 */ 755 */
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1779 struct inode *inode = dentry->d_inode; 1779 struct inode *inode = dentry->d_inode;
1780 loff_t saved_pos, end; 1780 loff_t saved_pos, end;
1781 1781
1782 /* 1782 /*
1783 * We start with a read level meta lock and only jump to an ex 1783 * We start with a read level meta lock and only jump to an ex
1784 * if we need to make modifications here. 1784 * if we need to make modifications here.
1785 */ 1785 */
@@ -2013,8 +2013,8 @@ out_dio:
2013 /* buffered aio wouldn't have proper lock coverage today */ 2013 /* buffered aio wouldn't have proper lock coverage today */
2014 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT)); 2014 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2015 2015
2016 if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) || 2016 if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
2017 (file->f_flags & O_DIRECT && has_refcount)) { 2017 ((file->f_flags & O_DIRECT) && has_refcount)) {
2018 ret = filemap_fdatawrite_range(file->f_mapping, pos, 2018 ret = filemap_fdatawrite_range(file->f_mapping, pos,
2019 pos + count - 1); 2019 pos + count - 1);
2020 if (ret < 0) 2020 if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
2033 pos + count - 1); 2033 pos + count - 1);
2034 } 2034 }
2035 2035
2036 /* 2036 /*
2037 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io 2037 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
2038 * function pointer which is called when o_direct io completes so that 2038 * function pointer which is called when o_direct io completes so that
2039 * it can unlock our rw lock. (it's the clustered equivalent of 2039 * it can unlock our rw lock. (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2198 goto bail; 2198 goto bail;
2199 } 2199 }
2200 2200
2201 /* 2201 /*
2202 * buffered reads protect themselves in ->readpage(). O_DIRECT reads 2202 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2203 * need locks to protect pending reads from racing with truncate. 2203 * need locks to protect pending reads from racing with truncate.
2204 */ 2204 */
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2220 * We're fine letting folks race truncates and extending 2220 * We're fine letting folks race truncates and extending
2221 * writes with read across the cluster, just like they can 2221 * writes with read across the cluster, just like they can
2222 * locally. Hence no rw_lock during read. 2222 * locally. Hence no rw_lock during read.
2223 * 2223 *
2224 * Take and drop the meta data lock to update inode fields 2224 * Take and drop the meta data lock to update inode fields
2225 * like i_size. This allows the checks down below 2225 * like i_size. This allows the checks down below
2226 * generic_file_aio_read() a chance of actually working. 2226 * generic_file_aio_read() a chance of actually working.
2227 */ 2227 */
2228 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level); 2228 ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2229 if (ret < 0) { 2229 if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2248bail: 2248bail:
2249 if (have_alloc_sem) 2249 if (have_alloc_sem)
2250 up_read(&inode->i_alloc_sem); 2250 up_read(&inode->i_alloc_sem);
2251 if (rw_level != -1) 2251 if (rw_level != -1)
2252 ocfs2_rw_unlock(inode, rw_level); 2252 ocfs2_rw_unlock(inode, rw_level);
2253 mlog_exit(ret); 2253 mlog_exit(ret);
2254 2254
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0297fb8982b8..88459bdd1ff3 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
475 if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) { 475 if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
476 status = ocfs2_try_open_lock(inode, 0); 476 status = ocfs2_try_open_lock(inode, 0);
477 if (status) { 477 if (status) {
478 make_bad_inode(inode); 478 make_bad_inode(inode);
479 return status; 479 return status;
480 } 480 }
481 } 481 }
@@ -684,7 +684,7 @@ bail:
684 return status; 684 return status;
685} 685}
686 686
687/* 687/*
688 * Serialize with orphan dir recovery. If the process doing 688 * Serialize with orphan dir recovery. If the process doing
689 * recovery on this orphan dir does an iget() with the dir 689 * recovery on this orphan dir does an iget() with the dir
690 * i_mutex held, we'll deadlock here. Instead we detect this 690 * i_mutex held, we'll deadlock here. Instead we detect this
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 31fbb0619510..7d9d9c132cef 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/fs.h> 8#include <linux/fs.h>
9#include <linux/mount.h> 9#include <linux/mount.h>
10#include <linux/compat.h>
10 11
11#define MLOG_MASK_PREFIX ML_INODE 12#define MLOG_MASK_PREFIX ML_INODE
12#include <cluster/masklog.h> 13#include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
181#ifdef CONFIG_COMPAT 182#ifdef CONFIG_COMPAT
182long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) 183long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
183{ 184{
185 bool preserve;
186 struct reflink_arguments args;
187 struct inode *inode = file->f_path.dentry->d_inode;
188
184 switch (cmd) { 189 switch (cmd) {
185 case OCFS2_IOC32_GETFLAGS: 190 case OCFS2_IOC32_GETFLAGS:
186 cmd = OCFS2_IOC_GETFLAGS; 191 cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
195 case OCFS2_IOC_GROUP_EXTEND: 200 case OCFS2_IOC_GROUP_EXTEND:
196 case OCFS2_IOC_GROUP_ADD: 201 case OCFS2_IOC_GROUP_ADD:
197 case OCFS2_IOC_GROUP_ADD64: 202 case OCFS2_IOC_GROUP_ADD64:
198 case OCFS2_IOC_REFLINK:
199 break; 203 break;
204 case OCFS2_IOC_REFLINK:
205 if (copy_from_user(&args, (struct reflink_arguments *)arg,
206 sizeof(args)))
207 return -EFAULT;
208 preserve = (args.preserve != 0);
209
210 return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
211 compat_ptr(args.new_path), preserve);
200 default: 212 default:
201 return -ENOIOCTLCMD; 213 return -ENOIOCTLCMD;
202 } 214 }
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bf34c491ae96..9336c60e3a36 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
2034 status = -ENOENT; 2034 status = -ENOENT;
2035 mlog_errno(status); 2035 mlog_errno(status);
2036 return status; 2036 return status;
2037 } 2037 }
2038 2038
2039 mutex_lock(&orphan_dir_inode->i_mutex); 2039 mutex_lock(&orphan_dir_inode->i_mutex);
2040 status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0); 2040 status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9362eea7424b..740f448041e2 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
136#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a 136#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a
137 call to dlm_lock. Only 137 call to dlm_lock. Only
138 exists with BUSY set. */ 138 exists with BUSY set. */
139#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
140 * from downconverting
141 * before the upconvert
142 * has completed */
139 143
140struct ocfs2_lock_res_ops; 144struct ocfs2_lock_res_ops;
141 145
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 1a1a679e51b5..7638a38c32bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
1417 return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink); 1417 return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
1418} 1418}
1419 1419
1420static inline int ocfs2_max_inline_data(int blocksize) 1420static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
1421 struct ocfs2_dinode *di)
1421{ 1422{
1422 return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data); 1423 if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
1424 return blocksize -
1425 offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
1426 di->i_xattr_inline_size;
1427 else
1428 return blocksize -
1429 offsetof(struct ocfs2_dinode, id2.i_data.id_data);
1423} 1430}
1424 1431
1425static inline int ocfs2_extent_recs_per_inode(int blocksize) 1432static inline int ocfs2_extent_recs_per_inode(int blocksize)
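
As a quick sanity check of the helper above: the inline-data capacity is the block size minus the dinode header, further reduced by the inline xattr area when the inode carries OCFS2_INLINE_XATTR_FL. The numbers below are made up for illustration; the real values come from offsetof() on struct ocfs2_dinode and di->i_xattr_inline_size.

    #include <stdio.h>

    static int max_inline_data_with_xattr(int blocksize, int dinode_header,
                                          int xattr_inline_size)
    {
            return blocksize - dinode_header - xattr_inline_size;
    }

    int main(void)
    {
            /* hypothetical 4 KiB block, 200-byte header, 256-byte inline xattrs */
            printf("no inline xattrs:   %d bytes\n",
                   max_inline_data_with_xattr(4096, 200, 0));      /* 3896 */
            printf("with inline xattrs: %d bytes\n",
                   max_inline_data_with_xattr(4096, 200, 256));    /* 3640 */
            return 0;
    }
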
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 74db2be75dd6..8ae65c9c020c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2945 2945
2946 while (offset < end) { 2946 while (offset < end) {
2947 page_index = offset >> PAGE_CACHE_SHIFT; 2947 page_index = offset >> PAGE_CACHE_SHIFT;
2948 map_end = (page_index + 1) << PAGE_CACHE_SHIFT; 2948 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
2949 if (map_end > end) 2949 if (map_end > end)
2950 map_end = end; 2950 map_end = end;
2951 2951
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2957 2957
2958 page = grab_cache_page(mapping, page_index); 2958 page = grab_cache_page(mapping, page_index);
2959 2959
2960 /* This page can't be dirtied before we CoW it out. */ 2960 /*
2961 BUG_ON(PageDirty(page)); 2961 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
2962 * can't be dirtied before we CoW it out.
2963 */
2964 if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
2965 BUG_ON(PageDirty(page));
2962 2966
2963 if (!PageUptodate(page)) { 2967 if (!PageUptodate(page)) {
2964 ret = block_read_full_page(page, ocfs2_get_block); 2968 ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
3170 3174
3171 while (offset < end) { 3175 while (offset < end) {
3172 page_index = offset >> PAGE_CACHE_SHIFT; 3176 page_index = offset >> PAGE_CACHE_SHIFT;
3173 map_end = (page_index + 1) << PAGE_CACHE_SHIFT; 3177 map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
3174 if (map_end > end) 3178 if (map_end > end)
3175 map_end = end; 3179 map_end = end;
3176 3180
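
The two (loff_t) casts above are 32-bit overflow fixes: page_index is an unsigned long, so on a 32-bit build the shift by PAGE_CACHE_SHIFT happens in 32 bits and truncates once the byte offset passes 4 GiB. A stand-alone demonstration, with the 32-bit width modelled by uint32_t and the page size hard-coded to 4 KiB for the example:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint32_t page_index = 0x00100000;   /* byte offset 4 GiB */

            uint64_t truncated = (page_index + 1) << PAGE_SHIFT;            /* 32-bit shift */
            uint64_t correct   = ((uint64_t)page_index + 1) << PAGE_SHIFT;  /* 64-bit shift */

            printf("without cast: 0x%llx\n", (unsigned long long)truncated); /* 0x1000 */
            printf("with cast:    0x%llx\n", (unsigned long long)correct);   /* 0x100001000 */
            return 0;
    }
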
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index e49c41050264..3038c92af493 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
277 u32 dlm_key; 277 u32 dlm_key;
278 struct dlm_ctxt *dlm; 278 struct dlm_ctxt *dlm;
279 struct o2dlm_private *priv; 279 struct o2dlm_private *priv;
280 struct dlm_protocol_version dlm_version; 280 struct dlm_protocol_version fs_version;
281 281
282 BUG_ON(conn == NULL); 282 BUG_ON(conn == NULL);
283 BUG_ON(o2cb_stack.sp_proto == NULL); 283 BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
304 /* used by the dlm code to make message headers unique, each 304 /* used by the dlm code to make message headers unique, each
305 * node in this domain must agree on this. */ 305 * node in this domain must agree on this. */
306 dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); 306 dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
307 dlm_version.pv_major = conn->cc_version.pv_major; 307 fs_version.pv_major = conn->cc_version.pv_major;
308 dlm_version.pv_minor = conn->cc_version.pv_minor; 308 fs_version.pv_minor = conn->cc_version.pv_minor;
309 309
310 dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version); 310 dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
311 if (IS_ERR(dlm)) { 311 if (IS_ERR(dlm)) {
312 rc = PTR_ERR(dlm); 312 rc = PTR_ERR(dlm);
313 mlog_errno(rc); 313 mlog_errno(rc);
314 goto out_free; 314 goto out_free;
315 } 315 }
316 316
317 conn->cc_version.pv_major = dlm_version.pv_major; 317 conn->cc_version.pv_major = fs_version.pv_major;
318 conn->cc_version.pv_minor = dlm_version.pv_minor; 318 conn->cc_version.pv_minor = fs_version.pv_minor;
319 conn->cc_lockspace = dlm; 319 conn->cc_lockspace = dlm;
320 320
321 dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); 321 dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26069917a9f5..755cd49a5ef3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1062 "file system, but write access is " 1062 "file system, but write access is "
1063 "unavailable.\n"); 1063 "unavailable.\n");
1064 else 1064 else
1065 mlog_errno(status); 1065 mlog_errno(status);
1066 goto read_super_error; 1066 goto read_super_error;
1067 } 1067 }
1068 1068
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 49b133ccbf11..32499d213fc4 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
137 } 137 }
138 138
139 memcpy(link, target, len); 139 memcpy(link, target, len);
140 nd_set_link(nd, link);
141 140
142bail: 141bail:
142 nd_set_link(nd, status ? ERR_PTR(status) : link);
143 brelse(bh); 143 brelse(bh);
144 144
145 mlog_exit(status); 145 mlog_exit(status);
146 return status ? ERR_PTR(status) : link; 146 return NULL;
147} 147}
148 148
149static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 149static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
150{ 150{
151 char *link = cookie; 151 char *link = nd_get_link(nd);
152 152 if (!IS_ERR(link))
153 kfree(link); 153 kfree(link);
154} 154}
155 155
156const struct inode_operations ocfs2_symlink_inode_operations = { 156const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index c61369342a27..a0a120e82b97 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
267} 267}
268 268
269/* Warning: even if it returns true, this does *not* guarantee that 269/* Warning: even if it returns true, this does *not* guarantee that
270 * the block is stored in our inode metadata cache. 270 * the block is stored in our inode metadata cache.
271 * 271 *
272 * This can be called under lock_buffer() 272 * This can be called under lock_buffer()
273 */ 273 */
274int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, 274int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/fs/open.c b/fs/open.c
index 040cef72bc00..e0b2d88b0380 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -271,7 +271,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
271 * Make sure that there are no leases. get_write_access() protects 271 * Make sure that there are no leases. get_write_access() protects
272 * against the truncate racing with a lease-granting setlease(). 272 * against the truncate racing with a lease-granting setlease().
273 */ 273 */
274 error = break_lease(inode, FMODE_WRITE); 274 error = break_lease(inode, O_WRONLY);
275 if (error) 275 if (error)
276 goto put_write_and_out; 276 goto put_write_and_out;
277 277
diff --git a/fs/pnode.c b/fs/pnode.c
index 8d5f392ec3d3..5cc564a83149 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -86,7 +86,7 @@ static int do_make_slave(struct vfsmount *mnt)
86 86
87 /* 87 /*
88 * slave 'mnt' to a peer mount that has the 88 * slave 'mnt' to a peer mount that has the
89 * same root dentry. If none is available than 89 * same root dentry. If none is available then
90 * slave it to anything that is available. 90 * slave it to anything that is available.
91 */ 91 */
92 while ((peer_mnt = next_peer(peer_mnt)) != mnt && 92 while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
@@ -147,6 +147,11 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
147 * get the next mount in the propagation tree. 147 * get the next mount in the propagation tree.
148 * @m: the mount seen last 148 * @m: the mount seen last
149 * @origin: the original mount from where the tree walk initiated 149 * @origin: the original mount from where the tree walk initiated
150 *
151 * Note that peer groups form contiguous segments of slave lists.
152 * We rely on that in get_source() to be able to find out if
153 * a vfsmount found while iterating with propagation_next() is
154 * a peer of one we'd found earlier.
150 */ 155 */
151static struct vfsmount *propagation_next(struct vfsmount *m, 156static struct vfsmount *propagation_next(struct vfsmount *m,
152 struct vfsmount *origin) 157 struct vfsmount *origin)
@@ -186,10 +191,6 @@ static struct vfsmount *get_source(struct vfsmount *dest,
186{ 191{
187 struct vfsmount *p_last_src = NULL; 192 struct vfsmount *p_last_src = NULL;
188 struct vfsmount *p_last_dest = NULL; 193 struct vfsmount *p_last_dest = NULL;
189 *type = CL_PROPAGATION;
190
191 if (IS_MNT_SHARED(dest))
192 *type |= CL_MAKE_SHARED;
193 194
194 while (last_dest != dest->mnt_master) { 195 while (last_dest != dest->mnt_master) {
195 p_last_dest = last_dest; 196 p_last_dest = last_dest;
@@ -202,13 +203,18 @@ static struct vfsmount *get_source(struct vfsmount *dest,
202 do { 203 do {
203 p_last_dest = next_peer(p_last_dest); 204 p_last_dest = next_peer(p_last_dest);
204 } while (IS_MNT_NEW(p_last_dest)); 205 } while (IS_MNT_NEW(p_last_dest));
206 /* is that a peer of the earlier? */
207 if (dest == p_last_dest) {
208 *type = CL_MAKE_SHARED;
209 return p_last_src;
210 }
205 } 211 }
206 212 /* slave of the earlier, then */
207 if (dest != p_last_dest) { 213 *type = CL_SLAVE;
208 *type |= CL_SLAVE; 214 /* beginning of peer group among the slaves? */
209 return last_src; 215 if (IS_MNT_SHARED(dest))
210 } else 216 *type |= CL_MAKE_SHARED;
211 return p_last_src; 217 return last_src;
212} 218}
213 219
214/* 220/*
diff --git a/fs/pnode.h b/fs/pnode.h
index 958665d662af..1ea4ae1efcd3 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -21,12 +21,11 @@
21#define CL_SLAVE 0x02 21#define CL_SLAVE 0x02
22#define CL_COPY_ALL 0x04 22#define CL_COPY_ALL 0x04
23#define CL_MAKE_SHARED 0x08 23#define CL_MAKE_SHARED 0x08
24#define CL_PROPAGATION 0x10 24#define CL_PRIVATE 0x10
25#define CL_PRIVATE 0x20
26 25
27static inline void set_mnt_shared(struct vfsmount *mnt) 26static inline void set_mnt_shared(struct vfsmount *mnt)
28{ 27{
29 mnt->mnt_flags &= ~MNT_PNODE_MASK; 28 mnt->mnt_flags &= ~MNT_SHARED_MASK;
30 mnt->mnt_flags |= MNT_SHARED; 29 mnt->mnt_flags |= MNT_SHARED;
31} 30}
32 31
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e42bbd843ed1..746895ddfda1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -647,17 +647,11 @@ static int mounts_release(struct inode *inode, struct file *file)
647static unsigned mounts_poll(struct file *file, poll_table *wait) 647static unsigned mounts_poll(struct file *file, poll_table *wait)
648{ 648{
649 struct proc_mounts *p = file->private_data; 649 struct proc_mounts *p = file->private_data;
650 struct mnt_namespace *ns = p->ns;
651 unsigned res = POLLIN | POLLRDNORM; 650 unsigned res = POLLIN | POLLRDNORM;
652 651
653 poll_wait(file, &ns->poll, wait); 652 poll_wait(file, &p->ns->poll, wait);
654 653 if (mnt_had_events(p))
655 spin_lock(&vfsmount_lock);
656 if (p->event != ns->event) {
657 p->event = ns->event;
658 res |= POLLERR | POLLPRI; 654 res |= POLLERR | POLLPRI;
659 }
660 spin_unlock(&vfsmount_lock);
661 655
662 return res; 656 return res;
663} 657}
@@ -2369,16 +2363,30 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
2369{ 2363{
2370 struct pid_namespace *ns = dentry->d_sb->s_fs_info; 2364 struct pid_namespace *ns = dentry->d_sb->s_fs_info;
2371 pid_t tgid = task_tgid_nr_ns(current, ns); 2365 pid_t tgid = task_tgid_nr_ns(current, ns);
2372 char tmp[PROC_NUMBUF]; 2366 char *name = ERR_PTR(-ENOENT);
2373 if (!tgid) 2367 if (tgid) {
2374 return ERR_PTR(-ENOENT); 2368 name = __getname();
2375 sprintf(tmp, "%d", task_tgid_nr_ns(current, ns)); 2369 if (!name)
2376 return ERR_PTR(vfs_follow_link(nd,tmp)); 2370 name = ERR_PTR(-ENOMEM);
2371 else
2372 sprintf(name, "%d", tgid);
2373 }
2374 nd_set_link(nd, name);
2375 return NULL;
2376}
2377
2378static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
2379 void *cookie)
2380{
2381 char *s = nd_get_link(nd);
2382 if (!IS_ERR(s))
2383 __putname(s);
2377} 2384}
2378 2385
2379static const struct inode_operations proc_self_inode_operations = { 2386static const struct inode_operations proc_self_inode_operations = {
2380 .readlink = proc_self_readlink, 2387 .readlink = proc_self_readlink,
2381 .follow_link = proc_self_follow_link, 2388 .follow_link = proc_self_follow_link,
2389 .put_link = proc_self_put_link,
2382}; 2390};
2383 2391
2384/* 2392/*
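
Both the ocfs2 symlink hunk earlier and this /proc/self change move to the same ->follow_link/->put_link contract: follow_link hands the VFS either an allocated string or an encoded error via nd_set_link(), and put_link frees it only when it is not an error cookie. A user-space analogue of that hand-off follows; every name in it is a stand-in (the kernel side uses nd_set_link()/nd_get_link() and ERR_PTR()/IS_ERR()).

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *link_cookie;                       /* models the nameidata slot */

    static void set_link(char *p)   { link_cookie = p; }
    static char *get_link(void)     { return link_cookie; }
    static int link_is_err(char *p) { return p == NULL; }   /* models IS_ERR() */

    static void follow_link(const char *target)
    {
            /* success: publish an allocated copy; failure: publish the error */
            set_link(target ? strdup(target) : NULL);
    }

    static void put_link(void)
    {
            char *link = get_link();

            if (!link_is_err(link))
                    free(link);                     /* never free an error cookie */
    }

    int main(void)
    {
            follow_link("/proc/1234");
            printf("resolved: %s\n", get_link());
            put_link();

            follow_link(NULL);                      /* e.g. task has no tgid */
            put_link();                             /* must not free anything */
            return 0;
    }
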
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 480cb1065eec..9580abeadeb3 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -662,6 +662,7 @@ struct proc_dir_entry *proc_symlink(const char *name,
662 } 662 }
663 return ent; 663 return ent;
664} 664}
665EXPORT_SYMBOL(proc_symlink);
665 666
666struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, 667struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
667 struct proc_dir_entry *parent) 668 struct proc_dir_entry *parent)
@@ -700,6 +701,7 @@ struct proc_dir_entry *proc_mkdir(const char *name,
700{ 701{
701 return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent); 702 return proc_mkdir_mode(name, S_IRUGO | S_IXUGO, parent);
702} 703}
704EXPORT_SYMBOL(proc_mkdir);
703 705
704struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, 706struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
705 struct proc_dir_entry *parent) 707 struct proc_dir_entry *parent)
@@ -728,6 +730,7 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
728 } 730 }
729 return ent; 731 return ent;
730} 732}
733EXPORT_SYMBOL(create_proc_entry);
731 734
732struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, 735struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
733 struct proc_dir_entry *parent, 736 struct proc_dir_entry *parent,
@@ -762,6 +765,7 @@ out_free:
762out: 765out:
763 return NULL; 766 return NULL;
764} 767}
768EXPORT_SYMBOL(proc_create_data);
765 769
766static void free_proc_entry(struct proc_dir_entry *de) 770static void free_proc_entry(struct proc_dir_entry *de)
767{ 771{
@@ -853,3 +857,4 @@ continue_removing:
853 de->parent->name, de->name, de->subdir->name); 857 de->parent->name, de->name, de->subdir->name);
854 pde_put(de); 858 pde_put(de);
855} 859}
860EXPORT_SYMBOL(remove_proc_entry);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index b080b791d9e3..757c069f2a65 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -220,9 +220,3 @@ void pid_ns_release_proc(struct pid_namespace *ns)
220{ 220{
221 mntput(ns->proc_mnt); 221 mntput(ns->proc_mnt);
222} 222}
223
224EXPORT_SYMBOL(proc_symlink);
225EXPORT_SYMBOL(proc_mkdir);
226EXPORT_SYMBOL(create_proc_entry);
227EXPORT_SYMBOL(proc_create_data);
228EXPORT_SYMBOL(remove_proc_entry);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9087b10209e6..2df0f5c7c60b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1497,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
1497 1497
1498 args.objectid = key->on_disk_key.k_objectid; 1498 args.objectid = key->on_disk_key.k_objectid;
1499 args.dirid = key->on_disk_key.k_dir_id; 1499 args.dirid = key->on_disk_key.k_dir_id;
1500 reiserfs_write_unlock(s);
1500 inode = iget5_locked(s, key->on_disk_key.k_objectid, 1501 inode = iget5_locked(s, key->on_disk_key.k_objectid,
1501 reiserfs_find_actor, reiserfs_init_locked_inode, 1502 reiserfs_find_actor, reiserfs_init_locked_inode,
1502 (void *)(&args)); 1503 (void *)(&args));
1504 reiserfs_write_lock(s);
1503 if (!inode) 1505 if (!inode)
1504 return ERR_PTR(-ENOMEM); 1506 return ERR_PTR(-ENOMEM);
1505 1507
diff --git a/fs/super.c b/fs/super.c
index aff046b0fe78..f35ac6022109 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -568,7 +568,7 @@ out:
568int do_remount_sb(struct super_block *sb, int flags, void *data, int force) 568int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
569{ 569{
570 int retval; 570 int retval;
571 int remount_rw; 571 int remount_rw, remount_ro;
572 572
573 if (sb->s_frozen != SB_UNFROZEN) 573 if (sb->s_frozen != SB_UNFROZEN)
574 return -EBUSY; 574 return -EBUSY;
@@ -583,9 +583,12 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
583 shrink_dcache_sb(sb); 583 shrink_dcache_sb(sb);
584 sync_filesystem(sb); 584 sync_filesystem(sb);
585 585
586 remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
587 remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
588
586 /* If we are remounting RDONLY and current sb is read/write, 589 /* If we are remounting RDONLY and current sb is read/write,
587 make sure there are no rw files opened */ 590 make sure there are no rw files opened */
588 if ((flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY)) { 591 if (remount_ro) {
589 if (force) 592 if (force)
590 mark_files_ro(sb); 593 mark_files_ro(sb);
591 else if (!fs_may_remount_ro(sb)) 594 else if (!fs_may_remount_ro(sb))
@@ -594,7 +597,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
594 if (retval < 0 && retval != -ENOSYS) 597 if (retval < 0 && retval != -ENOSYS)
595 return -EBUSY; 598 return -EBUSY;
596 } 599 }
597 remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
598 600
599 if (sb->s_op->remount_fs) { 601 if (sb->s_op->remount_fs) {
600 retval = sb->s_op->remount_fs(sb, &flags, data); 602 retval = sb->s_op->remount_fs(sb, &flags, data);
@@ -604,6 +606,16 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
604 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); 606 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
605 if (remount_rw) 607 if (remount_rw)
606 vfs_dq_quota_on_remount(sb); 608 vfs_dq_quota_on_remount(sb);
609 /*
610 * Some filesystems modify their metadata via some other path than the
611 * bdev buffer cache (eg. use a private mapping, or directories in
612 * pagecache, etc). Also file data modifications go via their own
613 * mappings. So if we try to mount readonly then copy the filesystem
614 * from bdev, we could get stale data, so invalidate it to give a best
615 * effort at coherency.
616 */
617 if (remount_ro && sb->s_bdev)
618 invalidate_bdev(sb->s_bdev);
607 return 0; 619 return 0;
608} 620}
609 621
@@ -925,6 +937,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
925 if (!mnt) 937 if (!mnt)
926 goto out; 938 goto out;
927 939
940 if (flags & MS_KERNMOUNT)
941 mnt->mnt_flags = MNT_INTERNAL;
942
928 if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) { 943 if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
929 secdata = alloc_secdata(); 944 secdata = alloc_secdata();
930 if (!secdata) 945 if (!secdata)
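
The super.c change computes the remount direction once, up front, and uses the read-only transition both for the open-file check and for the new invalidate_bdev() call. The predicate itself is just two flag tests; a tiny stand-alone check (MS_RDONLY has the kernel value 0x1, everything else here is simplified):

    #include <stdio.h>

    #define MS_RDONLY 0x1UL

    static void classify(unsigned long new_flags, unsigned long cur_flags)
    {
            int remount_ro = (new_flags & MS_RDONLY) && !(cur_flags & MS_RDONLY);
            int remount_rw = !(new_flags & MS_RDONLY) && (cur_flags & MS_RDONLY);

            printf("remount_ro=%d remount_rw=%d\n", remount_ro, remount_rw);
    }

    int main(void)
    {
            classify(MS_RDONLY, 0);          /* rw -> ro: invalidate the bdev cache */
            classify(0, MS_RDONLY);          /* ro -> rw: re-enable quotas */
            classify(MS_RDONLY, MS_RDONLY);  /* no transition */
            return 0;
    }
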
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 220b758523ae..6a06a1d1ea7b 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
81 if (!sd_attrs) 81 if (!sd_attrs)
82 return -ENOMEM; 82 return -ENOMEM;
83 sd->s_iattr = sd_attrs; 83 sd->s_iattr = sd_attrs;
84 } else { 84 }
85 /* attributes were changed at least once in past */ 85 /* attributes were changed at least once in past */
86 iattrs = &sd_attrs->ia_iattr; 86 iattrs = &sd_attrs->ia_iattr;
87 87
88 if (ia_valid & ATTR_UID) 88 if (ia_valid & ATTR_UID)
89 iattrs->ia_uid = iattr->ia_uid; 89 iattrs->ia_uid = iattr->ia_uid;
90 if (ia_valid & ATTR_GID) 90 if (ia_valid & ATTR_GID)
91 iattrs->ia_gid = iattr->ia_gid; 91 iattrs->ia_gid = iattr->ia_gid;
92 if (ia_valid & ATTR_ATIME) 92 if (ia_valid & ATTR_ATIME)
93 iattrs->ia_atime = iattr->ia_atime; 93 iattrs->ia_atime = iattr->ia_atime;
94 if (ia_valid & ATTR_MTIME) 94 if (ia_valid & ATTR_MTIME)
95 iattrs->ia_mtime = iattr->ia_mtime; 95 iattrs->ia_mtime = iattr->ia_mtime;
96 if (ia_valid & ATTR_CTIME) 96 if (ia_valid & ATTR_CTIME)
97 iattrs->ia_ctime = iattr->ia_ctime; 97 iattrs->ia_ctime = iattr->ia_ctime;
98 if (ia_valid & ATTR_MODE) { 98 if (ia_valid & ATTR_MODE) {
99 umode_t mode = iattr->ia_mode; 99 umode_t mode = iattr->ia_mode;
100 iattrs->ia_mode = sd->s_mode = mode; 100 iattrs->ia_mode = sd->s_mode = mode;
101 }
102 } 101 }
103 return 0; 102 return 0;
104} 103}
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 82372e332f08..b2d96f45c12b 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -547,7 +547,7 @@ static void udf_table_free_blocks(struct super_block *sb,
547 } 547 }
548 548
549 if (epos.offset + (2 * adsize) > sb->s_blocksize) { 549 if (epos.offset + (2 * adsize) > sb->s_blocksize) {
550 char *sptr, *dptr; 550 unsigned char *sptr, *dptr;
551 int loffset; 551 int loffset;
552 552
553 brelse(oepos.bh); 553 brelse(oepos.bh);
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 61d9a76a3a69..f0f2a436251e 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -45,8 +45,8 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
45 int block, iblock; 45 int block, iblock;
46 loff_t nf_pos = (filp->f_pos - 1) << 2; 46 loff_t nf_pos = (filp->f_pos - 1) << 2;
47 int flen; 47 int flen;
48 char *fname = NULL; 48 unsigned char *fname = NULL;
49 char *nameptr; 49 unsigned char *nameptr;
50 uint16_t liu; 50 uint16_t liu;
51 uint8_t lfi; 51 uint8_t lfi;
52 loff_t size = udf_ext0_offset(dir) + dir->i_size; 52 loff_t size = udf_ext0_offset(dir) + dir->i_size;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index f90231eb2916..378a7592257c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1672,7 +1672,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1672 return -1; 1672 return -1;
1673 1673
1674 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) { 1674 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
1675 char *sptr, *dptr; 1675 unsigned char *sptr, *dptr;
1676 struct buffer_head *nbh; 1676 struct buffer_head *nbh;
1677 int err, loffset; 1677 int err, loffset;
1678 struct kernel_lb_addr obloc = epos->block; 1678 struct kernel_lb_addr obloc = epos->block;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index cd2115060fdc..7c56ff00cd53 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -34,8 +34,8 @@
34#include <linux/crc-itu-t.h> 34#include <linux/crc-itu-t.h>
35#include <linux/exportfs.h> 35#include <linux/exportfs.h>
36 36
37static inline int udf_match(int len1, const char *name1, int len2, 37static inline int udf_match(int len1, const unsigned char *name1, int len2,
38 const char *name2) 38 const unsigned char *name2)
39{ 39{
40 if (len1 != len2) 40 if (len1 != len2)
41 return 0; 41 return 0;
@@ -142,15 +142,15 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
142} 142}
143 143
144static struct fileIdentDesc *udf_find_entry(struct inode *dir, 144static struct fileIdentDesc *udf_find_entry(struct inode *dir,
145 struct qstr *child, 145 const struct qstr *child,
146 struct udf_fileident_bh *fibh, 146 struct udf_fileident_bh *fibh,
147 struct fileIdentDesc *cfi) 147 struct fileIdentDesc *cfi)
148{ 148{
149 struct fileIdentDesc *fi = NULL; 149 struct fileIdentDesc *fi = NULL;
150 loff_t f_pos; 150 loff_t f_pos;
151 int block, flen; 151 int block, flen;
152 char *fname = NULL; 152 unsigned char *fname = NULL;
153 char *nameptr; 153 unsigned char *nameptr;
154 uint8_t lfi; 154 uint8_t lfi;
155 uint16_t liu; 155 uint16_t liu;
156 loff_t size; 156 loff_t size;
@@ -308,7 +308,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
308{ 308{
309 struct super_block *sb = dir->i_sb; 309 struct super_block *sb = dir->i_sb;
310 struct fileIdentDesc *fi = NULL; 310 struct fileIdentDesc *fi = NULL;
311 char *name = NULL; 311 unsigned char *name = NULL;
312 int namelen; 312 int namelen;
313 loff_t f_pos; 313 loff_t f_pos;
314 loff_t size = udf_ext0_offset(dir) + dir->i_size; 314 loff_t size = udf_ext0_offset(dir) + dir->i_size;
@@ -885,16 +885,16 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
885{ 885{
886 struct inode *inode; 886 struct inode *inode;
887 struct pathComponent *pc; 887 struct pathComponent *pc;
888 char *compstart; 888 const char *compstart;
889 struct udf_fileident_bh fibh; 889 struct udf_fileident_bh fibh;
890 struct extent_position epos = {}; 890 struct extent_position epos = {};
891 int eoffset, elen = 0; 891 int eoffset, elen = 0;
892 struct fileIdentDesc *fi; 892 struct fileIdentDesc *fi;
893 struct fileIdentDesc cfi; 893 struct fileIdentDesc cfi;
894 char *ea; 894 uint8_t *ea;
895 int err; 895 int err;
896 int block; 896 int block;
897 char *name = NULL; 897 unsigned char *name = NULL;
898 int namelen; 898 int namelen;
899 struct buffer_head *bh; 899 struct buffer_head *bh;
900 struct udf_inode_info *iinfo; 900 struct udf_inode_info *iinfo;
@@ -970,7 +970,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
970 970
971 pc = (struct pathComponent *)(ea + elen); 971 pc = (struct pathComponent *)(ea + elen);
972 972
973 compstart = (char *)symname; 973 compstart = symname;
974 974
975 do { 975 do {
976 symname++; 976 symname++;
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index c3265e1385d4..852e91845688 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -32,12 +32,12 @@
32#include <linux/buffer_head.h> 32#include <linux/buffer_head.h>
33#include "udf_i.h" 33#include "udf_i.h"
34 34
35static void udf_pc_to_char(struct super_block *sb, char *from, int fromlen, 35static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
36 char *to) 36 int fromlen, unsigned char *to)
37{ 37{
38 struct pathComponent *pc; 38 struct pathComponent *pc;
39 int elen = 0; 39 int elen = 0;
40 char *p = to; 40 unsigned char *p = to;
41 41
42 while (elen < fromlen) { 42 while (elen < fromlen) {
43 pc = (struct pathComponent *)(from + elen); 43 pc = (struct pathComponent *)(from + elen);
@@ -75,9 +75,9 @@ static int udf_symlink_filler(struct file *file, struct page *page)
75{ 75{
76 struct inode *inode = page->mapping->host; 76 struct inode *inode = page->mapping->host;
77 struct buffer_head *bh = NULL; 77 struct buffer_head *bh = NULL;
78 char *symlink; 78 unsigned char *symlink;
79 int err = -EIO; 79 int err = -EIO;
80 char *p = kmap(page); 80 unsigned char *p = kmap(page);
81 struct udf_inode_info *iinfo; 81 struct udf_inode_info *iinfo;
82 82
83 lock_kernel(); 83 lock_kernel();
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 22af68f8b682..317a0d444f6b 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -31,7 +31,7 @@
31 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller. 31 * len <= UFS_MAXNAMLEN and de != NULL are guaranteed by caller.
32 */ 32 */
33static inline int ufs_match(struct super_block *sb, int len, 33static inline int ufs_match(struct super_block *sb, int len,
34 const char * const name, struct ufs_dir_entry * de) 34 const unsigned char *name, struct ufs_dir_entry *de)
35{ 35{
36 if (len != ufs_get_de_namlen(sb, de)) 36 if (len != ufs_get_de_namlen(sb, de))
37 return 0; 37 return 0;
@@ -70,7 +70,7 @@ static inline unsigned long ufs_dir_pages(struct inode *inode)
70 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; 70 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
71} 71}
72 72
73ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr) 73ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr)
74{ 74{
75 ino_t res = 0; 75 ino_t res = 0;
76 struct ufs_dir_entry *de; 76 struct ufs_dir_entry *de;
@@ -249,11 +249,11 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
249 * (as a parameter - res_dir). Page is returned mapped and unlocked. 249 * (as a parameter - res_dir). Page is returned mapped and unlocked.
250 * Entry is guaranteed to be valid. 250 * Entry is guaranteed to be valid.
251 */ 251 */
252struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr, 252struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
253 struct page **res_page) 253 struct page **res_page)
254{ 254{
255 struct super_block *sb = dir->i_sb; 255 struct super_block *sb = dir->i_sb;
256 const char *name = qstr->name; 256 const unsigned char *name = qstr->name;
257 int namelen = qstr->len; 257 int namelen = qstr->len;
258 unsigned reclen = UFS_DIR_REC_LEN(namelen); 258 unsigned reclen = UFS_DIR_REC_LEN(namelen);
259 unsigned long start, n; 259 unsigned long start, n;
@@ -313,7 +313,7 @@ found:
313int ufs_add_link(struct dentry *dentry, struct inode *inode) 313int ufs_add_link(struct dentry *dentry, struct inode *inode)
314{ 314{
315 struct inode *dir = dentry->d_parent->d_inode; 315 struct inode *dir = dentry->d_parent->d_inode;
316 const char *name = dentry->d_name.name; 316 const unsigned char *name = dentry->d_name.name;
317 int namelen = dentry->d_name.len; 317 int namelen = dentry->d_name.len;
318 struct super_block *sb = dir->i_sb; 318 struct super_block *sb = dir->i_sb;
319 unsigned reclen = UFS_DIR_REC_LEN(namelen); 319 unsigned reclen = UFS_DIR_REC_LEN(namelen);
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 0b4c39bc0d9e..01d0e2a3b230 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
86/* dir.c */ 86/* dir.c */
87extern const struct inode_operations ufs_dir_inode_operations; 87extern const struct inode_operations ufs_dir_inode_operations;
88extern int ufs_add_link (struct dentry *, struct inode *); 88extern int ufs_add_link (struct dentry *, struct inode *);
89extern ino_t ufs_inode_by_name(struct inode *, struct qstr *); 89extern ino_t ufs_inode_by_name(struct inode *, const struct qstr *);
90extern int ufs_make_empty(struct inode *, struct inode *); 90extern int ufs_make_empty(struct inode *, struct inode *);
91extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **); 91extern struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct page **);
92extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *); 92extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
93extern int ufs_empty_dir (struct inode *); 93extern int ufs_empty_dir (struct inode *);
94extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); 94extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
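
The udf and ufs hunks above are all one change: directory-name bytes flow through as unsigned char instead of plain char. With a signed char (the default on x86), any byte >= 0x80 turns negative, which is exactly what you do not want when comparing or hashing on-disk name bytes. A small demonstration:

    #include <stdio.h>

    int main(void)
    {
            char          s = (char)0xe9;   /* e.g. a Latin-1 name byte */
            unsigned char u = 0xe9;

            printf("signed char:   %d\n", (int)s);   /* -23 where char is signed */
            printf("unsigned char: %d\n", (int)u);   /* 233 */
            return 0;
    }
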
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 1e67c441ea82..f745948b61e4 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -77,6 +77,7 @@ struct drm_nouveau_gpuobj_free {
77#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 77#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
78#define NOUVEAU_GETPARAM_CHIPSET_ID 11 78#define NOUVEAU_GETPARAM_CHIPSET_ID 11
79#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 79#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
80#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
80struct drm_nouveau_getparam { 81struct drm_nouveau_getparam {
81 uint64_t param; 82 uint64_t param;
82 uint64_t value; 83 uint64_t value;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 2be7e1249b6f..c7645f480d12 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -68,7 +68,8 @@
68#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 68#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
69#define DRM_VMW_PARAM_3D 2 69#define DRM_VMW_PARAM_3D 2
70#define DRM_VMW_PARAM_FIFO_OFFSET 3 70#define DRM_VMW_PARAM_FIFO_OFFSET 3
71 71#define DRM_VMW_PARAM_HW_CAPS 4
72#define DRM_VMW_PARAM_FIFO_CAPS 5
72 73
73/** 74/**
74 * struct drm_vmw_getparam_arg 75 * struct drm_vmw_getparam_arg
@@ -181,6 +182,8 @@ struct drm_vmw_context_arg {
181 * The size of the array should equal the total number of mipmap levels. 182 * The size of the array should equal the total number of mipmap levels.
182 * @shareable: Boolean whether other clients (as identified by file descriptors) 183 * @shareable: Boolean whether other clients (as identified by file descriptors)
183 * may reference this surface. 184 * may reference this surface.
185 * @scanout: Boolean whether the surface is intended to be used as a
186 * scanout.
184 * 187 *
185 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl. 188 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
186 * Output data from the DRM_VMW_REF_SURFACE Ioctl. 189 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
@@ -192,7 +195,7 @@ struct drm_vmw_surface_create_req {
192 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 195 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
193 uint64_t size_addr; 196 uint64_t size_addr;
194 int32_t shareable; 197 int32_t shareable;
195 uint32_t pad64; 198 int32_t scanout;
196}; 199};
197 200
198/** 201/**
@@ -295,17 +298,28 @@ union drm_vmw_surface_reference_arg {
295 * 298 *
296 * @commands: User-space address of a command buffer cast to an uint64_t. 299 * @commands: User-space address of a command buffer cast to an uint64_t.
297 * @command-size: Size in bytes of the command buffer. 300 * @command-size: Size in bytes of the command buffer.
301 * @throttle-us: Sleep until software is less than @throttle_us
302 * microseconds ahead of hardware. The driver may round this value
303 * to the nearest kernel tick.
298 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an 304 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
299 * uint64_t. 305 * uint64_t.
306 * @version: Allows expanding the execbuf ioctl parameters without breaking
307 * backwards compatibility, since user-space will always tell the kernel
308 * which version it uses.
309 * @flags: Execbuf flags. None currently.
300 * 310 *
301 * Argument to the DRM_VMW_EXECBUF Ioctl. 311 * Argument to the DRM_VMW_EXECBUF Ioctl.
302 */ 312 */
303 313
314#define DRM_VMW_EXECBUF_VERSION 0
315
304struct drm_vmw_execbuf_arg { 316struct drm_vmw_execbuf_arg {
305 uint64_t commands; 317 uint64_t commands;
306 uint32_t command_size; 318 uint32_t command_size;
307 uint32_t pad64; 319 uint32_t throttle_us;
308 uint64_t fence_rep; 320 uint64_t fence_rep;
321 uint32_t version;
322 uint32_t flags;
309}; 323};
310 324
311/** 325/**
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index ab94335b4bb9..6816be6c3f77 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -1,5 +1,9 @@
1/* 1/*
2 * linux/include/asm-arm/hardware/amba.h 2 * linux/include/amba/bus.h
3 *
4 * This device type deals with ARM PrimeCells and anything else that
5 * presents a proper CID (0xB105F00D) at the end of the I/O register
6 * region or that is derived from a PrimeCell.
3 * 7 *
4 * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. 8 * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
5 * 9 *
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5c8018977efa..1896e868854f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -461,8 +461,7 @@ struct request_queue
461#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ 461#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
462#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 462#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
463#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 463#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
464#define QUEUE_FLAG_CQ 16 /* hardware does queuing */ 464#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
465#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
466 465
467#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 466#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
468 (1 << QUEUE_FLAG_CLUSTER) | \ 467 (1 << QUEUE_FLAG_CLUSTER) | \
@@ -586,7 +585,6 @@ enum {
586 585
587#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 586#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
588#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 587#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
589#define blk_queue_queuing(q) test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
590#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 588#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
591#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 589#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
592#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 590#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b1bcb275b596..5b3182c7eb5f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -729,6 +729,7 @@ struct inode {
729 uid_t i_uid; 729 uid_t i_uid;
730 gid_t i_gid; 730 gid_t i_gid;
731 dev_t i_rdev; 731 dev_t i_rdev;
732 unsigned int i_blkbits;
732 u64 i_version; 733 u64 i_version;
733 loff_t i_size; 734 loff_t i_size;
734#ifdef __NEED_I_SIZE_ORDERED 735#ifdef __NEED_I_SIZE_ORDERED
@@ -738,7 +739,6 @@ struct inode {
738 struct timespec i_mtime; 739 struct timespec i_mtime;
739 struct timespec i_ctime; 740 struct timespec i_ctime;
740 blkcnt_t i_blocks; 741 blkcnt_t i_blocks;
741 unsigned int i_blkbits;
742 unsigned short i_bytes; 742 unsigned short i_bytes;
743 umode_t i_mode; 743 umode_t i_mode;
744 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 744 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
@@ -1305,6 +1305,8 @@ extern int send_sigurg(struct fown_struct *fown);
1305#define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */ 1305#define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */
1306#define MNT_DETACH 0x00000002 /* Just detach from the tree */ 1306#define MNT_DETACH 0x00000002 /* Just detach from the tree */
1307#define MNT_EXPIRE 0x00000004 /* Mark for expiry */ 1307#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
1308#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
1309#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
1308 1310
1309extern struct list_head super_blocks; 1311extern struct list_head super_blocks;
1310extern spinlock_t sb_lock; 1312extern spinlock_t sb_lock;
@@ -1314,9 +1316,9 @@ extern spinlock_t sb_lock;
1314struct super_block { 1316struct super_block {
1315 struct list_head s_list; /* Keep this first */ 1317 struct list_head s_list; /* Keep this first */
1316 dev_t s_dev; /* search index; _not_ kdev_t */ 1318 dev_t s_dev; /* search index; _not_ kdev_t */
1317 unsigned long s_blocksize;
1318 unsigned char s_blocksize_bits;
1319 unsigned char s_dirt; 1319 unsigned char s_dirt;
1320 unsigned char s_blocksize_bits;
1321 unsigned long s_blocksize;
1320 loff_t s_maxbytes; /* Max file size */ 1322 loff_t s_maxbytes; /* Max file size */
1321 struct file_system_type *s_type; 1323 struct file_system_type *s_type;
1322 const struct super_operations *s_op; 1324 const struct super_operations *s_op;
@@ -1357,16 +1359,16 @@ struct super_block {
1357 void *s_fs_info; /* Filesystem private info */ 1359 void *s_fs_info; /* Filesystem private info */
1358 fmode_t s_mode; 1360 fmode_t s_mode;
1359 1361
1362 /* Granularity of c/m/atime in ns.
1363 Cannot be worse than a second */
1364 u32 s_time_gran;
1365
1360 /* 1366 /*
1361 * The next field is for VFS *only*. No filesystems have any business 1367 * The next field is for VFS *only*. No filesystems have any business
1362 * even looking at it. You had been warned. 1368 * even looking at it. You had been warned.
1363 */ 1369 */
1364 struct mutex s_vfs_rename_mutex; /* Kludge */ 1370 struct mutex s_vfs_rename_mutex; /* Kludge */
1365 1371
1366 /* Granularity of c/m/atime in ns.
1367 Cannot be worse than a second */
1368 u32 s_time_gran;
1369
1370 /* 1372 /*
1371 * Filesystem subtype. If non-empty the filesystem type field 1373 * Filesystem subtype. If non-empty the filesystem type field
1372 * in /proc/mounts will be "type.subtype" 1374 * in /proc/mounts will be "type.subtype"
@@ -1794,7 +1796,8 @@ extern int may_umount(struct vfsmount *);
1794extern long do_mount(char *, char *, char *, unsigned long, void *); 1796extern long do_mount(char *, char *, char *, unsigned long, void *);
1795extern struct vfsmount *collect_mounts(struct path *); 1797extern struct vfsmount *collect_mounts(struct path *);
1796extern void drop_collected_mounts(struct vfsmount *); 1798extern void drop_collected_mounts(struct vfsmount *);
1797 1799extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
1800 struct vfsmount *);
1798extern int vfs_statfs(struct dentry *, struct kstatfs *); 1801extern int vfs_statfs(struct dentry *, struct kstatfs *);
1799 1802
1800extern int current_umask(void); 1803extern int current_umask(void);
@@ -2058,12 +2061,6 @@ extern int invalidate_inodes(struct super_block *);
2058unsigned long invalidate_mapping_pages(struct address_space *mapping, 2061unsigned long invalidate_mapping_pages(struct address_space *mapping,
2059 pgoff_t start, pgoff_t end); 2062 pgoff_t start, pgoff_t end);
2060 2063
2061static inline unsigned long __deprecated
2062invalidate_inode_pages(struct address_space *mapping)
2063{
2064 return invalidate_mapping_pages(mapping, 0, ~0UL);
2065}
2066
2067static inline void invalidate_remote_inode(struct inode *inode) 2064static inline void invalidate_remote_inode(struct inode *inode)
2068{ 2065{
2069 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 2066 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
@@ -2132,6 +2129,7 @@ extern struct file * open_exec(const char *);
2132 2129
2133/* fs/dcache.c -- generic fs support functions */ 2130/* fs/dcache.c -- generic fs support functions */
2134extern int is_subdir(struct dentry *, struct dentry *); 2131extern int is_subdir(struct dentry *, struct dentry *);
2132extern int path_is_under(struct path *, struct path *);
2135extern ino_t find_inode_number(struct dentry *, struct qstr *); 2133extern ino_t find_inode_number(struct dentry *, struct qstr *);
2136 2134
2137#include <linux/err.h> 2135#include <linux/err.h>
@@ -2340,8 +2338,6 @@ extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct
2340extern int simple_sync_file(struct file *, struct dentry *, int); 2338extern int simple_sync_file(struct file *, struct dentry *, int);
2341extern int simple_empty(struct dentry *); 2339extern int simple_empty(struct dentry *);
2342extern int simple_readpage(struct file *file, struct page *page); 2340extern int simple_readpage(struct file *file, struct page *page);
2343extern int simple_prepare_write(struct file *file, struct page *page,
2344 unsigned offset, unsigned to);
2345extern int simple_write_begin(struct file *file, struct address_space *mapping, 2341extern int simple_write_begin(struct file *file, struct address_space *mapping,
2346 loff_t pos, unsigned len, unsigned flags, 2342 loff_t pos, unsigned len, unsigned flags,
2347 struct page **pagep, void **fsdata); 2343 struct page **pagep, void **fsdata);
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 070ba0621738..5977b724f7c6 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -44,7 +44,7 @@ static inline int hw_breakpoint_type(struct perf_event *bp)
44 return bp->attr.bp_type; 44 return bp->attr.bp_type;
45} 45}
46 46
47static inline int hw_breakpoint_len(struct perf_event *bp) 47static inline unsigned long hw_breakpoint_len(struct perf_event *bp)
48{ 48{
49 return bp->attr.bp_len; 49 return bp->attr.bp_len;
50} 50}
diff --git a/include/linux/input.h b/include/linux/input.h
index 735ceaf1bc2d..663208afb64c 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -376,6 +376,7 @@ struct input_absinfo {
376#define KEY_DISPLAY_OFF 245 /* display device to off state */ 376#define KEY_DISPLAY_OFF 245 /* display device to off state */
377 377
378#define KEY_WIMAX 246 378#define KEY_WIMAX 246
379#define KEY_RFKILL 247 /* Key that controls all radios */
379 380
380/* Range 248 - 255 is reserved for special needs of AT keyboard driver */ 381/* Range 248 - 255 is reserved for special needs of AT keyboard driver */
381 382
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 6f6c5f300af6..bc0fc795bd35 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -124,7 +124,7 @@ extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
124 */ 124 */
125static inline bool kfifo_initialized(struct kfifo *fifo) 125static inline bool kfifo_initialized(struct kfifo *fifo)
126{ 126{
127 return fifo->buffer != 0; 127 return fifo->buffer != NULL;
128} 128}
129 129
130/** 130/**
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index d74785c2393a..0b89efc6f215 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -35,6 +35,7 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
35extern const struct seq_operations mounts_op; 35extern const struct seq_operations mounts_op;
36extern const struct seq_operations mountinfo_op; 36extern const struct seq_operations mountinfo_op;
37extern const struct seq_operations mountstats_op; 37extern const struct seq_operations mountstats_op;
38extern int mnt_had_events(struct proc_mounts *);
38 39
39#endif 40#endif
40#endif 41#endif
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5d5275364867..ca726ebf50a3 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -34,7 +34,18 @@ struct mnt_namespace;
34 34
35#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ 35#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
36#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ 36#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
37#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */ 37/*
38 * MNT_SHARED_MASK is the set of flags that should be cleared when a
39 * mount becomes shared. Currently, this is only the flag that says a
40 * mount cannot be bind mounted, since this is how we create a mount
41 * that shares events with another mount. If you add a new MNT_*
42 * flag, consider how it interacts with shared mounts.
43 */
44#define MNT_SHARED_MASK (MNT_UNBINDABLE)
45#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)
46
47
48#define MNT_INTERNAL 0x4000
38 49
39struct vfsmount { 50struct vfsmount {
40 struct list_head mnt_hash; 51 struct list_head mnt_hash;
@@ -123,7 +134,6 @@ extern int do_add_mount(struct vfsmount *newmnt, struct path *path,
123 134
124extern void mark_mounts_for_expiry(struct list_head *mounts); 135extern void mark_mounts_for_expiry(struct list_head *mounts);
125 136
126extern spinlock_t vfsmount_lock;
127extern dev_t name_to_dev_t(char *name); 137extern dev_t name_to_dev_t(char *name);
128 138
129#endif /* _LINUX_MOUNT_H */ 139#endif /* _LINUX_MOUNT_H */
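
The mount.h hunk above replaces the old MNT_PNODE_MASK with two documented masks. As a reading aid only (this is not kernel code, and set_shared() below is an invented helper), here is a minimal userspace sketch of how a "clear these flags when the mount becomes shared" mask is meant to be applied:

    #include <stdio.h>

    /* Flag values mirror the hunk above; set_shared() is invented for the
     * illustration. */
    #define MNT_SHARED           0x1000
    #define MNT_UNBINDABLE       0x2000
    #define MNT_SHARED_MASK      (MNT_UNBINDABLE)
    #define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE)

    static unsigned int set_shared(unsigned int flags)
    {
        flags &= ~MNT_SHARED_MASK;   /* drop flags incompatible with sharing */
        return flags | MNT_SHARED;
    }

    int main(void)
    {
        unsigned int flags = MNT_UNBINDABLE;
        printf("before %#x, after %#x\n", flags, set_shared(flags));
        return 0;
    }

Keeping the mask next to the individual flag definitions is exactly what the new comment in the hunk asks future MNT_* additions to respect.
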
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8fa71874113f..a177698d95e2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -211,11 +211,9 @@ struct perf_event_attr {
211 __u32 wakeup_watermark; /* bytes before wakeup */ 211 __u32 wakeup_watermark; /* bytes before wakeup */
212 }; 212 };
213 213
214 __u32 __reserved_2;
215
216 __u64 bp_addr;
217 __u32 bp_type; 214 __u32 bp_type;
218 __u32 bp_len; 215 __u64 bp_addr;
216 __u64 bp_len;
219}; 217};
220 218
221/* 219/*
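
The perf_event_attr change above drops the __reserved_2 padding word, reorders the breakpoint fields and widens bp_len to 64 bits, which moves offsets in a userspace-visible structure. A reduced illustration follows; it contains only the fields touched by this hunk, not the full perf_event_attr ABI, and the struct names are invented:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct old_tail {                /* before: pad word, 32-bit bp_len */
        uint32_t reserved_2;         /* __reserved_2 in the hunk */
        uint64_t bp_addr;
        uint32_t bp_type;
        uint32_t bp_len;
    };

    struct new_tail {                /* after: pad dropped, bp_len is 64-bit */
        uint32_t bp_type;
        uint64_t bp_addr;
        uint64_t bp_len;
    };

    int main(void)
    {
        printf("old: bp_len at offset %zu, width %zu\n",
               offsetof(struct old_tail, bp_len),
               sizeof(((struct old_tail *)0)->bp_len));
        printf("new: bp_len at offset %zu, width %zu\n",
               offsetof(struct new_tail, bp_len),
               sizeof(((struct new_tail *)0)->bp_len));
        return 0;
    }
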
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index ba1ba0c5efd1..63d449807d9b 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -11,6 +11,8 @@ struct nf_conntrack_ecache;
11struct netns_ct { 11struct netns_ct {
12 atomic_t count; 12 atomic_t count;
13 unsigned int expect_count; 13 unsigned int expect_count;
14 unsigned int htable_size;
15 struct kmem_cache *nf_conntrack_cachep;
14 struct hlist_nulls_head *hash; 16 struct hlist_nulls_head *hash;
15 struct hlist_head *expect_hash; 17 struct hlist_head *expect_hash;
16 struct hlist_nulls_head unconfirmed; 18 struct hlist_nulls_head unconfirmed;
@@ -28,5 +30,6 @@ struct netns_ct {
28#endif 30#endif
29 int hash_vmalloc; 31 int hash_vmalloc;
30 int expect_vmalloc; 32 int expect_vmalloc;
33 char *slabname;
31}; 34};
32#endif 35#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2eb3814d6258..9a4b8b714079 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,7 @@ struct netns_ipv4 {
40 struct xt_table *iptable_security; 40 struct xt_table *iptable_security;
41 struct xt_table *nat_table; 41 struct xt_table *nat_table;
42 struct hlist_head *nat_bysource; 42 struct hlist_head *nat_bysource;
43 unsigned int nat_htable_size;
43 int nat_vmalloced; 44 int nat_vmalloced;
44#endif 45#endif
45 46
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 614241b5200c..2b108538d0d9 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -30,11 +30,7 @@ static int __init do_linuxrc(void * shell)
30 extern char * envp_init[]; 30 extern char * envp_init[];
31 31
32 sys_close(old_fd);sys_close(root_fd); 32 sys_close(old_fd);sys_close(root_fd);
33 sys_close(0);sys_close(1);sys_close(2);
34 sys_setsid(); 33 sys_setsid();
35 (void) sys_open("/dev/console",O_RDWR,0);
36 (void) sys_dup(0);
37 (void) sys_dup(0);
38 return kernel_execve(shell, argv, envp_init); 34 return kernel_execve(shell, argv, envp_init);
39} 35}
40 36
diff --git a/init/main.c b/init/main.c
index 4cb47a159f02..106e02d7ffa5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -806,11 +806,6 @@ static noinline int init_post(void)
806 system_state = SYSTEM_RUNNING; 806 system_state = SYSTEM_RUNNING;
807 numa_default_policy(); 807 numa_default_policy();
808 808
809 if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
810 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
811
812 (void) sys_dup(0);
813 (void) sys_dup(0);
814 809
815 current->signal->flags |= SIGNAL_UNKILLABLE; 810 current->signal->flags |= SIGNAL_UNKILLABLE;
816 811
@@ -873,6 +868,12 @@ static int __init kernel_init(void * unused)
873 868
874 do_basic_setup(); 869 do_basic_setup();
875 870
871 /* Open the /dev/console on the rootfs, this should never fail */
872 if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
873 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
874
875 (void) sys_dup(0);
876 (void) sys_dup(0);
876 /* 877 /*
877 * check if there is an early userspace init. If yes, let it do all 878 * check if there is an early userspace init. If yes, let it do all
878 * the work 879 * the work
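
The init/main.c hunks move the /dev/console open and the two sys_dup(0) calls so that fds 0, 1 and 2 exist on the rootfs before early userspace is considered. A plain userspace analogue of that fd wiring, assuming no descriptors are open yet (and using /dev/tty here rather than the boot console):

    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        /* A normal process already has 0/1/2; close them to mimic the
         * early-boot state the hunk is dealing with. */
        close(0); close(1); close(2);

        if (open("/dev/tty", O_RDWR) < 0)   /* lands on fd 0 */
            return 1;                       /* nowhere left to complain */
        (void) dup(0);                      /* becomes fd 1 */
        (void) dup(0);                      /* becomes fd 2 */

        printf("fds 0-2 are wired to the terminal again\n");
        return 0;
    }
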
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c79bd57353e7..b6cb06451f4b 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -134,7 +134,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
134 init_waitqueue_head(&info->wait_q); 134 init_waitqueue_head(&info->wait_q);
135 INIT_LIST_HEAD(&info->e_wait_q[0].list); 135 INIT_LIST_HEAD(&info->e_wait_q[0].list);
136 INIT_LIST_HEAD(&info->e_wait_q[1].list); 136 INIT_LIST_HEAD(&info->e_wait_q[1].list);
137 info->messages = NULL;
138 info->notify_owner = NULL; 137 info->notify_owner = NULL;
139 info->qsize = 0; 138 info->qsize = 0;
140 info->user = NULL; /* set when all is ok */ 139 info->user = NULL; /* set when all is ok */
@@ -146,6 +145,10 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
146 info->attr.mq_msgsize = attr->mq_msgsize; 145 info->attr.mq_msgsize = attr->mq_msgsize;
147 } 146 }
148 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); 147 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
148 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
149 if (!info->messages)
150 goto out_inode;
151
149 mq_bytes = (mq_msg_tblsz + 152 mq_bytes = (mq_msg_tblsz +
150 (info->attr.mq_maxmsg * info->attr.mq_msgsize)); 153 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
151 154
@@ -154,18 +157,12 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
154 u->mq_bytes + mq_bytes > 157 u->mq_bytes + mq_bytes >
155 p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) { 158 p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
156 spin_unlock(&mq_lock); 159 spin_unlock(&mq_lock);
160 kfree(info->messages);
157 goto out_inode; 161 goto out_inode;
158 } 162 }
159 u->mq_bytes += mq_bytes; 163 u->mq_bytes += mq_bytes;
160 spin_unlock(&mq_lock); 164 spin_unlock(&mq_lock);
161 165
162 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
163 if (!info->messages) {
164 spin_lock(&mq_lock);
165 u->mq_bytes -= mq_bytes;
166 spin_unlock(&mq_lock);
167 goto out_inode;
168 }
169 /* all is ok */ 166 /* all is ok */
170 info->user = get_uid(u); 167 info->user = get_uid(u);
171 } else if (S_ISDIR(mode)) { 168 } else if (S_ISDIR(mode)) {
@@ -187,7 +184,7 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
187{ 184{
188 struct inode *inode; 185 struct inode *inode;
189 struct ipc_namespace *ns = data; 186 struct ipc_namespace *ns = data;
190 int error = 0; 187 int error;
191 188
192 sb->s_blocksize = PAGE_CACHE_SIZE; 189 sb->s_blocksize = PAGE_CACHE_SIZE;
193 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 190 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -205,7 +202,9 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
205 if (!sb->s_root) { 202 if (!sb->s_root) {
206 iput(inode); 203 iput(inode);
207 error = -ENOMEM; 204 error = -ENOMEM;
205 goto out;
208 } 206 }
207 error = 0;
209 208
210out: 209out:
211 return error; 210 return error;
@@ -264,8 +263,9 @@ static void mqueue_delete_inode(struct inode *inode)
264 263
265 clear_inode(inode); 264 clear_inode(inode);
266 265
267 mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) + 266 /* Total amount of bytes accounted for the mqueue */
268 (info->attr.mq_maxmsg * info->attr.mq_msgsize)); 267 mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
268 + info->attr.mq_msgsize);
269 user = info->user; 269 user = info->user;
270 if (user) { 270 if (user) {
271 spin_lock(&mq_lock); 271 spin_lock(&mq_lock);
@@ -604,8 +604,8 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
604 /* check for overflow */ 604 /* check for overflow */
605 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) 605 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
606 return 0; 606 return 0;
607 if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) + 607 if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
608 (attr->mq_maxmsg * sizeof (struct msg_msg *)) < 608 + sizeof (struct msg_msg *))) <
609 (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize)) 609 (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
610 return 0; 610 return 0;
611 return 1; 611 return 1;
@@ -623,9 +623,10 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
623 int ret; 623 int ret;
624 624
625 if (attr) { 625 if (attr) {
626 ret = -EINVAL; 626 if (!mq_attr_ok(ipc_ns, attr)) {
627 if (!mq_attr_ok(ipc_ns, attr)) 627 ret = -EINVAL;
628 goto out; 628 goto out;
629 }
629 /* store for use during create */ 630 /* store for use during create */
630 dentry->d_fsdata = attr; 631 dentry->d_fsdata = attr;
631 } 632 }
@@ -659,24 +660,28 @@ out:
659static struct file *do_open(struct ipc_namespace *ipc_ns, 660static struct file *do_open(struct ipc_namespace *ipc_ns,
660 struct dentry *dentry, int oflag) 661 struct dentry *dentry, int oflag)
661{ 662{
663 int ret;
662 const struct cred *cred = current_cred(); 664 const struct cred *cred = current_cred();
663 665
664 static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, 666 static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
665 MAY_READ | MAY_WRITE }; 667 MAY_READ | MAY_WRITE };
666 668
667 if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) { 669 if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
668 dput(dentry); 670 ret = -EINVAL;
669 mntput(ipc_ns->mq_mnt); 671 goto err;
670 return ERR_PTR(-EINVAL);
671 } 672 }
672 673
673 if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) { 674 if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
674 dput(dentry); 675 ret = -EACCES;
675 mntput(ipc_ns->mq_mnt); 676 goto err;
676 return ERR_PTR(-EACCES);
677 } 677 }
678 678
679 return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred); 679 return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
680
681err:
682 dput(dentry);
683 mntput(ipc_ns->mq_mnt);
684 return ERR_PTR(ret);
680} 685}
681 686
682SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode, 687SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
@@ -705,16 +710,17 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
705 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); 710 dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
706 if (IS_ERR(dentry)) { 711 if (IS_ERR(dentry)) {
707 error = PTR_ERR(dentry); 712 error = PTR_ERR(dentry);
708 goto out_err; 713 goto out_putfd;
709 } 714 }
710 mntget(ipc_ns->mq_mnt); 715 mntget(ipc_ns->mq_mnt);
711 716
712 if (oflag & O_CREAT) { 717 if (oflag & O_CREAT) {
713 if (dentry->d_inode) { /* entry already exists */ 718 if (dentry->d_inode) { /* entry already exists */
714 audit_inode(name, dentry); 719 audit_inode(name, dentry);
715 error = -EEXIST; 720 if (oflag & O_EXCL) {
716 if (oflag & O_EXCL) 721 error = -EEXIST;
717 goto out; 722 goto out;
723 }
718 filp = do_open(ipc_ns, dentry, oflag); 724 filp = do_open(ipc_ns, dentry, oflag);
719 } else { 725 } else {
720 filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root, 726 filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
@@ -722,9 +728,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
722 u_attr ? &attr : NULL); 728 u_attr ? &attr : NULL);
723 } 729 }
724 } else { 730 } else {
725 error = -ENOENT; 731 if (!dentry->d_inode) {
726 if (!dentry->d_inode) 732 error = -ENOENT;
727 goto out; 733 goto out;
734 }
728 audit_inode(name, dentry); 735 audit_inode(name, dentry);
729 filp = do_open(ipc_ns, dentry, oflag); 736 filp = do_open(ipc_ns, dentry, oflag);
730 } 737 }
@@ -742,7 +749,6 @@ out:
742 mntput(ipc_ns->mq_mnt); 749 mntput(ipc_ns->mq_mnt);
743out_putfd: 750out_putfd:
744 put_unused_fd(fd); 751 put_unused_fd(fd);
745out_err:
746 fd = error; 752 fd = error;
747out_upsem: 753out_upsem:
748 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); 754 mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
@@ -872,19 +878,24 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
872 audit_mq_sendrecv(mqdes, msg_len, msg_prio, p); 878 audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
873 timeout = prepare_timeout(p); 879 timeout = prepare_timeout(p);
874 880
875 ret = -EBADF;
876 filp = fget(mqdes); 881 filp = fget(mqdes);
877 if (unlikely(!filp)) 882 if (unlikely(!filp)) {
883 ret = -EBADF;
878 goto out; 884 goto out;
885 }
879 886
880 inode = filp->f_path.dentry->d_inode; 887 inode = filp->f_path.dentry->d_inode;
881 if (unlikely(filp->f_op != &mqueue_file_operations)) 888 if (unlikely(filp->f_op != &mqueue_file_operations)) {
889 ret = -EBADF;
882 goto out_fput; 890 goto out_fput;
891 }
883 info = MQUEUE_I(inode); 892 info = MQUEUE_I(inode);
884 audit_inode(NULL, filp->f_path.dentry); 893 audit_inode(NULL, filp->f_path.dentry);
885 894
886 if (unlikely(!(filp->f_mode & FMODE_WRITE))) 895 if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
896 ret = -EBADF;
887 goto out_fput; 897 goto out_fput;
898 }
888 899
889 if (unlikely(msg_len > info->attr.mq_msgsize)) { 900 if (unlikely(msg_len > info->attr.mq_msgsize)) {
890 ret = -EMSGSIZE; 901 ret = -EMSGSIZE;
@@ -961,19 +972,24 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
961 audit_mq_sendrecv(mqdes, msg_len, 0, p); 972 audit_mq_sendrecv(mqdes, msg_len, 0, p);
962 timeout = prepare_timeout(p); 973 timeout = prepare_timeout(p);
963 974
964 ret = -EBADF;
965 filp = fget(mqdes); 975 filp = fget(mqdes);
966 if (unlikely(!filp)) 976 if (unlikely(!filp)) {
977 ret = -EBADF;
967 goto out; 978 goto out;
979 }
968 980
969 inode = filp->f_path.dentry->d_inode; 981 inode = filp->f_path.dentry->d_inode;
970 if (unlikely(filp->f_op != &mqueue_file_operations)) 982 if (unlikely(filp->f_op != &mqueue_file_operations)) {
983 ret = -EBADF;
971 goto out_fput; 984 goto out_fput;
985 }
972 info = MQUEUE_I(inode); 986 info = MQUEUE_I(inode);
973 audit_inode(NULL, filp->f_path.dentry); 987 audit_inode(NULL, filp->f_path.dentry);
974 988
975 if (unlikely(!(filp->f_mode & FMODE_READ))) 989 if (unlikely(!(filp->f_mode & FMODE_READ))) {
990 ret = -EBADF;
976 goto out_fput; 991 goto out_fput;
992 }
977 993
978 /* checks if buffer is big enough */ 994 /* checks if buffer is big enough */
979 if (unlikely(msg_len < info->attr.mq_msgsize)) { 995 if (unlikely(msg_len < info->attr.mq_msgsize)) {
@@ -1063,13 +1079,14 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1063 1079
1064 /* create the notify skb */ 1080 /* create the notify skb */
1065 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); 1081 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1066 ret = -ENOMEM; 1082 if (!nc) {
1067 if (!nc) 1083 ret = -ENOMEM;
1068 goto out; 1084 goto out;
1069 ret = -EFAULT; 1085 }
1070 if (copy_from_user(nc->data, 1086 if (copy_from_user(nc->data,
1071 notification.sigev_value.sival_ptr, 1087 notification.sigev_value.sival_ptr,
1072 NOTIFY_COOKIE_LEN)) { 1088 NOTIFY_COOKIE_LEN)) {
1089 ret = -EFAULT;
1073 goto out; 1090 goto out;
1074 } 1091 }
1075 1092
@@ -1078,9 +1095,10 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1078 /* and attach it to the socket */ 1095 /* and attach it to the socket */
1079retry: 1096retry:
1080 filp = fget(notification.sigev_signo); 1097 filp = fget(notification.sigev_signo);
1081 ret = -EBADF; 1098 if (!filp) {
1082 if (!filp) 1099 ret = -EBADF;
1083 goto out; 1100 goto out;
1101 }
1084 sock = netlink_getsockbyfilp(filp); 1102 sock = netlink_getsockbyfilp(filp);
1085 fput(filp); 1103 fput(filp);
1086 if (IS_ERR(sock)) { 1104 if (IS_ERR(sock)) {
@@ -1092,7 +1110,7 @@ retry:
1092 timeo = MAX_SCHEDULE_TIMEOUT; 1110 timeo = MAX_SCHEDULE_TIMEOUT;
1093 ret = netlink_attachskb(sock, nc, &timeo, NULL); 1111 ret = netlink_attachskb(sock, nc, &timeo, NULL);
1094 if (ret == 1) 1112 if (ret == 1)
1095 goto retry; 1113 goto retry;
1096 if (ret) { 1114 if (ret) {
1097 sock = NULL; 1115 sock = NULL;
1098 nc = NULL; 1116 nc = NULL;
@@ -1101,14 +1119,17 @@ retry:
1101 } 1119 }
1102 } 1120 }
1103 1121
1104 ret = -EBADF;
1105 filp = fget(mqdes); 1122 filp = fget(mqdes);
1106 if (!filp) 1123 if (!filp) {
1124 ret = -EBADF;
1107 goto out; 1125 goto out;
1126 }
1108 1127
1109 inode = filp->f_path.dentry->d_inode; 1128 inode = filp->f_path.dentry->d_inode;
1110 if (unlikely(filp->f_op != &mqueue_file_operations)) 1129 if (unlikely(filp->f_op != &mqueue_file_operations)) {
1130 ret = -EBADF;
1111 goto out_fput; 1131 goto out_fput;
1132 }
1112 info = MQUEUE_I(inode); 1133 info = MQUEUE_I(inode);
1113 1134
1114 ret = 0; 1135 ret = 0;
@@ -1171,14 +1192,17 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1171 return -EINVAL; 1192 return -EINVAL;
1172 } 1193 }
1173 1194
1174 ret = -EBADF;
1175 filp = fget(mqdes); 1195 filp = fget(mqdes);
1176 if (!filp) 1196 if (!filp) {
1197 ret = -EBADF;
1177 goto out; 1198 goto out;
1199 }
1178 1200
1179 inode = filp->f_path.dentry->d_inode; 1201 inode = filp->f_path.dentry->d_inode;
1180 if (unlikely(filp->f_op != &mqueue_file_operations)) 1202 if (unlikely(filp->f_op != &mqueue_file_operations)) {
1203 ret = -EBADF;
1181 goto out_fput; 1204 goto out_fput;
1205 }
1182 info = MQUEUE_I(inode); 1206 info = MQUEUE_I(inode);
1183 1207
1184 spin_lock(&info->lock); 1208 spin_lock(&info->lock);
@@ -1272,7 +1296,7 @@ static int __init init_mqueue_fs(void)
1272 if (mqueue_inode_cachep == NULL) 1296 if (mqueue_inode_cachep == NULL)
1273 return -ENOMEM; 1297 return -ENOMEM;
1274 1298
1275 /* ignore failues - they are not fatal */ 1299 /* ignore failures - they are not fatal */
1276 mq_sysctl_table = mq_register_sysctl_table(); 1300 mq_sysctl_table = mq_register_sysctl_table();
1277 1301
1278 error = register_filesystem(&mqueue_fs_type); 1302 error = register_filesystem(&mqueue_fs_type);
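
Several mqueue.c hunks above recompute the accounted size as maxmsg * (sizeof(struct msg_msg *) + msgsize) while keeping an overflow check in front of it. Below is a simplified userspace version of that guard, written as explicit "would this wrap?" tests rather than the exact comparison used in the hunk; mq_sizes_ok() is an invented name:

    #include <limits.h>
    #include <stdio.h>

    static int mq_sizes_ok(unsigned long maxmsg, unsigned long msgsize,
                           unsigned long overhead)
    {
        if (maxmsg == 0)
            return 0;
        if (msgsize > ULONG_MAX - overhead)           /* inner sum wraps */
            return 0;
        if (msgsize + overhead > ULONG_MAX / maxmsg)  /* product wraps */
            return 0;
        return 1;
    }

    int main(void)
    {
        unsigned long overhead = sizeof(void *);   /* per-message pointer slot */
        printf("%d\n", mq_sizes_ok(10, 8192, overhead));          /* 1: fits  */
        printf("%d\n", mq_sizes_ok(ULONG_MAX / 2, 16, overhead)); /* 0: wraps */
        return 0;
    }
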
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 4b05bd9479db..028e85663f27 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -548,6 +548,11 @@ int audit_remove_tree_rule(struct audit_krule *rule)
548 return 0; 548 return 0;
549} 549}
550 550
551static int compare_root(struct vfsmount *mnt, void *arg)
552{
553 return mnt->mnt_root->d_inode == arg;
554}
555
551void audit_trim_trees(void) 556void audit_trim_trees(void)
552{ 557{
553 struct list_head cursor; 558 struct list_head cursor;
@@ -559,7 +564,6 @@ void audit_trim_trees(void)
559 struct path path; 564 struct path path;
560 struct vfsmount *root_mnt; 565 struct vfsmount *root_mnt;
561 struct node *node; 566 struct node *node;
562 struct list_head list;
563 int err; 567 int err;
564 568
565 tree = container_of(cursor.next, struct audit_tree, list); 569 tree = container_of(cursor.next, struct audit_tree, list);
@@ -577,24 +581,16 @@ void audit_trim_trees(void)
577 if (!root_mnt) 581 if (!root_mnt)
578 goto skip_it; 582 goto skip_it;
579 583
580 list_add_tail(&list, &root_mnt->mnt_list);
581 spin_lock(&hash_lock); 584 spin_lock(&hash_lock);
582 list_for_each_entry(node, &tree->chunks, list) { 585 list_for_each_entry(node, &tree->chunks, list) {
583 struct audit_chunk *chunk = find_chunk(node); 586 struct inode *inode = find_chunk(node)->watch.inode;
584 struct inode *inode = chunk->watch.inode;
585 struct vfsmount *mnt;
586 node->index |= 1U<<31; 587 node->index |= 1U<<31;
587 list_for_each_entry(mnt, &list, mnt_list) { 588 if (iterate_mounts(compare_root, inode, root_mnt))
588 if (mnt->mnt_root->d_inode == inode) { 589 node->index &= ~(1U<<31);
589 node->index &= ~(1U<<31);
590 break;
591 }
592 }
593 } 590 }
594 spin_unlock(&hash_lock); 591 spin_unlock(&hash_lock);
595 trim_marked(tree); 592 trim_marked(tree);
596 put_tree(tree); 593 put_tree(tree);
597 list_del_init(&list);
598 drop_collected_mounts(root_mnt); 594 drop_collected_mounts(root_mnt);
599skip_it: 595skip_it:
600 mutex_lock(&audit_filter_mutex); 596 mutex_lock(&audit_filter_mutex);
@@ -603,22 +599,6 @@ skip_it:
603 mutex_unlock(&audit_filter_mutex); 599 mutex_unlock(&audit_filter_mutex);
604} 600}
605 601
606static int is_under(struct vfsmount *mnt, struct dentry *dentry,
607 struct path *path)
608{
609 if (mnt != path->mnt) {
610 for (;;) {
611 if (mnt->mnt_parent == mnt)
612 return 0;
613 if (mnt->mnt_parent == path->mnt)
614 break;
615 mnt = mnt->mnt_parent;
616 }
617 dentry = mnt->mnt_mountpoint;
618 }
619 return is_subdir(dentry, path->dentry);
620}
621
622int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op) 602int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
623{ 603{
624 604
@@ -638,13 +618,17 @@ void audit_put_tree(struct audit_tree *tree)
638 put_tree(tree); 618 put_tree(tree);
639} 619}
640 620
621static int tag_mount(struct vfsmount *mnt, void *arg)
622{
623 return tag_chunk(mnt->mnt_root->d_inode, arg);
624}
625
641/* called with audit_filter_mutex */ 626/* called with audit_filter_mutex */
642int audit_add_tree_rule(struct audit_krule *rule) 627int audit_add_tree_rule(struct audit_krule *rule)
643{ 628{
644 struct audit_tree *seed = rule->tree, *tree; 629 struct audit_tree *seed = rule->tree, *tree;
645 struct path path; 630 struct path path;
646 struct vfsmount *mnt, *p; 631 struct vfsmount *mnt;
647 struct list_head list;
648 int err; 632 int err;
649 633
650 list_for_each_entry(tree, &tree_list, list) { 634 list_for_each_entry(tree, &tree_list, list) {
@@ -670,16 +654,9 @@ int audit_add_tree_rule(struct audit_krule *rule)
670 err = -ENOMEM; 654 err = -ENOMEM;
671 goto Err; 655 goto Err;
672 } 656 }
673 list_add_tail(&list, &mnt->mnt_list);
674 657
675 get_tree(tree); 658 get_tree(tree);
676 list_for_each_entry(p, &list, mnt_list) { 659 err = iterate_mounts(tag_mount, tree, mnt);
677 err = tag_chunk(p->mnt_root->d_inode, tree);
678 if (err)
679 break;
680 }
681
682 list_del(&list);
683 drop_collected_mounts(mnt); 660 drop_collected_mounts(mnt);
684 661
685 if (!err) { 662 if (!err) {
@@ -714,31 +691,23 @@ int audit_tag_tree(char *old, char *new)
714{ 691{
715 struct list_head cursor, barrier; 692 struct list_head cursor, barrier;
716 int failed = 0; 693 int failed = 0;
717 struct path path; 694 struct path path1, path2;
718 struct vfsmount *tagged; 695 struct vfsmount *tagged;
719 struct list_head list;
720 struct vfsmount *mnt;
721 struct dentry *dentry;
722 int err; 696 int err;
723 697
724 err = kern_path(new, 0, &path); 698 err = kern_path(new, 0, &path2);
725 if (err) 699 if (err)
726 return err; 700 return err;
727 tagged = collect_mounts(&path); 701 tagged = collect_mounts(&path2);
728 path_put(&path); 702 path_put(&path2);
729 if (!tagged) 703 if (!tagged)
730 return -ENOMEM; 704 return -ENOMEM;
731 705
732 err = kern_path(old, 0, &path); 706 err = kern_path(old, 0, &path1);
733 if (err) { 707 if (err) {
734 drop_collected_mounts(tagged); 708 drop_collected_mounts(tagged);
735 return err; 709 return err;
736 } 710 }
737 mnt = mntget(path.mnt);
738 dentry = dget(path.dentry);
739 path_put(&path);
740
741 list_add_tail(&list, &tagged->mnt_list);
742 711
743 mutex_lock(&audit_filter_mutex); 712 mutex_lock(&audit_filter_mutex);
744 list_add(&barrier, &tree_list); 713 list_add(&barrier, &tree_list);
@@ -746,7 +715,7 @@ int audit_tag_tree(char *old, char *new)
746 715
747 while (cursor.next != &tree_list) { 716 while (cursor.next != &tree_list) {
748 struct audit_tree *tree; 717 struct audit_tree *tree;
749 struct vfsmount *p; 718 int good_one = 0;
750 719
751 tree = container_of(cursor.next, struct audit_tree, list); 720 tree = container_of(cursor.next, struct audit_tree, list);
752 get_tree(tree); 721 get_tree(tree);
@@ -754,30 +723,19 @@ int audit_tag_tree(char *old, char *new)
754 list_add(&cursor, &tree->list); 723 list_add(&cursor, &tree->list);
755 mutex_unlock(&audit_filter_mutex); 724 mutex_unlock(&audit_filter_mutex);
756 725
757 err = kern_path(tree->pathname, 0, &path); 726 err = kern_path(tree->pathname, 0, &path2);
758 if (err) { 727 if (!err) {
759 put_tree(tree); 728 good_one = path_is_under(&path1, &path2);
760 mutex_lock(&audit_filter_mutex); 729 path_put(&path2);
761 continue;
762 } 730 }
763 731
764 spin_lock(&vfsmount_lock); 732 if (!good_one) {
765 if (!is_under(mnt, dentry, &path)) {
766 spin_unlock(&vfsmount_lock);
767 path_put(&path);
768 put_tree(tree); 733 put_tree(tree);
769 mutex_lock(&audit_filter_mutex); 734 mutex_lock(&audit_filter_mutex);
770 continue; 735 continue;
771 } 736 }
772 spin_unlock(&vfsmount_lock);
773 path_put(&path);
774
775 list_for_each_entry(p, &list, mnt_list) {
776 failed = tag_chunk(p->mnt_root->d_inode, tree);
777 if (failed)
778 break;
779 }
780 737
738 failed = iterate_mounts(tag_mount, tree, tagged);
781 if (failed) { 739 if (failed) {
782 put_tree(tree); 740 put_tree(tree);
783 mutex_lock(&audit_filter_mutex); 741 mutex_lock(&audit_filter_mutex);
@@ -818,10 +776,8 @@ int audit_tag_tree(char *old, char *new)
818 } 776 }
819 list_del(&barrier); 777 list_del(&barrier);
820 list_del(&cursor); 778 list_del(&cursor);
821 list_del(&list);
822 mutex_unlock(&audit_filter_mutex); 779 mutex_unlock(&audit_filter_mutex);
823 dput(dentry); 780 path_put(&path1);
824 mntput(mnt);
825 drop_collected_mounts(tagged); 781 drop_collected_mounts(tagged);
826 return failed; 782 return failed;
827} 783}
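
The audit_tree.c rework above replaces open-coded walks over mnt_list with iterate_mounts() plus small callbacks (compare_root(), tag_mount()). The kernel helper itself is only declared in this patch; the sketch below assumes, from the way these callers use it, that the walk stops at the first nonzero callback return and hands that value back:

    #include <stdio.h>

    struct mount { const char *root; struct mount *next; };

    static int iterate(int (*f)(struct mount *, void *), void *arg,
                       struct mount *head)
    {
        int ret = 0;

        for (struct mount *m = head; m; m = m->next)
            if ((ret = f(m, arg)))
                break;          /* first nonzero result wins */
        return ret;
    }

    /* compare_root()-style callback: "is this mount rooted on the object we
     * are looking for?" -- a string pointer stands in for the inode here. */
    static int compare_root(struct mount *m, void *arg)
    {
        return m->root == arg;
    }

    int main(void)
    {
        struct mount c = { "c", NULL }, b = { "b", &c }, a = { "a", &b };

        printf("found: %d\n", iterate(compare_root, (void *)b.root, &a));
        return 0;
    }
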
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 8a5c7d55ac9f..967e66143e11 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -360,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
360int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) 360int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
361{ 361{
362 u64 old_addr = bp->attr.bp_addr; 362 u64 old_addr = bp->attr.bp_addr;
363 u64 old_len = bp->attr.bp_len;
363 int old_type = bp->attr.bp_type; 364 int old_type = bp->attr.bp_type;
364 int old_len = bp->attr.bp_len;
365 int err = 0; 365 int err = 0;
366 366
367 perf_event_disable(bp); 367 perf_event_disable(bp);
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 498cabba225e..35edbe22e9a9 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -80,7 +80,7 @@ int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
80 80
81 buffer = kmalloc(size, gfp_mask); 81 buffer = kmalloc(size, gfp_mask);
82 if (!buffer) { 82 if (!buffer) {
83 _kfifo_init(fifo, 0, 0); 83 _kfifo_init(fifo, NULL, 0);
84 return -ENOMEM; 84 return -ENOMEM;
85 } 85 }
86 86
@@ -97,6 +97,7 @@ EXPORT_SYMBOL(kfifo_alloc);
97void kfifo_free(struct kfifo *fifo) 97void kfifo_free(struct kfifo *fifo)
98{ 98{
99 kfree(fifo->buffer); 99 kfree(fifo->buffer);
100 _kfifo_init(fifo, NULL, 0);
100} 101}
101EXPORT_SYMBOL(kfifo_free); 102EXPORT_SYMBOL(kfifo_free);
102 103
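
kfifo_free() above now re-initializes the fifo after freeing its buffer, and kfifo_initialized() tests the pointer against NULL. A simplified stand-in (not the kernel kfifo) showing why resetting on free keeps the "initialized?" test meaningful:

    #include <stdlib.h>
    #include <stdio.h>
    #include <stdbool.h>

    struct fifo { unsigned char *buffer; unsigned int size; };

    static bool fifo_initialized(const struct fifo *f)
    {
        return f->buffer != NULL;
    }

    static int fifo_alloc(struct fifo *f, unsigned int size)
    {
        f->buffer = malloc(size);
        f->size = f->buffer ? size : 0;
        return f->buffer ? 0 : -1;
    }

    static void fifo_free(struct fifo *f)
    {
        free(f->buffer);
        f->buffer = NULL;       /* the part the hunk above adds */
        f->size = 0;
    }

    int main(void)
    {
        struct fifo f;

        if (fifo_alloc(&f, 64))
            return 1;
        fifo_free(&f);
        printf("initialized after free: %d\n", fifo_initialized(&f)); /* 0 */
        return 0;
    }
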
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index d27746bd3a06..2ae7409bf38f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
3259 task_event->event_id.tid = perf_event_tid(event, task); 3259 task_event->event_id.tid = perf_event_tid(event, task);
3260 task_event->event_id.ptid = perf_event_tid(event, current); 3260 task_event->event_id.ptid = perf_event_tid(event, current);
3261 3261
3262 task_event->event_id.time = perf_clock();
3263
3264 perf_output_put(&handle, task_event->event_id); 3262 perf_output_put(&handle, task_event->event_id);
3265 3263
3266 perf_output_end(&handle); 3264 perf_output_end(&handle);
@@ -3268,7 +3266,7 @@ static void perf_event_task_output(struct perf_event *event,
3268 3266
3269static int perf_event_task_match(struct perf_event *event) 3267static int perf_event_task_match(struct perf_event *event)
3270{ 3268{
3271 if (event->state != PERF_EVENT_STATE_ACTIVE) 3269 if (event->state < PERF_EVENT_STATE_INACTIVE)
3272 return 0; 3270 return 0;
3273 3271
3274 if (event->cpu != -1 && event->cpu != smp_processor_id()) 3272 if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3300,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
3300 cpuctx = &get_cpu_var(perf_cpu_context); 3298 cpuctx = &get_cpu_var(perf_cpu_context);
3301 perf_event_task_ctx(&cpuctx->ctx, task_event); 3299 perf_event_task_ctx(&cpuctx->ctx, task_event);
3302 if (!ctx) 3300 if (!ctx)
3303 ctx = rcu_dereference(task_event->task->perf_event_ctxp); 3301 ctx = rcu_dereference(current->perf_event_ctxp);
3304 if (ctx) 3302 if (ctx)
3305 perf_event_task_ctx(ctx, task_event); 3303 perf_event_task_ctx(ctx, task_event);
3306 put_cpu_var(perf_cpu_context); 3304 put_cpu_var(perf_cpu_context);
@@ -3331,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
3331 /* .ppid */ 3329 /* .ppid */
3332 /* .tid */ 3330 /* .tid */
3333 /* .ptid */ 3331 /* .ptid */
3332 .time = perf_clock(),
3334 }, 3333 },
3335 }; 3334 };
3336 3335
@@ -3380,7 +3379,7 @@ static void perf_event_comm_output(struct perf_event *event,
3380 3379
3381static int perf_event_comm_match(struct perf_event *event) 3380static int perf_event_comm_match(struct perf_event *event)
3382{ 3381{
3383 if (event->state != PERF_EVENT_STATE_ACTIVE) 3382 if (event->state < PERF_EVENT_STATE_INACTIVE)
3384 return 0; 3383 return 0;
3385 3384
3386 if (event->cpu != -1 && event->cpu != smp_processor_id()) 3385 if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3500,7 +3499,7 @@ static void perf_event_mmap_output(struct perf_event *event,
3500static int perf_event_mmap_match(struct perf_event *event, 3499static int perf_event_mmap_match(struct perf_event *event,
3501 struct perf_mmap_event *mmap_event) 3500 struct perf_mmap_event *mmap_event)
3502{ 3501{
3503 if (event->state != PERF_EVENT_STATE_ACTIVE) 3502 if (event->state < PERF_EVENT_STATE_INACTIVE)
3504 return 0; 3503 return 0;
3505 3504
3506 if (event->cpu != -1 && event->cpu != smp_processor_id()) 3505 if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -4580,7 +4579,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
4580 if (attr->type >= PERF_TYPE_MAX) 4579 if (attr->type >= PERF_TYPE_MAX)
4581 return -EINVAL; 4580 return -EINVAL;
4582 4581
4583 if (attr->__reserved_1 || attr->__reserved_2) 4582 if (attr->__reserved_1)
4584 return -EINVAL; 4583 return -EINVAL;
4585 4584
4586 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) 4585 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
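
The three *_match() changes above switch from "state != ACTIVE" to "state < INACTIVE", so events that merely are not scheduled at the moment still match and only disabled or errored ones are filtered out. The enum below is made up for the illustration; the only assumption carried over from the hunk is that off/error states sort below INACTIVE:

    #include <stdio.h>

    enum ev_state {
        EV_ERROR    = -2,
        EV_OFF      = -1,
        EV_INACTIVE =  0,
        EV_ACTIVE   =  1,
    };

    /* Old test: only currently-scheduled events matched. */
    static int match_old(enum ev_state s) { return s == EV_ACTIVE; }

    /* New test: not scheduled right now is fine; only OFF/ERROR are dropped. */
    static int match_new(enum ev_state s) { return s >= EV_INACTIVE; }

    int main(void)
    {
        printf("inactive: old=%d new=%d\n",
               match_old(EV_INACTIVE), match_new(EV_INACTIVE));
        printf("off:      old=%d new=%d\n",
               match_old(EV_OFF), match_new(EV_OFF));
        return 0;
    }
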
diff --git a/kernel/softirq.c b/kernel/softirq.c
index a09502e2ef75..7c1a67ef0274 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -500,22 +500,17 @@ EXPORT_SYMBOL(tasklet_kill);
500 */ 500 */
501 501
502/* 502/*
503 * The trampoline is called when the hrtimer expires. If this is 503 * The trampoline is called when the hrtimer expires. It schedules a tasklet
504 * called from the hrtimer interrupt then we schedule the tasklet as 504 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
505 * the timer callback function expects to run in softirq context. If 505 * hrtimer callback, but from softirq context.
506 * it's called in softirq context anyway (i.e. high resolution timers
507 * disabled) then the hrtimer callback is called right away.
508 */ 506 */
509static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer) 507static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
510{ 508{
511 struct tasklet_hrtimer *ttimer = 509 struct tasklet_hrtimer *ttimer =
512 container_of(timer, struct tasklet_hrtimer, timer); 510 container_of(timer, struct tasklet_hrtimer, timer);
513 511
514 if (hrtimer_is_hres_active(timer)) { 512 tasklet_hi_schedule(&ttimer->tasklet);
515 tasklet_hi_schedule(&ttimer->tasklet); 513 return HRTIMER_NORESTART;
516 return HRTIMER_NORESTART;
517 }
518 return ttimer->function(timer);
519} 514}
520 515
521/* 516/*
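
After the softirq.c change above, the hrtimer trampoline always defers to the tasklet instead of sometimes calling the user callback directly. The single-threaded sketch below shows only the shape of that indirection (a handler that queues work for a later drain pass), not the kernel's tasklet machinery:

    #include <stdio.h>

    struct deferred { void (*fn)(void *); void *arg; };

    static struct deferred queue[8];
    static int queued;

    static void timer_fired(void (*fn)(void *), void *arg)
    {
        if (queued < 8) {               /* defer, never call directly */
            queue[queued].fn = fn;
            queue[queued].arg = arg;
            queued++;
        }
    }

    static void drain_deferred(void)    /* stands in for softirq context */
    {
        for (int i = 0; i < queued; i++)
            queue[i].fn(queue[i].arg);
        queued = 0;
    }

    static void user_callback(void *arg)
    {
        printf("callback runs later, arg=%s\n", (const char *)arg);
    }

    int main(void)
    {
        timer_fired(user_callback, "hello");
        printf("timer handler already returned\n");
        drain_deferred();
        return 0;
    }
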
diff --git a/kernel/sys.c b/kernel/sys.c
index 26a6b73a6b85..18bde979f346 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
222 if (which > PRIO_USER || which < PRIO_PROCESS) 222 if (which > PRIO_USER || which < PRIO_PROCESS)
223 return -EINVAL; 223 return -EINVAL;
224 224
225 rcu_read_lock();
225 read_lock(&tasklist_lock); 226 read_lock(&tasklist_lock);
226 switch (which) { 227 switch (which) {
227 case PRIO_PROCESS: 228 case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
267 } 268 }
268out_unlock: 269out_unlock:
269 read_unlock(&tasklist_lock); 270 read_unlock(&tasklist_lock);
271 rcu_read_unlock();
270 272
271 return retval; 273 return retval;
272} 274}
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 8f5d16e0707a..8cd50d8f9bde 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1331,7 +1331,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
1331 ssize_t result; 1331 ssize_t result;
1332 char *pathname; 1332 char *pathname;
1333 int flags; 1333 int flags;
1334 int acc_mode, fmode; 1334 int acc_mode;
1335 1335
1336 pathname = sysctl_getname(name, nlen, &table); 1336 pathname = sysctl_getname(name, nlen, &table);
1337 result = PTR_ERR(pathname); 1337 result = PTR_ERR(pathname);
@@ -1342,15 +1342,12 @@ static ssize_t binary_sysctl(const int *name, int nlen,
1342 if (oldval && oldlen && newval && newlen) { 1342 if (oldval && oldlen && newval && newlen) {
1343 flags = O_RDWR; 1343 flags = O_RDWR;
1344 acc_mode = MAY_READ | MAY_WRITE; 1344 acc_mode = MAY_READ | MAY_WRITE;
1345 fmode = FMODE_READ | FMODE_WRITE;
1346 } else if (newval && newlen) { 1345 } else if (newval && newlen) {
1347 flags = O_WRONLY; 1346 flags = O_WRONLY;
1348 acc_mode = MAY_WRITE; 1347 acc_mode = MAY_WRITE;
1349 fmode = FMODE_WRITE;
1350 } else if (oldval && oldlen) { 1348 } else if (oldval && oldlen) {
1351 flags = O_RDONLY; 1349 flags = O_RDONLY;
1352 acc_mode = MAY_READ; 1350 acc_mode = MAY_READ;
1353 fmode = FMODE_READ;
1354 } else { 1351 } else {
1355 result = 0; 1352 result = 0;
1356 goto out_putname; 1353 goto out_putname;
@@ -1361,7 +1358,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
1361 if (result) 1358 if (result)
1362 goto out_putname; 1359 goto out_putname;
1363 1360
1364 result = may_open(&nd.path, acc_mode, fmode); 1361 result = may_open(&nd.path, acc_mode, flags);
1365 if (result) 1362 if (result)
1366 goto out_putpath; 1363 goto out_putpath;
1367 1364
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7faaa32fbf4f..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
880 880
881 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); 881 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
882} 882}
883EXPORT_SYMBOL_GPL(getboottime);
883 884
884/** 885/**
885 * monotonic_to_bootbased - Convert the monotonic time to boot based. 886 * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
889{ 890{
890 *ts = timespec_add_safe(*ts, total_sleep_time); 891 *ts = timespec_add_safe(*ts, total_sleep_time);
891} 892}
893EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
892 894
893unsigned long get_seconds(void) 895unsigned long get_seconds(void)
894{ 896{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6ea90c0e2c96..50b1b8239806 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -689,7 +689,7 @@ static int create_trace_probe(int argc, char **argv)
689 return -EINVAL; 689 return -EINVAL;
690 } 690 }
691 /* an address specified */ 691 /* an address specified */
692 ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr); 692 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
693 if (ret) { 693 if (ret) {
694 pr_info("Failed to parse address.\n"); 694 pr_info("Failed to parse address.\n");
695 return ret; 695 return ret;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee30..f4bc9b27de5f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
157 unsigned long val, flags; 157 unsigned long val, flags;
158 char buf[64]; 158 char buf[64];
159 int ret; 159 int ret;
160 int cpu;
160 161
161 if (count >= sizeof(buf)) 162 if (count >= sizeof(buf))
162 return -EINVAL; 163 return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
171 return ret; 172 return ret;
172 173
173 local_irq_save(flags); 174 local_irq_save(flags);
175
176 /*
177 * In case we trace inside arch_spin_lock() or after (NMI),
178 * we will cause circular lock, so we also need to increase
179 * the percpu trace_active here.
180 */
181 cpu = smp_processor_id();
182 per_cpu(trace_active, cpu)++;
183
174 arch_spin_lock(&max_stack_lock); 184 arch_spin_lock(&max_stack_lock);
175 *ptr = val; 185 *ptr = val;
176 arch_spin_unlock(&max_stack_lock); 186 arch_spin_unlock(&max_stack_lock);
187
188 per_cpu(trace_active, cpu)--;
177 local_irq_restore(flags); 189 local_irq_restore(flags);
178 190
179 return count; 191 return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
206 218
207static void *t_start(struct seq_file *m, loff_t *pos) 219static void *t_start(struct seq_file *m, loff_t *pos)
208{ 220{
221 int cpu;
222
209 local_irq_disable(); 223 local_irq_disable();
224
225 cpu = smp_processor_id();
226 per_cpu(trace_active, cpu)++;
227
210 arch_spin_lock(&max_stack_lock); 228 arch_spin_lock(&max_stack_lock);
211 229
212 if (*pos == 0) 230 if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
217 235
218static void t_stop(struct seq_file *m, void *p) 236static void t_stop(struct seq_file *m, void *p)
219{ 237{
238 int cpu;
239
220 arch_spin_unlock(&max_stack_lock); 240 arch_spin_unlock(&max_stack_lock);
241
242 cpu = smp_processor_id();
243 per_cpu(trace_active, cpu)--;
244
221 local_irq_enable(); 245 local_irq_enable();
222} 246}
223 247
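
The trace_stack.c hunks bump the per-CPU trace_active counter around max_stack_lock so the stack tracer cannot recurse into the lock it is already holding. In the sketch below a thread-local counter plays the per-CPU role, and trace_hook() stands in for whatever instrumentation might fire inside the locked region:

    #include <stdio.h>

    static _Thread_local int trace_active;

    static void trace_hook(const char *where)
    {
        if (trace_active)          /* inside the guarded region: bail out */
            return;
        printf("tracing %s\n", where);
    }

    static void update_max(long *slot, long val)
    {
        trace_active++;            /* guard everything the lock could trigger */
        /* arch_spin_lock(&max_stack_lock) would go here */
        trace_hook("update_max");  /* suppressed while the guard is up */
        *slot = val;
        /* arch_spin_unlock(&max_stack_lock) would go here */
        trace_active--;
    }

    int main(void)
    {
        long max = 0;

        trace_hook("main");        /* traced normally */
        update_max(&max, 42);      /* hook suppressed inside */
        printf("max=%ld\n", max);
        return 0;
    }
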
diff --git a/lib/idr.c b/lib/idr.c
index 1cac726c44bc..0dc782216d4b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; 156 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
157 157
158 /* if already at the top layer, we need to grow */ 158 /* if already at the top layer, we need to grow */
159 if (!(p = pa[l])) { 159 if (id >= 1 << (idp->layers * IDR_BITS)) {
160 *starting_id = id; 160 *starting_id = id;
161 return IDR_NEED_TO_GROW; 161 return IDR_NEED_TO_GROW;
162 } 162 }
163 p = pa[l];
164 BUG_ON(!p);
163 165
164 /* If we need to go up one layer, continue the 166 /* If we need to go up one layer, continue the
165 * loop; otherwise, restart from the top. 167 * loop; otherwise, restart from the top.
diff --git a/mm/filemap.c b/mm/filemap.c
index 698ea80f2102..148b52a5bb7e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1117,7 +1117,7 @@ readpage:
1117 if (!PageUptodate(page)) { 1117 if (!PageUptodate(page)) {
1118 if (page->mapping == NULL) { 1118 if (page->mapping == NULL) {
1119 /* 1119 /*
1120 * invalidate_inode_pages got it 1120 * invalidate_mapping_pages got it
1121 */ 1121 */
1122 unlock_page(page); 1122 unlock_page(page);
1123 page_cache_release(page); 1123 page_cache_release(page);
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a0db5bbabe4..880bd592d38e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1002,33 +1002,27 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1002#define DO_PAGES_STAT_CHUNK_NR 16 1002#define DO_PAGES_STAT_CHUNK_NR 16
1003 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; 1003 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1004 int chunk_status[DO_PAGES_STAT_CHUNK_NR]; 1004 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1005 unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1006 int err;
1007 1005
1008 for (i = 0; i < nr_pages; i += chunk_nr) { 1006 while (nr_pages) {
1009 if (chunk_nr > nr_pages - i) 1007 unsigned long chunk_nr;
1010 chunk_nr = nr_pages - i;
1011 1008
1012 err = copy_from_user(chunk_pages, &pages[i], 1009 chunk_nr = nr_pages;
1013 chunk_nr * sizeof(*chunk_pages)); 1010 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1014 if (err) { 1011 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1015 err = -EFAULT; 1012
1016 goto out; 1013 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1017 } 1014 break;
1018 1015
1019 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); 1016 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1020 1017
1021 err = copy_to_user(&status[i], chunk_status, 1018 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1022 chunk_nr * sizeof(*chunk_status)); 1019 break;
1023 if (err) {
1024 err = -EFAULT;
1025 goto out;
1026 }
1027 }
1028 err = 0;
1029 1020
1030out: 1021 pages += chunk_nr;
1031 return err; 1022 status += chunk_nr;
1023 nr_pages -= chunk_nr;
1024 }
1025 return nr_pages ? -EFAULT : 0;
1032} 1026}
1033 1027
1034/* 1028/*
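
do_pages_stat() above becomes a while loop that clamps a chunk, copies it in, processes it, copies the results out and then advances all three cursors. The generic form of that loop, with memcpy() standing in for the user-copy helpers:

    #include <stdio.h>
    #include <string.h>

    #define CHUNK_NR 16

    static void process_chunk(const int *in, int *out, unsigned long nr)
    {
        for (unsigned long i = 0; i < nr; i++)
            out[i] = in[i] * 2;                 /* placeholder work */
    }

    static int process_all(const int *pages, int *status, unsigned long nr)
    {
        int in[CHUNK_NR], out[CHUNK_NR];

        while (nr) {
            unsigned long chunk = nr > CHUNK_NR ? CHUNK_NR : nr;

            memcpy(in, pages, chunk * sizeof(*in));
            process_chunk(in, out, chunk);
            memcpy(status, out, chunk * sizeof(*status));

            pages  += chunk;
            status += chunk;
            nr     -= chunk;
        }
        /* memcpy() cannot fail here, so nr is always 0 by now; the kernel
         * loop breaks early on a failed copy and leftover nr becomes -EFAULT. */
        return nr ? -1 : 0;
    }

    int main(void)
    {
        int pages[40], status[40];

        for (int i = 0; i < 40; i++)
            pages[i] = i;
        process_all(pages, status, 40);
        printf("status[39] = %d\n", status[39]);   /* 78 */
        return 0;
    }
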
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f52481b1c1e5..237050478f28 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -459,6 +459,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
459 list_for_each_entry(c, &p->children, sibling) { 459 list_for_each_entry(c, &p->children, sibling) {
460 if (c->mm == p->mm) 460 if (c->mm == p->mm)
461 continue; 461 continue;
462 if (mem && !task_in_mem_cgroup(c, mem))
463 continue;
462 if (!oom_kill_task(c)) 464 if (!oom_kill_task(c))
463 return 0; 465 return 0;
464 } 466 }
diff --git a/net/9p/client.c b/net/9p/client.c
index 8af95b2dddd6..09d4f1e2e4a8 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
69 69
70static int parse_opts(char *opts, struct p9_client *clnt) 70static int parse_opts(char *opts, struct p9_client *clnt)
71{ 71{
72 char *options; 72 char *options, *tmp_options;
73 char *p; 73 char *p;
74 substring_t args[MAX_OPT_ARGS]; 74 substring_t args[MAX_OPT_ARGS];
75 int option; 75 int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
81 if (!opts) 81 if (!opts)
82 return 0; 82 return 0;
83 83
84 options = kstrdup(opts, GFP_KERNEL); 84 tmp_options = kstrdup(opts, GFP_KERNEL);
85 if (!options) { 85 if (!tmp_options) {
86 P9_DPRINTK(P9_DEBUG_ERROR, 86 P9_DPRINTK(P9_DEBUG_ERROR,
87 "failed to allocate copy of option string\n"); 87 "failed to allocate copy of option string\n");
88 return -ENOMEM; 88 return -ENOMEM;
89 } 89 }
90 options = tmp_options;
90 91
91 while ((p = strsep(&options, ",")) != NULL) { 92 while ((p = strsep(&options, ",")) != NULL) {
92 int token; 93 int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
108 break; 109 break;
109 case Opt_trans: 110 case Opt_trans:
110 clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); 111 clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
112 if(clnt->trans_mod == NULL) {
113 P9_DPRINTK(P9_DEBUG_ERROR,
114 "Could not find request transport: %s\n",
115 (char *) &args[0]);
116 ret = -EINVAL;
117 goto free_and_return;
118 }
111 break; 119 break;
112 case Opt_legacy: 120 case Opt_legacy:
113 clnt->dotu = 0; 121 clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
117 } 125 }
118 } 126 }
119 127
120 kfree(options); 128free_and_return:
129 kfree(tmp_options);
121 return ret; 130 return ret;
122} 131}
123 132
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
667 clnt->trans = NULL; 676 clnt->trans = NULL;
668 spin_lock_init(&clnt->lock); 677 spin_lock_init(&clnt->lock);
669 INIT_LIST_HEAD(&clnt->fidlist); 678 INIT_LIST_HEAD(&clnt->fidlist);
670 clnt->fidpool = p9_idpool_create();
671 if (IS_ERR(clnt->fidpool)) {
672 err = PTR_ERR(clnt->fidpool);
673 clnt->fidpool = NULL;
674 goto error;
675 }
676 679
677 p9_tag_init(clnt); 680 p9_tag_init(clnt);
678 681
679 err = parse_opts(options, clnt); 682 err = parse_opts(options, clnt);
680 if (err < 0) 683 if (err < 0)
681 goto error; 684 goto free_client;
682 685
683 if (!clnt->trans_mod) 686 if (!clnt->trans_mod)
684 clnt->trans_mod = v9fs_get_default_trans(); 687 clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
687 err = -EPROTONOSUPPORT; 690 err = -EPROTONOSUPPORT;
688 P9_DPRINTK(P9_DEBUG_ERROR, 691 P9_DPRINTK(P9_DEBUG_ERROR,
689 "No transport defined or default transport\n"); 692 "No transport defined or default transport\n");
690 goto error; 693 goto free_client;
694 }
695
696 clnt->fidpool = p9_idpool_create();
697 if (IS_ERR(clnt->fidpool)) {
698 err = PTR_ERR(clnt->fidpool);
699 clnt->fidpool = NULL;
700 goto put_trans;
691 } 701 }
692 702
693 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n", 703 P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
695 705
696 err = clnt->trans_mod->create(clnt, dev_name, options); 706 err = clnt->trans_mod->create(clnt, dev_name, options);
697 if (err) 707 if (err)
698 goto error; 708 goto destroy_fidpool;
699 709
700 if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize) 710 if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
701 clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ; 711 clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
702 712
703 err = p9_client_version(clnt); 713 err = p9_client_version(clnt);
704 if (err) 714 if (err)
705 goto error; 715 goto close_trans;
706 716
707 return clnt; 717 return clnt;
708 718
709error: 719close_trans:
710 p9_client_destroy(clnt); 720 clnt->trans_mod->close(clnt);
721destroy_fidpool:
722 p9_idpool_destroy(clnt->fidpool);
723put_trans:
724 v9fs_put_trans(clnt->trans_mod);
725free_client:
726 kfree(clnt);
711 return ERR_PTR(err); 727 return ERR_PTR(err);
712} 728}
713EXPORT_SYMBOL(p9_client_create); 729EXPORT_SYMBOL(p9_client_create);
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
1214{ 1230{
1215 int ret; 1231 int ret;
1216 1232
1233 /* NOTE: size shouldn't include its own length */
1217 /* size[2] type[2] dev[4] qid[13] */ 1234 /* size[2] type[2] dev[4] qid[13] */
1218 /* mode[4] atime[4] mtime[4] length[8]*/ 1235 /* mode[4] atime[4] mtime[4] length[8]*/
1219 /* name[s] uid[s] gid[s] muid[s] */ 1236 /* name[s] uid[s] gid[s] muid[s] */
1220 ret = 2+2+4+13+4+4+4+8+2+2+2+2; 1237 ret = 2+4+13+4+4+4+8+2+2+2+2;
1221 1238
1222 if (wst->name) 1239 if (wst->name)
1223 ret += strlen(wst->name); 1240 ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
1258 wst->name, wst->uid, wst->gid, wst->muid, wst->extension, 1275 wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
1259 wst->n_uid, wst->n_gid, wst->n_muid); 1276 wst->n_uid, wst->n_gid, wst->n_muid);
1260 1277
1261 req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst); 1278 req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
1262 if (IS_ERR(req)) { 1279 if (IS_ERR(req)) {
1263 err = PTR_ERR(req); 1280 err = PTR_ERR(req);
1264 goto error; 1281 goto error;
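
p9_client_create() above now unwinds through a ladder of labels (close_trans, destroy_fidpool, put_trans, free_client), each undoing only the steps that already succeeded, instead of funnelling every failure into p9_client_destroy(). A userspace analogue of that idiom with three fake resources:

    #include <stdlib.h>
    #include <stdio.h>

    struct client { void *a, *b, *c; };

    static struct client *client_create(void)
    {
        struct client *clnt = malloc(sizeof(*clnt));

        if (!clnt)
            return NULL;

        clnt->a = malloc(16);          /* step 1 */
        if (!clnt->a)
            goto free_client;

        clnt->b = malloc(16);          /* step 2 */
        if (!clnt->b)
            goto free_a;

        clnt->c = malloc(16);          /* step 3 */
        if (!clnt->c)
            goto free_b;

        return clnt;

    free_b:                            /* unwind in reverse order */
        free(clnt->b);
    free_a:
        free(clnt->a);
    free_client:
        free(clnt);
        return NULL;
    }

    int main(void)
    {
        struct client *c = client_create();

        printf("%s\n", c ? "created" : "failed");
        if (c) {
            free(c->a); free(c->b); free(c->c); free(c);
        }
        return 0;
    }
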
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index be1cb909d8c0..31d0b05582a9 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
714 char *p; 714 char *p;
715 substring_t args[MAX_OPT_ARGS]; 715 substring_t args[MAX_OPT_ARGS];
716 int option; 716 int option;
717 char *options; 717 char *options, *tmp_options;
718 int ret; 718 int ret;
719 719
720 opts->port = P9_PORT; 720 opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
724 if (!params) 724 if (!params)
725 return 0; 725 return 0;
726 726
727 options = kstrdup(params, GFP_KERNEL); 727 tmp_options = kstrdup(params, GFP_KERNEL);
728 if (!options) { 728 if (!tmp_options) {
729 P9_DPRINTK(P9_DEBUG_ERROR, 729 P9_DPRINTK(P9_DEBUG_ERROR,
730 "failed to allocate copy of option string\n"); 730 "failed to allocate copy of option string\n");
731 return -ENOMEM; 731 return -ENOMEM;
732 } 732 }
733 options = tmp_options;
733 734
734 while ((p = strsep(&options, ",")) != NULL) { 735 while ((p = strsep(&options, ",")) != NULL) {
735 int token; 736 int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
760 continue; 761 continue;
761 } 762 }
762 } 763 }
763 kfree(options); 764
765 kfree(tmp_options);
764 return 0; 766 return 0;
765} 767}
766 768
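
This parse_opts() fix, like the ones in net/9p/client.c above and net/9p/trans_rdma.c below, keeps the kstrdup() result in tmp_options because strsep() advances (and eventually NULLs) the pointer it walks, so the original pointer is the only safe argument to kfree(). A minimal userspace demonstration of the same behaviour with strdup()/free():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char *tmp_options = strdup("port=564,trans=tcp,noextend");
        char *options = tmp_options;   /* walker: strsep() advances this */
        char *p;

        if (!tmp_options)
            return 1;

        while ((p = strsep(&options, ",")) != NULL)
            printf("option: %s\n", p);

        /* options is NULL at this point -- freeing it would leak the buffer. */
        free(tmp_options);
        return 0;
    }
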
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 65cb29db03f8..2c95a89c0f46 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
166 char *p; 166 char *p;
167 substring_t args[MAX_OPT_ARGS]; 167 substring_t args[MAX_OPT_ARGS];
168 int option; 168 int option;
169 char *options; 169 char *options, *tmp_options;
170 int ret; 170 int ret;
171 171
172 opts->port = P9_PORT; 172 opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
177 if (!params) 177 if (!params)
178 return 0; 178 return 0;
179 179
180 options = kstrdup(params, GFP_KERNEL); 180 tmp_options = kstrdup(params, GFP_KERNEL);
181 if (!options) { 181 if (!tmp_options) {
182 P9_DPRINTK(P9_DEBUG_ERROR, 182 P9_DPRINTK(P9_DEBUG_ERROR,
183 "failed to allocate copy of option string\n"); 183 "failed to allocate copy of option string\n");
184 return -ENOMEM; 184 return -ENOMEM;
185 } 185 }
186 options = tmp_options;
186 187
187 while ((p = strsep(&options, ",")) != NULL) { 188 while ((p = strsep(&options, ",")) != NULL) {
188 int token; 189 int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
216 } 217 }
217 /* RQ must be at least as large as the SQ */ 218 /* RQ must be at least as large as the SQ */
218 opts->rq_depth = max(opts->rq_depth, opts->sq_depth); 219 opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
219 kfree(options); 220 kfree(tmp_options);
220 return 0; 221 return 0;
221} 222}
222 223
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ea1e3daabefe..cb50f4ae5eef 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
102 struct virtio_chan *chan = client->trans; 102 struct virtio_chan *chan = client->trans;
103 103
104 mutex_lock(&virtio_9p_lock); 104 mutex_lock(&virtio_9p_lock);
105 chan->inuse = false; 105 if (chan)
106 chan->inuse = false;
106 mutex_unlock(&virtio_9p_lock); 107 mutex_unlock(&virtio_9p_lock);
107} 108}
108 109
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
311 } 312 }
312 313
313 client->trans = (void *)chan; 314 client->trans = (void *)chan;
315 client->status = Connected;
314 chan->client = client; 316 chan->client = client;
315 317
316 return 0; 318 return 0;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7d..b10e3cdb08f8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
377 377
378 if (acl->state == BT_CONNECTED && 378 if (acl->state == BT_CONNECTED &&
379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 379 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
380 acl->power_save = 1;
381 hci_conn_enter_active_mode(acl);
382
380 if (lmp_esco_capable(hdev)) 383 if (lmp_esco_capable(hdev))
381 hci_setup_sync(sco, acl->handle); 384 hci_setup_sync(sco, acl->handle);
382 else 385 else
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796c..592da5c909c1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
1699 break; 1699 break;
1700 1700
1701 case 0x1c: /* SCO interval rejected */ 1701 case 0x1c: /* SCO interval rejected */
1702 case 0x1a: /* Unsupported Remote Feature */
1702 case 0x1f: /* Unspecified error */ 1703 case 0x1f: /* Unspecified error */
1703 if (conn->out && conn->attempt < 2) { 1704 if (conn->out && conn->attempt < 2) {
1704 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 1705 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 6cf526d06e21..fc6ec1e72652 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -703,29 +703,9 @@ static void hidp_close(struct hid_device *hid)
703static int hidp_parse(struct hid_device *hid) 703static int hidp_parse(struct hid_device *hid)
704{ 704{
705 struct hidp_session *session = hid->driver_data; 705 struct hidp_session *session = hid->driver_data;
706 struct hidp_connadd_req *req = session->req;
707 unsigned char *buf;
708 int ret;
709
710 buf = kmalloc(req->rd_size, GFP_KERNEL);
711 if (!buf)
712 return -ENOMEM;
713
714 if (copy_from_user(buf, req->rd_data, req->rd_size)) {
715 kfree(buf);
716 return -EFAULT;
717 }
718
719 ret = hid_parse_report(session->hid, buf, req->rd_size);
720
721 kfree(buf);
722
723 if (ret)
724 return ret;
725
726 session->req = NULL;
727 706
728 return 0; 707 return hid_parse_report(session->hid, session->rd_data,
708 session->rd_size);
729} 709}
730 710
731static int hidp_start(struct hid_device *hid) 711static int hidp_start(struct hid_device *hid)
@@ -770,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session,
770 bdaddr_t src, dst; 750 bdaddr_t src, dst;
771 int err; 751 int err;
772 752
753 session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
754 if (!session->rd_data)
755 return -ENOMEM;
756
757 if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
758 err = -EFAULT;
759 goto fault;
760 }
761 session->rd_size = req->rd_size;
762
773 hid = hid_allocate_device(); 763 hid = hid_allocate_device();
774 if (IS_ERR(hid)) 764 if (IS_ERR(hid)) {
775 return PTR_ERR(hid); 765 err = PTR_ERR(hid);
766 goto fault;
767 }
776 768
777 session->hid = hid; 769 session->hid = hid;
778 session->req = req; 770
779 hid->driver_data = session; 771 hid->driver_data = session;
780 772
781 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src); 773 baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -806,6 +798,10 @@ failed:
806 hid_destroy_device(hid); 798 hid_destroy_device(hid);
807 session->hid = NULL; 799 session->hid = NULL;
808 800
801fault:
802 kfree(session->rd_data);
803 session->rd_data = NULL;
804
809 return err; 805 return err;
810} 806}
811 807
@@ -900,6 +896,9 @@ unlink:
900 session->hid = NULL; 896 session->hid = NULL;
901 } 897 }
902 898
899 kfree(session->rd_data);
900 session->rd_data = NULL;
901
903purge: 902purge:
904 skb_queue_purge(&session->ctrl_transmit); 903 skb_queue_purge(&session->ctrl_transmit);
905 skb_queue_purge(&session->intr_transmit); 904 skb_queue_purge(&session->intr_transmit);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c3586..a4e215d50c10 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
154 struct sk_buff_head ctrl_transmit; 154 struct sk_buff_head ctrl_transmit;
155 struct sk_buff_head intr_transmit; 155 struct sk_buff_head intr_transmit;
156 156
157 struct hidp_connadd_req *req; 157 /* Report descriptor */
158 __u8 *rd_data;
159 uint rd_size;
158}; 160};
159 161
160static inline void hidp_schedule(struct hidp_session *session) 162static inline void hidp_schedule(struct hidp_session *session)
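Editor's note on the hidp changes above: the report descriptor is now copied once, in hidp_setup_hid(), into the session's own rd_data/rd_size fields, so hidp_parse() no longer has to reach back into the connadd request, and the copy is released on the fault path and at session teardown. A userspace-shaped sketch of that ownership pattern; malloc/memcpy stand in for kzalloc/copy_from_user and all names are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct session {
	unsigned char *rd_data;   /* session-owned copy of the descriptor */
	size_t rd_size;
};

/* Copy the caller's descriptor into the session; the session now owns it. */
static int session_setup(struct session *s, const unsigned char *desc, size_t size)
{
	s->rd_data = calloc(1, size);
	if (!s->rd_data)
		return -1;

	memcpy(s->rd_data, desc, size);   /* copy_from_user() in the kernel */
	s->rd_size = size;
	return 0;
}

/* Parsing now only reads memory the session owns. */
static void session_parse(const struct session *s)
{
	printf("parsing %zu descriptor bytes, first byte 0x%02x\n",
	       s->rd_size, s->rd_data[0]);
}

static void session_teardown(struct session *s)
{
	free(s->rd_data);                 /* freed on every exit path */
	s->rd_data = NULL;
	s->rd_size = 0;
}

int main(void)
{
	const unsigned char desc[] = { 0x05, 0x01, 0x09, 0x06 };
	struct session s = { 0 };

	if (session_setup(&s, desc, sizeof(desc)))
		return 1;
	session_parse(&s);
	session_teardown(&s);
	return 0;
}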
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e22..89f4a59eb82b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
252 BT_DBG("session %p state %ld", s, s->state); 252 BT_DBG("session %p state %ld", s, s->state);
253 253
254 set_bit(RFCOMM_TIMED_OUT, &s->flags); 254 set_bit(RFCOMM_TIMED_OUT, &s->flags);
255 rfcomm_session_put(s);
256 rfcomm_schedule(RFCOMM_SCHED_TIMEO); 255 rfcomm_schedule(RFCOMM_SCHED_TIMEO);
257} 256}
258 257
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
1151 break; 1150 break;
1152 1151
1153 case BT_DISCONN: 1152 case BT_DISCONN:
1154 rfcomm_session_put(s); 1153 /* When socket is closed and we are not RFCOMM
1154 * initiator rfcomm_process_rx already calls
1155 * rfcomm_session_put() */
1156 if (s->sock->sk->sk_state != BT_CLOSED)
1157 rfcomm_session_put(s);
1155 break; 1158 break;
1156 } 1159 }
1157 } 1160 }
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
1920 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { 1923 if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
1921 s->state = BT_DISCONN; 1924 s->state = BT_DISCONN;
1922 rfcomm_send_disc(s, 0); 1925 rfcomm_send_disc(s, 0);
1926 rfcomm_session_put(s);
1923 continue; 1927 continue;
1924 } 1928 }
1925 1929
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec3..ec874218b206 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2761,7 +2761,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2761 switch (ret) { 2761 switch (ret) {
2762 case GRO_NORMAL: 2762 case GRO_NORMAL:
2763 case GRO_HELD: 2763 case GRO_HELD:
2764 skb->protocol = eth_type_trans(skb, napi->dev); 2764 skb->protocol = eth_type_trans(skb, skb->dev);
2765 2765
2766 if (ret == GRO_HELD) 2766 if (ret == GRO_HELD)
2767 skb_gro_pull(skb, -ETH_HLEN); 2767 skb_gro_pull(skb, -ETH_HLEN);
diff --git a/net/core/dst.c b/net/core/dst.c
index 57bc4d5b8d08..cb1b3488b739 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -17,6 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <net/net_namespace.h> 19#include <net/net_namespace.h>
20#include <linux/sched.h>
20 21
21#include <net/dst.h> 22#include <net/dst.h>
22 23
@@ -79,6 +80,7 @@ loop:
79 while ((dst = next) != NULL) { 80 while ((dst = next) != NULL) {
80 next = dst->next; 81 next = dst->next;
81 prefetch(&next->next); 82 prefetch(&next->next);
83 cond_resched();
82 if (likely(atomic_read(&dst->__refcnt))) { 84 if (likely(atomic_read(&dst->__refcnt))) {
83 last->next = dst; 85 last->next = dst;
84 last = dst; 86 last = dst;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d8aee584e8d1..236a9988ea91 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -927,6 +927,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
927 case ETHTOOL_GPERMADDR: 927 case ETHTOOL_GPERMADDR:
928 case ETHTOOL_GUFO: 928 case ETHTOOL_GUFO:
929 case ETHTOOL_GGSO: 929 case ETHTOOL_GGSO:
930 case ETHTOOL_GGRO:
930 case ETHTOOL_GFLAGS: 931 case ETHTOOL_GFLAGS:
931 case ETHTOOL_GPFLAGS: 932 case ETHTOOL_GPFLAGS:
932 case ETHTOOL_GRXFH: 933 case ETHTOOL_GRXFH:
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fbc1c7472c5e..099c753c4213 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -410,7 +410,8 @@ static ssize_t wireless_show(struct device *d, char *buf,
410 const struct iw_statistics *iw; 410 const struct iw_statistics *iw;
411 ssize_t ret = -EINVAL; 411 ssize_t ret = -EINVAL;
412 412
413 rtnl_lock(); 413 if (!rtnl_trylock())
414 return restart_syscall();
414 if (dev_isalive(dev)) { 415 if (dev_isalive(dev)) {
415 iw = get_wireless_stats(dev); 416 iw = get_wireless_stats(dev);
416 if (iw) 417 if (iw)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de0c2c726420..2e692afdc55d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg)
3524 wait_event_interruptible_timeout(t->queue, 3524 wait_event_interruptible_timeout(t->queue,
3525 t->control != 0, 3525 t->control != 0,
3526 HZ/10); 3526 HZ/10);
3527 try_to_freeze();
3527 continue; 3528 continue;
3528 } 3529 }
3529 3530
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 57dfb9c8c4f2..ff16e9df1969 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
83 va_list args; 83 va_list args;
84 84
85 va_start(args, fmt); 85 va_start(args, fmt);
86 vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args); 86 vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
87 va_end(args); 87 va_end(args);
88 88
89 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, 89 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 269958bf7fe9..6df6f8ac9636 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
19#include <linux/list.h> 19#include <linux/list.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21
22#define CCID_MAX 255 22/* maximum value for a CCID (RFC 4340, 19.5) */
23#define CCID_MAX 255
24#define CCID_SLAB_NAME_LENGTH 32
23 25
24struct tcp_info; 26struct tcp_info;
25 27
@@ -49,8 +51,8 @@ struct ccid_operations {
49 const char *ccid_name; 51 const char *ccid_name;
50 struct kmem_cache *ccid_hc_rx_slab, 52 struct kmem_cache *ccid_hc_rx_slab,
51 *ccid_hc_tx_slab; 53 *ccid_hc_tx_slab;
52 char ccid_hc_rx_slab_name[32]; 54 char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
53 char ccid_hc_tx_slab_name[32]; 55 char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
54 __u32 ccid_hc_rx_obj_size, 56 __u32 ccid_hc_rx_obj_size,
55 ccid_hc_tx_obj_size; 57 ccid_hc_tx_obj_size;
56 /* Interface Routines */ 58 /* Interface Routines */
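Editor's note on the two DCCP hunks above: inside ccid_kmem_cache_create() the name buffer arrives as a char * parameter, so sizeof(slab_name_fmt) evaluates to the pointer size rather than the 32 bytes declared in struct ccid_operations, and vsnprintf was silently truncating the slab name. Introducing CCID_SLAB_NAME_LENGTH and passing it explicitly fixes that. A small standalone illustration of the array-to-pointer decay, with hypothetical names rather than the DCCP code:

#include <stdio.h>
#include <stdarg.h>

#define SLAB_NAME_LENGTH 32

/* Inside a callee the array has decayed to a pointer. */
static void format_name(char *buf, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	/* WRONG: sizeof(buf) is sizeof(char *), typically 8 */
	printf("sizeof(buf) in callee = %zu\n", sizeof(buf));
	/* RIGHT: pass or #define the real capacity */
	vsnprintf(buf, SLAB_NAME_LENGTH, fmt, args);
	va_end(args);
}

int main(void)
{
	char name[SLAB_NAME_LENGTH];

	printf("sizeof(name) in caller = %zu\n", sizeof(name)); /* 32 */
	format_name(name, "ccid%u_hc_%s_slab", 3u, "tx");
	printf("formatted: %s\n", name);
	return 0;
}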
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index bace1d8cbcfd..f5b3464f1242 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) 161 if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
162 goto err0; 162 goto err0;
163 163
164 ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0), 164 try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
165 "dccp"); 165 "dccp");
166 if (ret) 166 if (ret)
167 goto err1; 167 goto err1;
168 168
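Editor's note on the dccpprobe_init() hunk: in the old code ret received the value of the whole try_then_request_module() expression, so the later "if (ret)" test no longer reflected register_jprobe()'s return code; the new form assigns the jprobe result inside the condition, so the value checked is the real error (or zero on success). The difference in miniature, with an ordinary function instead of the kernel macro:

#include <stdio.h>

/* Stand-in for register_jprobe(): returns 0 on success, -errno on failure. */
static int register_probe(void)
{
	return -19; /* pretend -ENODEV */
}

int main(void)
{
	int ret;

	/* Old pattern: ret holds the truth value of the comparison,
	 * and the actual error code from register_probe() is lost. */
	ret = (register_probe() == 0);
	printf("old pattern: ret = %d (error code lost)\n", ret);

	/* New pattern: the assignment happens inside the condition, so
	 * ret keeps the real return value for the caller to inspect. */
	if ((ret = register_probe()) == 0)
		printf("probe registered\n");
	else
		printf("new pattern: ret = %d (real error propagated)\n", ret);

	return 0;
}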
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f05b653..26dec2be9615 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1317,14 +1317,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
1317{ 1317{
1318 int *valp = ctl->data; 1318 int *valp = ctl->data;
1319 int val = *valp; 1319 int val = *valp;
1320 loff_t pos = *ppos;
1320 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 1321 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1321 1322
1322 if (write && *valp != val) { 1323 if (write && *valp != val) {
1323 struct net *net = ctl->extra2; 1324 struct net *net = ctl->extra2;
1324 1325
1325 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) { 1326 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
1326 if (!rtnl_trylock()) 1327 if (!rtnl_trylock()) {
1328 /* Restore the original values before restarting */
1329 *valp = val;
1330 *ppos = pos;
1327 return restart_syscall(); 1331 return restart_syscall();
1332 }
1328 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) { 1333 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
1329 inet_forward_change(net); 1334 inet_forward_change(net);
1330 } else if (*valp) { 1335 } else if (*valp) {
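Editor's note on devinet_sysctl_forward() (the addrconf.c handlers further down get the same treatment): proc_dointvec() has already updated *valp and *ppos by the time rtnl_trylock() fails, so without the rollback the restarted write would see the new value and a non-zero offset and effectively do nothing. A userspace sketch of the same save/try/restore shape, with a pthread mutex in place of the RTNL; names and the return code are illustrative, not the kernel API:

#include <pthread.h>
#include <stdio.h>

#define ERESTART_DEMO 512   /* stand-in for restart_syscall()'s errno */

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mimics the sysctl handler: the "parser" has already stored the new
 * value and advanced the position before we try to take the lock. */
static int write_forwarding(int *valp, long *ppos, int newval)
{
	int old_val = *valp;        /* save state before "parsing" */
	long old_pos = *ppos;

	*valp = newval;             /* what proc_dointvec() already did */
	*ppos += sizeof(int);

	if (pthread_mutex_trylock(&cfg_lock) != 0) {
		/* Roll back so the restarted write starts from scratch. */
		*valp = old_val;
		*ppos = old_pos;
		return -ERESTART_DEMO;
	}

	/* ... apply the change under the lock ... */
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}

int main(void)
{
	int forwarding = 0;
	long pos = 0;
	int ret;

	pthread_mutex_lock(&cfg_lock);              /* simulate contention */
	ret = write_forwarding(&forwarding, &pos, 1);
	printf("contended: ret=%d val=%d pos=%ld\n", ret, forwarding, pos);
	pthread_mutex_unlock(&cfg_lock);

	ret = write_forwarding(&forwarding, &pos, 1);
	printf("retry:     ret=%d val=%d pos=%ld\n", ret, forwarding, pos);
	return 0;
}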
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c933..a42f658e756a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb)
946 break; 946 break;
947 case IGMP_HOST_MEMBERSHIP_REPORT: 947 case IGMP_HOST_MEMBERSHIP_REPORT:
948 case IGMPV2_HOST_MEMBERSHIP_REPORT: 948 case IGMPV2_HOST_MEMBERSHIP_REPORT:
949 case IGMPV3_HOST_MEMBERSHIP_REPORT:
950 /* Is it our report looped back? */ 949 /* Is it our report looped back? */
951 if (skb_rtable(skb)->fl.iif == 0) 950 if (skb_rtable(skb)->fl.iif == 0)
952 break; 951 break;
@@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb)
960 in_dev_put(in_dev); 959 in_dev_put(in_dev);
961 return pim_rcv_v1(skb); 960 return pim_rcv_v1(skb);
962#endif 961#endif
962 case IGMPV3_HOST_MEMBERSHIP_REPORT:
963 case IGMP_DVMRP: 963 case IGMP_DVMRP:
964 case IGMP_TRACE: 964 case IGMP_TRACE:
965 case IGMP_HOST_LEAVE_MESSAGE: 965 case IGMP_HOST_LEAVE_MESSAGE:
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04150ae..544ce0876f12 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -124,16 +124,12 @@ static int ipcomp4_init_state(struct xfrm_state *x)
124 if (x->props.mode == XFRM_MODE_TUNNEL) { 124 if (x->props.mode == XFRM_MODE_TUNNEL) {
125 err = ipcomp_tunnel_attach(x); 125 err = ipcomp_tunnel_attach(x);
126 if (err) 126 if (err)
127 goto error_tunnel; 127 goto out;
128 } 128 }
129 129
130 err = 0; 130 err = 0;
131out: 131out:
132 return err; 132 return err;
133
134error_tunnel:
135 ipcomp_destroy(x);
136 goto out;
137} 133}
138 134
139static const struct xfrm_type ipcomp_type = { 135static const struct xfrm_type ipcomp_type = {
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5f..90203e1b9187 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
925 if (t && !IS_ERR(t)) { 925 if (t && !IS_ERR(t)) {
926 struct arpt_getinfo info; 926 struct arpt_getinfo info;
927 const struct xt_table_info *private = t->private; 927 const struct xt_table_info *private = t->private;
928
929#ifdef CONFIG_COMPAT 928#ifdef CONFIG_COMPAT
929 struct xt_table_info tmp;
930
930 if (compat) { 931 if (compat) {
931 struct xt_table_info tmp;
932 ret = compat_table_info(private, &tmp); 932 ret = compat_table_info(private, &tmp);
933 xt_compat_flush_offsets(NFPROTO_ARP); 933 xt_compat_flush_offsets(NFPROTO_ARP);
934 private = &tmp; 934 private = &tmp;
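Editor's note on the three get_info() hunks (arp_tables here, ip_tables and ip6_tables below): struct xt_table_info tmp used to be declared inside the "if (compat)" block while "private = &tmp" escaped it, so later reads through private dereferenced storage whose lifetime had ended. Hoisting the declaration to the enclosing #ifdef scope keeps the object alive for as long as the pointer is used. A compact illustration of the scoping rule, using generic structs rather than the xtables types:

#include <stdio.h>

struct table_info {
	unsigned int size;
	unsigned int entries;
};

static void fill_compat(struct table_info *out)
{
	out->size = 64;
	out->entries = 7;
}

static void print_info(int compat, const struct table_info *real)
{
	const struct table_info *private = real;
	struct table_info tmp;          /* declared at function scope: stays
	                                 * alive for every use of 'private' */

	if (compat) {
		/* Buggy variant: declaring 'tmp' *inside* this block while
		 * still doing 'private = &tmp' leaves 'private' dangling
		 * once the block ends, which is what the patch fixes. */
		fill_compat(&tmp);
		private = &tmp;
	}

	/* 'private' may point at 'tmp' here, so 'tmp' must still be live. */
	printf("size=%u entries=%u\n", private->size, private->entries);
}

int main(void)
{
	struct table_info real = { .size = 128, .entries = 12 };

	print_info(0, &real);
	print_info(1, &real);
	return 0;
}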
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 572330a552ef..3ce53cf13d5a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1132 if (t && !IS_ERR(t)) { 1132 if (t && !IS_ERR(t)) {
1133 struct ipt_getinfo info; 1133 struct ipt_getinfo info;
1134 const struct xt_table_info *private = t->private; 1134 const struct xt_table_info *private = t->private;
1135
1136#ifdef CONFIG_COMPAT 1135#ifdef CONFIG_COMPAT
1136 struct xt_table_info tmp;
1137
1137 if (compat) { 1138 if (compat) {
1138 struct xt_table_info tmp;
1139 ret = compat_table_info(private, &tmp); 1139 ret = compat_table_info(private, &tmp);
1140 xt_compat_flush_offsets(AF_INET); 1140 xt_compat_flush_offsets(AF_INET);
1141 private = &tmp; 1141 private = &tmp;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a656..d1ea38a7c490 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
210 }, 210 },
211 { 211 {
212 .procname = "ip_conntrack_buckets", 212 .procname = "ip_conntrack_buckets",
213 .data = &nf_conntrack_htable_size, 213 .data = &init_net.ct.htable_size,
214 .maxlen = sizeof(unsigned int), 214 .maxlen = sizeof(unsigned int),
215 .mode = 0444, 215 .mode = 0444,
216 .proc_handler = proc_dointvec, 216 .proc_handler = proc_dointvec,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda6..2fb7b76da94f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
32 struct hlist_nulls_node *n; 32 struct hlist_nulls_node *n;
33 33
34 for (st->bucket = 0; 34 for (st->bucket = 0;
35 st->bucket < nf_conntrack_htable_size; 35 st->bucket < net->ct.htable_size;
36 st->bucket++) { 36 st->bucket++) {
37 n = rcu_dereference(net->ct.hash[st->bucket].first); 37 n = rcu_dereference(net->ct.hash[st->bucket].first);
38 if (!is_a_nulls(n)) 38 if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
50 head = rcu_dereference(head->next); 50 head = rcu_dereference(head->next);
51 while (is_a_nulls(head)) { 51 while (is_a_nulls(head)) {
52 if (likely(get_nulls_value(head) == st->bucket)) { 52 if (likely(get_nulls_value(head) == st->bucket)) {
53 if (++st->bucket >= nf_conntrack_htable_size) 53 if (++st->bucket >= net->ct.htable_size)
54 return NULL; 54 return NULL;
55 } 55 }
56 head = rcu_dereference(net->ct.hash[st->bucket].first); 56 head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd0..26066a2327ad 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
35 35
36static struct nf_conntrack_l3proto *l3proto __read_mostly; 36static struct nf_conntrack_l3proto *l3proto __read_mostly;
37 37
38/* Calculated at init based on memory size */
39static unsigned int nf_nat_htable_size __read_mostly;
40
41#define MAX_IP_NAT_PROTO 256 38#define MAX_IP_NAT_PROTO 256
42static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] 39static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
43 __read_mostly; 40 __read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
72 69
73/* We keep an extra hash for each conntrack, for fast searching. */ 70/* We keep an extra hash for each conntrack, for fast searching. */
74static inline unsigned int 71static inline unsigned int
75hash_by_src(const struct nf_conntrack_tuple *tuple) 72hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
76{ 73{
77 unsigned int hash; 74 unsigned int hash;
78 75
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
80 hash = jhash_3words((__force u32)tuple->src.u3.ip, 77 hash = jhash_3words((__force u32)tuple->src.u3.ip,
81 (__force u32)tuple->src.u.all, 78 (__force u32)tuple->src.u.all,
82 tuple->dst.protonum, 0); 79 tuple->dst.protonum, 0);
83 return ((u64)hash * nf_nat_htable_size) >> 32; 80 return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
84} 81}
85 82
86/* Is this tuple already taken? (not by us) */ 83/* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
147 struct nf_conntrack_tuple *result, 144 struct nf_conntrack_tuple *result,
148 const struct nf_nat_range *range) 145 const struct nf_nat_range *range)
149{ 146{
150 unsigned int h = hash_by_src(tuple); 147 unsigned int h = hash_by_src(net, tuple);
151 const struct nf_conn_nat *nat; 148 const struct nf_conn_nat *nat;
152 const struct nf_conn *ct; 149 const struct nf_conn *ct;
153 const struct hlist_node *n; 150 const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
330 if (have_to_hash) { 327 if (have_to_hash) {
331 unsigned int srchash; 328 unsigned int srchash;
332 329
333 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 330 srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
334 spin_lock_bh(&nf_nat_lock); 331 spin_lock_bh(&nf_nat_lock);
335 /* nf_conntrack_alter_reply might re-allocate exntension aera */ 332 /* nf_conntrack_alter_reply might re-allocate exntension aera */
336 nat = nfct_nat(ct); 333 nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
679 676
680static int __net_init nf_nat_net_init(struct net *net) 677static int __net_init nf_nat_net_init(struct net *net)
681{ 678{
682 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 679 /* Leave them the same for the moment. */
683 &net->ipv4.nat_vmalloced, 0); 680 net->ipv4.nat_htable_size = net->ct.htable_size;
681 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
682 &net->ipv4.nat_vmalloced, 0);
684 if (!net->ipv4.nat_bysource) 683 if (!net->ipv4.nat_bysource)
685 return -ENOMEM; 684 return -ENOMEM;
686 return 0; 685 return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
703 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 702 nf_ct_iterate_cleanup(net, &clean_nat, NULL);
704 synchronize_rcu(); 703 synchronize_rcu();
705 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, 704 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
706 nf_nat_htable_size); 705 net->ipv4.nat_htable_size);
707} 706}
708 707
709static struct pernet_operations nf_nat_net_ops = { 708static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
724 return ret; 723 return ret;
725 } 724 }
726 725
727 /* Leave them the same for the moment. */
728 nf_nat_htable_size = nf_conntrack_htable_size;
729
730 ret = register_pernet_subsys(&nf_nat_net_ops); 726 ret = register_pernet_subsys(&nf_nat_net_ops);
731 if (ret < 0) 727 if (ret < 0)
732 goto cleanup_extend; 728 goto cleanup_extend;
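Editor's note on the hash_by_src()/htable_size changes: both the NAT and conntrack code map a 32-bit jhash value to a bucket with ((u64)hash * size) >> 32 instead of hash % size. Multiplying a uniformly distributed 32-bit value by the table size and keeping the top 32 bits yields an index in [0, size) without a division. A standalone check of that property:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash to [0, size) the way the conntrack/NAT code does. */
static uint32_t bucket_of(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	const uint32_t size = 16384;         /* example htable_size */
	uint32_t below = 0, above = 0;

	/* Every possible hash lands strictly below 'size'. */
	for (uint64_t h = 0; h <= UINT32_MAX; h += 65537) {
		uint32_t b = bucket_of((uint32_t)h, size);

		if (b >= size) {
			fprintf(stderr, "out of range bucket %u\n", b);
			return 1;
		}
		if (b < size / 2)
			below++;
		else
			above++;
	}
	printf("buckets below/above midpoint: %u / %u\n", below, above);
	return 0;
}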
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28e029632493..3fddc69ccccc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5783,11 +5783,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5783 5783
5784 /* tcp_ack considers this ACK as duplicate 5784 /* tcp_ack considers this ACK as duplicate
5785 * and does not calculate rtt. 5785 * and does not calculate rtt.
5786 * Fix it at least with timestamps. 5786 * Force it here.
5787 */ 5787 */
5788 if (tp->rx_opt.saw_tstamp && 5788 tcp_ack_update_rtt(sk, 0, 0);
5789 tp->rx_opt.rcv_tsecr && !tp->srtt)
5790 tcp_ack_saw_tstamp(sk, 0);
5791 5789
5792 if (tp->rx_opt.tstamp_ok) 5790 if (tp->rx_opt.tstamp_ok)
5793 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5791 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index de7a194a64ab..143791da062c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -502,8 +502,11 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
502 if (p == &net->ipv6.devconf_dflt->forwarding) 502 if (p == &net->ipv6.devconf_dflt->forwarding)
503 return 0; 503 return 0;
504 504
505 if (!rtnl_trylock()) 505 if (!rtnl_trylock()) {
506 /* Restore the original values before restarting */
507 *p = old;
506 return restart_syscall(); 508 return restart_syscall();
509 }
507 510
508 if (p == &net->ipv6.devconf_all->forwarding) { 511 if (p == &net->ipv6.devconf_all->forwarding) {
509 __s32 newf = net->ipv6.devconf_all->forwarding; 512 __s32 newf = net->ipv6.devconf_all->forwarding;
@@ -4028,12 +4031,15 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
4028{ 4031{
4029 int *valp = ctl->data; 4032 int *valp = ctl->data;
4030 int val = *valp; 4033 int val = *valp;
4034 loff_t pos = *ppos;
4031 int ret; 4035 int ret;
4032 4036
4033 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4037 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
4034 4038
4035 if (write) 4039 if (write)
4036 ret = addrconf_fixup_forwarding(ctl, valp, val); 4040 ret = addrconf_fixup_forwarding(ctl, valp, val);
4041 if (ret)
4042 *ppos = pos;
4037 return ret; 4043 return ret;
4038} 4044}
4039 4045
@@ -4075,8 +4081,11 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
4075 if (p == &net->ipv6.devconf_dflt->disable_ipv6) 4081 if (p == &net->ipv6.devconf_dflt->disable_ipv6)
4076 return 0; 4082 return 0;
4077 4083
4078 if (!rtnl_trylock()) 4084 if (!rtnl_trylock()) {
4085 /* Restore the original values before restarting */
4086 *p = old;
4079 return restart_syscall(); 4087 return restart_syscall();
4088 }
4080 4089
4081 if (p == &net->ipv6.devconf_all->disable_ipv6) { 4090 if (p == &net->ipv6.devconf_all->disable_ipv6) {
4082 __s32 newf = net->ipv6.devconf_all->disable_ipv6; 4091 __s32 newf = net->ipv6.devconf_all->disable_ipv6;
@@ -4095,12 +4104,15 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
4095{ 4104{
4096 int *valp = ctl->data; 4105 int *valp = ctl->data;
4097 int val = *valp; 4106 int val = *valp;
4107 loff_t pos = *ppos;
4098 int ret; 4108 int ret;
4099 4109
4100 ret = proc_dointvec(ctl, write, buffer, lenp, ppos); 4110 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
4101 4111
4102 if (write) 4112 if (write)
4103 ret = addrconf_disable_ipv6(ctl, valp, val); 4113 ret = addrconf_disable_ipv6(ctl, valp, val);
4114 if (ret)
4115 *ppos = pos;
4104 return ret; 4116 return ret;
4105} 4117}
4106 4118
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 2f2a5ca2c878..002e6eef9120 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -154,16 +154,12 @@ static int ipcomp6_init_state(struct xfrm_state *x)
154 if (x->props.mode == XFRM_MODE_TUNNEL) { 154 if (x->props.mode == XFRM_MODE_TUNNEL) {
155 err = ipcomp6_tunnel_attach(x); 155 err = ipcomp6_tunnel_attach(x);
156 if (err) 156 if (err)
157 goto error_tunnel; 157 goto out;
158 } 158 }
159 159
160 err = 0; 160 err = 0;
161out: 161out:
162 return err; 162 return err;
163error_tunnel:
164 ipcomp_destroy(x);
165
166 goto out;
167} 163}
168 164
169static const struct xfrm_type ipcomp6_type = 165static const struct xfrm_type ipcomp6_type =
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 480d7f8c9802..8a7e0f52e177 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1164 if (t && !IS_ERR(t)) { 1164 if (t && !IS_ERR(t)) {
1165 struct ip6t_getinfo info; 1165 struct ip6t_getinfo info;
1166 const struct xt_table_info *private = t->private; 1166 const struct xt_table_info *private = t->private;
1167
1168#ifdef CONFIG_COMPAT 1167#ifdef CONFIG_COMPAT
1168 struct xt_table_info tmp;
1169
1169 if (compat) { 1170 if (compat) {
1170 struct xt_table_info tmp;
1171 ret = compat_table_info(private, &tmp); 1171 ret = compat_table_info(private, &tmp);
1172 xt_compat_flush_offsets(AF_INET6); 1172 xt_compat_flush_offsets(AF_INET6);
1173 private = &tmp; 1173 private = &tmp;
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 156020d138b5..6b3602de359a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
698 698
699 /* Query PPP channel and unit number */ 699 /* Query PPP channel and unit number */
700 case PPPIOCGCHAN: 700 case PPPIOCGCHAN:
701 lock_kernel();
701 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), 702 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
702 (int __user *)argp)) 703 (int __user *)argp))
703 err = 0; 704 err = 0;
705 unlock_kernel();
704 break; 706 break;
705 case PPPIOCGUNIT: 707 case PPPIOCGUNIT:
706 lock_kernel(); 708 lock_kernel();
707 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), 709 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
708 (int __user *)argp)) 710 (int __user *)argp))
709 err = 0; 711 err = 0;
712 unlock_kernel();
710 break; 713 break;
711 714
712 /* All these ioctls can be passed both directly and from ppp_generic, 715 /* All these ioctls can be passed both directly and from ppp_generic,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 76fa6fef6473..539f43bc97db 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = {
3794 3794
3795static void __exit ipsec_pfkey_exit(void) 3795static void __exit ipsec_pfkey_exit(void)
3796{ 3796{
3797 unregister_pernet_subsys(&pfkey_net_ops);
3798 xfrm_unregister_km(&pfkeyv2_mgr); 3797 xfrm_unregister_km(&pfkeyv2_mgr);
3799 sock_unregister(PF_KEY); 3798 sock_unregister(PF_KEY);
3799 unregister_pernet_subsys(&pfkey_net_ops);
3800 proto_unregister(&key_proto); 3800 proto_unregister(&key_proto);
3801} 3801}
3802 3802
@@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void)
3807 if (err != 0) 3807 if (err != 0)
3808 goto out; 3808 goto out;
3809 3809
3810 err = sock_register(&pfkey_family_ops); 3810 err = register_pernet_subsys(&pfkey_net_ops);
3811 if (err != 0) 3811 if (err != 0)
3812 goto out_unregister_key_proto; 3812 goto out_unregister_key_proto;
3813 err = sock_register(&pfkey_family_ops);
3814 if (err != 0)
3815 goto out_unregister_pernet;
3813 err = xfrm_register_km(&pfkeyv2_mgr); 3816 err = xfrm_register_km(&pfkeyv2_mgr);
3814 if (err != 0) 3817 if (err != 0)
3815 goto out_sock_unregister; 3818 goto out_sock_unregister;
3816 err = register_pernet_subsys(&pfkey_net_ops);
3817 if (err != 0)
3818 goto out_xfrm_unregister_km;
3819out: 3819out:
3820 return err; 3820 return err;
3821out_xfrm_unregister_km: 3821
3822 xfrm_unregister_km(&pfkeyv2_mgr);
3823out_sock_unregister: 3822out_sock_unregister:
3824 sock_unregister(PF_KEY); 3823 sock_unregister(PF_KEY);
3824out_unregister_pernet:
3825 unregister_pernet_subsys(&pfkey_net_ops);
3825out_unregister_key_proto: 3826out_unregister_key_proto:
3826 proto_unregister(&key_proto); 3827 proto_unregister(&key_proto);
3827 goto out; 3828 goto out;
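Editor's note on the af_key.c reordering: setup and teardown now mirror each other. register_pernet_subsys() happens before sock_register(), ipsec_pfkey_exit() unregisters in exactly the reverse order, and a new goto label is slotted between the existing ones so each failure point unwinds only what was already set up. A skeletal version of that ladder with stub functions, nothing PF_KEY-specific:

#include <stdio.h>

static int register_proto(void)   { puts("proto registered");  return 0; }
static int register_pernet(void)  { puts("pernet registered"); return 0; }
static int register_socket(void)  { puts("socket registered"); return 0; }
static int register_km(void)      { puts("km failed");         return -1; } /* force failure */

static void unregister_proto(void)  { puts("proto unregistered"); }
static void unregister_pernet(void) { puts("pernet unregistered"); }
static void unregister_socket(void) { puts("socket unregistered"); }

static int pfkey_like_init(void)
{
	int err;

	err = register_proto();
	if (err)
		goto out;
	err = register_pernet();
	if (err)
		goto out_unregister_proto;
	err = register_socket();
	if (err)
		goto out_unregister_pernet;
	err = register_km();
	if (err)
		goto out_sock_unregister;
	return 0;

	/* Labels run in reverse registration order, so a failure at any
	 * step releases exactly the resources acquired before it. */
out_sock_unregister:
	unregister_socket();
out_unregister_pernet:
	unregister_pernet();
out_unregister_proto:
	unregister_proto();
out:
	return err;
}

int main(void)
{
	printf("init returned %d\n", pfkey_like_init());
	return 0;
}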
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 1f2db647bb5c..22f0c2aa7a89 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -647,7 +647,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
647 } 647 }
648 if (pos[1] != 0 && 648 if (pos[1] != 0 &&
649 (pos[1] != ifibss->ssid_len || 649 (pos[1] != ifibss->ssid_len ||
650 !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { 650 memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
651 /* Ignore ProbeReq for foreign SSID */ 651 /* Ignore ProbeReq for foreign SSID */
652 return; 652 return;
653 } 653 }
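Editor's note on the one-character ibss.c fix: memcmp() returns 0 when the buffers match, so the old !memcmp(...) condition was true for the interface's own SSID and the probe request was wrongly dropped as foreign; with the ! removed, only genuinely different SSIDs are ignored. The fixed check in isolation, with illustrative names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the fixed check: a request is foreign if the lengths differ
 * or the bytes differ; memcmp() returns 0 only when they are equal. */
static bool is_foreign_ssid(const char *req, size_t req_len,
			    const char *own, size_t own_len)
{
	return req_len != own_len || memcmp(req, own, own_len);
}

int main(void)
{
	const char own[] = "mesh-net";
	size_t own_len = sizeof(own) - 1;

	printf("same SSID foreign?  %d\n",
	       is_foreign_ssid("mesh-net", 8, own, own_len));  /* 0: answer it */
	printf("other SSID foreign? %d\n",
	       is_foreign_ssid("guest", 5, own, own_len));     /* 1: ignore it */
	return 0;
}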
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b9007f80cb92..12a2bff7dcdb 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -245,6 +245,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
245 info->control.rates[i].count = 1; 245 info->control.rates[i].count = 1;
246 } 246 }
247 247
248 if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
249 return;
250
248 if (sta && sdata->force_unicast_rateidx > -1) { 251 if (sta && sdata->force_unicast_rateidx > -1) {
249 info->control.rates[0].idx = sdata->force_unicast_rateidx; 252 info->control.rates[0].idx = sdata->force_unicast_rateidx;
250 } else { 253 } else {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f934c9620b73..bc17cf7d68db 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -439,6 +439,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
439 if (local->scan_req) 439 if (local->scan_req)
440 return -EBUSY; 440 return -EBUSY;
441 441
442 if (req != local->int_scan_req &&
443 sdata->vif.type == NL80211_IFTYPE_STATION &&
444 !list_empty(&ifmgd->work_list)) {
445 /* actually wait for the work it's doing to finish/time out */
446 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
447 local->scan_req = req;
448 local->scan_sdata = sdata;
449 return 0;
450 }
451
442 if (local->ops->hw_scan) { 452 if (local->ops->hw_scan) {
443 u8 *ies; 453 u8 *ies;
444 454
@@ -463,14 +473,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
463 local->scan_req = req; 473 local->scan_req = req;
464 local->scan_sdata = sdata; 474 local->scan_sdata = sdata;
465 475
466 if (req != local->int_scan_req &&
467 sdata->vif.type == NL80211_IFTYPE_STATION &&
468 !list_empty(&ifmgd->work_list)) {
469 /* actually wait for the work it's doing to finish/time out */
470 set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
471 return 0;
472 }
473
474 if (local->ops->hw_scan) 476 if (local->ops->hw_scan)
475 __set_bit(SCAN_HW_SCANNING, &local->scanning); 477 __set_bit(SCAN_HW_SCANNING, &local->scanning);
476 else 478 else
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0e98c3282d42..4d79e3c1616c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/socket.h> 31#include <linux/socket.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/nsproxy.h>
33#include <linux/rculist_nulls.h> 34#include <linux/rculist_nulls.h>
34 35
35#include <net/netfilter/nf_conntrack.h> 36#include <net/netfilter/nf_conntrack.h>
@@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
63struct nf_conn nf_conntrack_untracked __read_mostly; 64struct nf_conn nf_conntrack_untracked __read_mostly;
64EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 65EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
65 66
66static struct kmem_cache *nf_conntrack_cachep __read_mostly;
67
68static int nf_conntrack_hash_rnd_initted; 67static int nf_conntrack_hash_rnd_initted;
69static unsigned int nf_conntrack_hash_rnd; 68static unsigned int nf_conntrack_hash_rnd;
70 69
@@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
86 return ((u64)h * size) >> 32; 85 return ((u64)h * size) >> 32;
87} 86}
88 87
89static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) 88static inline u_int32_t hash_conntrack(const struct net *net,
89 const struct nf_conntrack_tuple *tuple)
90{ 90{
91 return __hash_conntrack(tuple, nf_conntrack_htable_size, 91 return __hash_conntrack(tuple, net->ct.htable_size,
92 nf_conntrack_hash_rnd); 92 nf_conntrack_hash_rnd);
93} 93}
94 94
@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
296{ 296{
297 struct nf_conntrack_tuple_hash *h; 297 struct nf_conntrack_tuple_hash *h;
298 struct hlist_nulls_node *n; 298 struct hlist_nulls_node *n;
299 unsigned int hash = hash_conntrack(tuple); 299 unsigned int hash = hash_conntrack(net, tuple);
300 300
301 /* Disable BHs the entire time since we normally need to disable them 301 /* Disable BHs the entire time since we normally need to disable them
302 * at least once for the stats anyway. 302 * at least once for the stats anyway.
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
366 366
367void nf_conntrack_hash_insert(struct nf_conn *ct) 367void nf_conntrack_hash_insert(struct nf_conn *ct)
368{ 368{
369 struct net *net = nf_ct_net(ct);
369 unsigned int hash, repl_hash; 370 unsigned int hash, repl_hash;
370 371
371 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 372 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
372 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 373 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
373 374
374 __nf_conntrack_hash_insert(ct, hash, repl_hash); 375 __nf_conntrack_hash_insert(ct, hash, repl_hash);
375} 376}
@@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
397 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 398 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
398 return NF_ACCEPT; 399 return NF_ACCEPT;
399 400
400 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 401 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
401 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 402 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
402 403
403 /* We're not in hash table, and we refuse to set up related 404 /* We're not in hash table, and we refuse to set up related
404 connections for unconfirmed conns. But packet copies and 405 connections for unconfirmed conns. But packet copies and
@@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
468 struct net *net = nf_ct_net(ignored_conntrack); 469 struct net *net = nf_ct_net(ignored_conntrack);
469 struct nf_conntrack_tuple_hash *h; 470 struct nf_conntrack_tuple_hash *h;
470 struct hlist_nulls_node *n; 471 struct hlist_nulls_node *n;
471 unsigned int hash = hash_conntrack(tuple); 472 unsigned int hash = hash_conntrack(net, tuple);
472 473
473 /* Disable BHs the entire time since we need to disable them at 474 /* Disable BHs the entire time since we need to disable them at
474 * least once for the stats anyway. 475 * least once for the stats anyway.
@@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
503 int dropped = 0; 504 int dropped = 0;
504 505
505 rcu_read_lock(); 506 rcu_read_lock();
506 for (i = 0; i < nf_conntrack_htable_size; i++) { 507 for (i = 0; i < net->ct.htable_size; i++) {
507 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], 508 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
508 hnnode) { 509 hnnode) {
509 tmp = nf_ct_tuplehash_to_ctrack(h); 510 tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
523 if (cnt >= NF_CT_EVICTION_RANGE) 524 if (cnt >= NF_CT_EVICTION_RANGE)
524 break; 525 break;
525 526
526 hash = (hash + 1) % nf_conntrack_htable_size; 527 hash = (hash + 1) % net->ct.htable_size;
527 } 528 }
528 rcu_read_unlock(); 529 rcu_read_unlock();
529 530
@@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
557 558
558 if (nf_conntrack_max && 559 if (nf_conntrack_max &&
559 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 560 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
560 unsigned int hash = hash_conntrack(orig); 561 unsigned int hash = hash_conntrack(net, orig);
561 if (!early_drop(net, hash)) { 562 if (!early_drop(net, hash)) {
562 atomic_dec(&net->ct.count); 563 atomic_dec(&net->ct.count);
563 if (net_ratelimit()) 564 if (net_ratelimit())
@@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
572 * Do not use kmem_cache_zalloc(), as this cache uses 573 * Do not use kmem_cache_zalloc(), as this cache uses
573 * SLAB_DESTROY_BY_RCU. 574 * SLAB_DESTROY_BY_RCU.
574 */ 575 */
575 ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); 576 ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
576 if (ct == NULL) { 577 if (ct == NULL) {
577 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 578 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
578 atomic_dec(&net->ct.count); 579 atomic_dec(&net->ct.count);
@@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct)
611 nf_ct_ext_destroy(ct); 612 nf_ct_ext_destroy(ct);
612 atomic_dec(&net->ct.count); 613 atomic_dec(&net->ct.count);
613 nf_ct_ext_free(ct); 614 nf_ct_ext_free(ct);
614 kmem_cache_free(nf_conntrack_cachep, ct); 615 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
615} 616}
616EXPORT_SYMBOL_GPL(nf_conntrack_free); 617EXPORT_SYMBOL_GPL(nf_conntrack_free);
617 618
@@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1014 struct hlist_nulls_node *n; 1015 struct hlist_nulls_node *n;
1015 1016
1016 spin_lock_bh(&nf_conntrack_lock); 1017 spin_lock_bh(&nf_conntrack_lock);
1017 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 1018 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1018 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { 1019 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1019 ct = nf_ct_tuplehash_to_ctrack(h); 1020 ct = nf_ct_tuplehash_to_ctrack(h);
1020 if (iter(ct, data)) 1021 if (iter(ct, data))
@@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net)
1113 1114
1114static void nf_conntrack_cleanup_init_net(void) 1115static void nf_conntrack_cleanup_init_net(void)
1115{ 1116{
1117 /* wait until all references to nf_conntrack_untracked are dropped */
1118 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1119 schedule();
1120
1116 nf_conntrack_helper_fini(); 1121 nf_conntrack_helper_fini();
1117 nf_conntrack_proto_fini(); 1122 nf_conntrack_proto_fini();
1118 kmem_cache_destroy(nf_conntrack_cachep);
1119} 1123}
1120 1124
1121static void nf_conntrack_cleanup_net(struct net *net) 1125static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
1127 schedule(); 1131 schedule();
1128 goto i_see_dead_people; 1132 goto i_see_dead_people;
1129 } 1133 }
1130 /* wait until all references to nf_conntrack_untracked are dropped */
1131 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1132 schedule();
1133 1134
1134 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1135 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1135 nf_conntrack_htable_size); 1136 net->ct.htable_size);
1136 nf_conntrack_ecache_fini(net); 1137 nf_conntrack_ecache_fini(net);
1137 nf_conntrack_acct_fini(net); 1138 nf_conntrack_acct_fini(net);
1138 nf_conntrack_expect_fini(net); 1139 nf_conntrack_expect_fini(net);
1140 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1141 kfree(net->ct.slabname);
1139 free_percpu(net->ct.stat); 1142 free_percpu(net->ct.stat);
1140} 1143}
1141 1144
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1190{ 1193{
1191 int i, bucket, vmalloced, old_vmalloced; 1194 int i, bucket, vmalloced, old_vmalloced;
1192 unsigned int hashsize, old_size; 1195 unsigned int hashsize, old_size;
1193 int rnd;
1194 struct hlist_nulls_head *hash, *old_hash; 1196 struct hlist_nulls_head *hash, *old_hash;
1195 struct nf_conntrack_tuple_hash *h; 1197 struct nf_conntrack_tuple_hash *h;
1196 1198
1199 if (current->nsproxy->net_ns != &init_net)
1200 return -EOPNOTSUPP;
1201
1197 /* On boot, we can set this without any fancy locking. */ 1202 /* On boot, we can set this without any fancy locking. */
1198 if (!nf_conntrack_htable_size) 1203 if (!nf_conntrack_htable_size)
1199 return param_set_uint(val, kp); 1204 return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1206 if (!hash) 1211 if (!hash)
1207 return -ENOMEM; 1212 return -ENOMEM;
1208 1213
1209 /* We have to rehahs for the new table anyway, so we also can
1210 * use a newrandom seed */
1211 get_random_bytes(&rnd, sizeof(rnd));
1212
1213 /* Lookups in the old hash might happen in parallel, which means we 1214 /* Lookups in the old hash might happen in parallel, which means we
1214 * might get false negatives during connection lookup. New connections 1215 * might get false negatives during connection lookup. New connections
1215 * created because of a false negative won't make it into the hash 1216 * created because of a false negative won't make it into the hash
1216 * though since that required taking the lock. 1217 * though since that required taking the lock.
1217 */ 1218 */
1218 spin_lock_bh(&nf_conntrack_lock); 1219 spin_lock_bh(&nf_conntrack_lock);
1219 for (i = 0; i < nf_conntrack_htable_size; i++) { 1220 for (i = 0; i < init_net.ct.htable_size; i++) {
1220 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1221 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1221 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1222 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1222 struct nf_conntrack_tuple_hash, hnnode); 1223 struct nf_conntrack_tuple_hash, hnnode);
1223 hlist_nulls_del_rcu(&h->hnnode); 1224 hlist_nulls_del_rcu(&h->hnnode);
1224 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1225 bucket = __hash_conntrack(&h->tuple, hashsize,
1226 nf_conntrack_hash_rnd);
1225 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1227 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1226 } 1228 }
1227 } 1229 }
1228 old_size = nf_conntrack_htable_size; 1230 old_size = init_net.ct.htable_size;
1229 old_vmalloced = init_net.ct.hash_vmalloc; 1231 old_vmalloced = init_net.ct.hash_vmalloc;
1230 old_hash = init_net.ct.hash; 1232 old_hash = init_net.ct.hash;
1231 1233
1232 nf_conntrack_htable_size = hashsize; 1234 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1233 init_net.ct.hash_vmalloc = vmalloced; 1235 init_net.ct.hash_vmalloc = vmalloced;
1234 init_net.ct.hash = hash; 1236 init_net.ct.hash = hash;
1235 nf_conntrack_hash_rnd = rnd;
1236 spin_unlock_bh(&nf_conntrack_lock); 1237 spin_unlock_bh(&nf_conntrack_lock);
1237 1238
1238 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); 1239 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void)
1271 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1272 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1272 nf_conntrack_max); 1273 nf_conntrack_max);
1273 1274
1274 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1275 sizeof(struct nf_conn),
1276 0, SLAB_DESTROY_BY_RCU, NULL);
1277 if (!nf_conntrack_cachep) {
1278 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1279 ret = -ENOMEM;
1280 goto err_cache;
1281 }
1282
1283 ret = nf_conntrack_proto_init(); 1275 ret = nf_conntrack_proto_init();
1284 if (ret < 0) 1276 if (ret < 0)
1285 goto err_proto; 1277 goto err_proto;
@@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void)
1288 if (ret < 0) 1280 if (ret < 0)
1289 goto err_helper; 1281 goto err_helper;
1290 1282
1283 /* Set up fake conntrack: to never be deleted, not in any hashes */
1284#ifdef CONFIG_NET_NS
1285 nf_conntrack_untracked.ct_net = &init_net;
1286#endif
1287 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1288 /* - and look it like as a confirmed connection */
1289 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1290
1291 return 0; 1291 return 0;
1292 1292
1293err_helper: 1293err_helper:
1294 nf_conntrack_proto_fini(); 1294 nf_conntrack_proto_fini();
1295err_proto: 1295err_proto:
1296 kmem_cache_destroy(nf_conntrack_cachep);
1297err_cache:
1298 return ret; 1296 return ret;
1299} 1297}
1300 1298
@@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net)
1316 ret = -ENOMEM; 1314 ret = -ENOMEM;
1317 goto err_stat; 1315 goto err_stat;
1318 } 1316 }
1319 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1317
1318 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1319 if (!net->ct.slabname) {
1320 ret = -ENOMEM;
1321 goto err_slabname;
1322 }
1323
1324 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1325 sizeof(struct nf_conn), 0,
1326 SLAB_DESTROY_BY_RCU, NULL);
1327 if (!net->ct.nf_conntrack_cachep) {
1328 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1329 ret = -ENOMEM;
1330 goto err_cache;
1331 }
1332
1333 net->ct.htable_size = nf_conntrack_htable_size;
1334 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
1320 &net->ct.hash_vmalloc, 1); 1335 &net->ct.hash_vmalloc, 1);
1321 if (!net->ct.hash) { 1336 if (!net->ct.hash) {
1322 ret = -ENOMEM; 1337 ret = -ENOMEM;
@@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net)
1333 if (ret < 0) 1348 if (ret < 0)
1334 goto err_ecache; 1349 goto err_ecache;
1335 1350
1336 /* Set up fake conntrack:
1337 - to never be deleted, not in any hashes */
1338#ifdef CONFIG_NET_NS
1339 nf_conntrack_untracked.ct_net = &init_net;
1340#endif
1341 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1342 /* - and look it like as a confirmed connection */
1343 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1344
1345 return 0; 1351 return 0;
1346 1352
1347err_ecache: 1353err_ecache:
@@ -1350,8 +1356,12 @@ err_acct:
1350 nf_conntrack_expect_fini(net); 1356 nf_conntrack_expect_fini(net);
1351err_expect: 1357err_expect:
1352 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1358 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1353 nf_conntrack_htable_size); 1359 net->ct.htable_size);
1354err_hash: 1360err_hash:
1361 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1362err_cache:
1363 kfree(net->ct.slabname);
1364err_slabname:
1355 free_percpu(net->ct.stat); 1365 free_percpu(net->ct.stat);
1356err_stat: 1366err_stat:
1357 return ret; 1367 return ret;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index fdf5d2a1d9b4..2f25ff610982 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net)
569#endif /* CONFIG_PROC_FS */ 569#endif /* CONFIG_PROC_FS */
570} 570}
571 571
572module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); 572module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
573 573
574int nf_conntrack_expect_init(struct net *net) 574int nf_conntrack_expect_init(struct net *net)
575{ 575{
@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)
577 577
578 if (net_eq(net, &init_net)) { 578 if (net_eq(net, &init_net)) {
579 if (!nf_ct_expect_hsize) { 579 if (!nf_ct_expect_hsize) {
580 nf_ct_expect_hsize = nf_conntrack_htable_size / 256; 580 nf_ct_expect_hsize = net->ct.htable_size / 256;
581 if (!nf_ct_expect_hsize) 581 if (!nf_ct_expect_hsize)
582 nf_ct_expect_hsize = 1; 582 nf_ct_expect_hsize = 1;
583 } 583 }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3afc..4b1a56bd074c 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
192 /* Get rid of expecteds, set helpers to NULL. */ 192 /* Get rid of expecteds, set helpers to NULL. */
193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) 193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
194 unhelp(h, me); 194 unhelp(h, me);
195 for (i = 0; i < nf_conntrack_htable_size; i++) { 195 for (i = 0; i < net->ct.htable_size; i++) {
196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
197 unhelp(h, me); 197 unhelp(h, me);
198 } 198 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 42f21c01a93e..0ffe689dfe97 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
594 594
595 rcu_read_lock(); 595 rcu_read_lock();
596 last = (struct nf_conn *)cb->args[1]; 596 last = (struct nf_conn *)cb->args[1];
597 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 597 for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
598restart: 598restart:
599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
600 hnnode) { 600 hnnode) {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef7..e310f1561bb2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
51 struct hlist_nulls_node *n; 51 struct hlist_nulls_node *n;
52 52
53 for (st->bucket = 0; 53 for (st->bucket = 0;
54 st->bucket < nf_conntrack_htable_size; 54 st->bucket < net->ct.htable_size;
55 st->bucket++) { 55 st->bucket++) {
56 n = rcu_dereference(net->ct.hash[st->bucket].first); 56 n = rcu_dereference(net->ct.hash[st->bucket].first);
57 if (!is_a_nulls(n)) 57 if (!is_a_nulls(n))
@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
69 head = rcu_dereference(head->next); 69 head = rcu_dereference(head->next);
70 while (is_a_nulls(head)) { 70 while (is_a_nulls(head)) {
71 if (likely(get_nulls_value(head) == st->bucket)) { 71 if (likely(get_nulls_value(head) == st->bucket)) {
72 if (++st->bucket >= nf_conntrack_htable_size) 72 if (++st->bucket >= net->ct.htable_size)
73 return NULL; 73 return NULL;
74 } 74 }
75 head = rcu_dereference(net->ct.hash[st->bucket].first); 75 head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = {
355 }, 355 },
356 { 356 {
357 .procname = "nf_conntrack_buckets", 357 .procname = "nf_conntrack_buckets",
358 .data = &nf_conntrack_htable_size, 358 .data = &init_net.ct.htable_size,
359 .maxlen = sizeof(unsigned int), 359 .maxlen = sizeof(unsigned int),
360 .mode = 0444, 360 .mode = 0444,
361 .proc_handler = proc_dointvec, 361 .proc_handler = proc_dointvec,
@@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
421 goto out_kmemdup; 421 goto out_kmemdup;
422 422
423 table[1].data = &net->ct.count; 423 table[1].data = &net->ct.count;
424 table[2].data = &net->ct.htable_size;
424 table[3].data = &net->ct.sysctl_checksum; 425 table[3].data = &net->ct.sysctl_checksum;
425 table[4].data = &net->ct.sysctl_log_invalid; 426 table[4].data = &net->ct.sysctl_log_invalid;
426 427
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4957bf2ca60..4c5972ba8c78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
455 if (nl_table[protocol].registered && 455 if (nl_table[protocol].registered &&
456 try_module_get(nl_table[protocol].module)) 456 try_module_get(nl_table[protocol].module))
457 module = nl_table[protocol].module; 457 module = nl_table[protocol].module;
458 else
459 err = -EPROTONOSUPPORT;
458 cb_mutex = nl_table[protocol].cb_mutex; 460 cb_mutex = nl_table[protocol].cb_mutex;
459 netlink_unlock_table(); 461 netlink_unlock_table();
460 462
463 if (err < 0)
464 goto out;
465
461 err = __netlink_create(net, sock, cb_mutex, protocol); 466 err = __netlink_create(net, sock, cb_mutex, protocol);
462 if (err < 0) 467 if (err < 0)
463 goto out_module; 468 goto out_module;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 929218a47620..21f9c7678aa3 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -433,7 +433,7 @@ config NET_ACT_POLICE
433 module. 433 module.
434 434
435 To compile this code as a module, choose M here: the 435 To compile this code as a module, choose M here: the
436 module will be called police. 436 module will be called act_police.
437 437
438config NET_ACT_GACT 438config NET_ACT_GACT
439 tristate "Generic actions" 439 tristate "Generic actions"
@@ -443,7 +443,7 @@ config NET_ACT_GACT
443 accepting packets. 443 accepting packets.
444 444
445 To compile this code as a module, choose M here: the 445 To compile this code as a module, choose M here: the
446 module will be called gact. 446 module will be called act_gact.
447 447
448config GACT_PROB 448config GACT_PROB
449 bool "Probability support" 449 bool "Probability support"
@@ -459,7 +459,7 @@ config NET_ACT_MIRRED
459 other devices. 459 other devices.
460 460
461 To compile this code as a module, choose M here: the 461 To compile this code as a module, choose M here: the
462 module will be called mirred. 462 module will be called act_mirred.
463 463
464config NET_ACT_IPT 464config NET_ACT_IPT
465 tristate "IPtables targets" 465 tristate "IPtables targets"
@@ -469,7 +469,7 @@ config NET_ACT_IPT
469 classification. 469 classification.
470 470
471 To compile this code as a module, choose M here: the 471 To compile this code as a module, choose M here: the
472 module will be called ipt. 472 module will be called act_ipt.
473 473
474config NET_ACT_NAT 474config NET_ACT_NAT
475 tristate "Stateless NAT" 475 tristate "Stateless NAT"
@@ -479,7 +479,7 @@ config NET_ACT_NAT
479 netfilter for NAT unless you know what you are doing. 479 netfilter for NAT unless you know what you are doing.
480 480
481 To compile this code as a module, choose M here: the 481 To compile this code as a module, choose M here: the
482 module will be called nat. 482 module will be called act_nat.
483 483
484config NET_ACT_PEDIT 484config NET_ACT_PEDIT
485 tristate "Packet Editing" 485 tristate "Packet Editing"
@@ -488,7 +488,7 @@ config NET_ACT_PEDIT
 	  Say Y here if you want to mangle the content of packets.
 
 	  To compile this code as a module, choose M here: the
-	  module will be called pedit.
+	  module will be called act_pedit.
 
 config NET_ACT_SIMP
 	tristate "Simple Example (Debug)"
@@ -502,7 +502,7 @@ config NET_ACT_SIMP
 	  If unsure, say N.
 
 	  To compile this code as a module, choose M here: the
-	  module will be called simple.
+	  module will be called act_simple.
 
 config NET_ACT_SKBEDIT
 	tristate "SKB Editing"
@@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT
 	  If unsure, say N.
 
 	  To compile this code as a module, choose M here: the
-	  module will be called skbedit.
+	  module will be called act_skbedit.
 
 config NET_CLS_IND
 	bool "Incoming device classification"
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 49278f830367..9ac493fcc873 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -999,19 +999,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
 	inode = rpc_get_inode(sb, S_IFDIR | 0755);
 	if (!inode)
 		return -ENOMEM;
-	root = d_alloc_root(inode);
+	sb->s_root = root = d_alloc_root(inode);
 	if (!root) {
 		iput(inode);
 		return -ENOMEM;
 	}
 	if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
-		goto out;
-	sb->s_root = root;
+		return -ENOMEM;
 	return 0;
-out:
-	d_genocide(root);
-	dput(root);
-	return -ENOMEM;
 }
 
 static int
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b36cc344474b..f445ea1c5f52 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1102,7 +1102,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
 	int err = -ENOMEM;
 	struct xfrm_state *x = xfrm_state_alloc(net);
 	if (!x)
-		goto error;
+		goto out;
 
 	memcpy(&x->id, &orig->id, sizeof(x->id));
 	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
@@ -1160,16 +1160,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
 	return x;
 
  error:
+	xfrm_state_put(x);
+out:
 	if (errp)
 		*errp = err;
-	if (x) {
-		kfree(x->aalg);
-		kfree(x->ealg);
-		kfree(x->calg);
-		kfree(x->encap);
-		kfree(x->coaddr);
-	}
-	kfree(x);
 	return NULL;
 }
 
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 529c9ca65878..8dffcb7c9d81 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -387,7 +387,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
 	struct smk_audit_info ad;
 
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
-	smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_mountpoint);
+	smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root);
 	smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
 
 	sbp = mnt->mnt_sb->s_security;
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 18369d497eb8..455bc391b76d 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -89,29 +89,14 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
 		sp = dentry->d_op->d_dname(dentry, newname + offset,
 					   newname_len - offset);
 	} else {
-		/* Taken from d_namespace_path(). */
-		struct path root;
-		struct path ns_root = { };
-		struct path tmp;
-
-		read_lock(&current->fs->lock);
-		root = current->fs->root;
-		path_get(&root);
-		read_unlock(&current->fs->lock);
-		spin_lock(&vfsmount_lock);
-		if (root.mnt && root.mnt->mnt_ns)
-			ns_root.mnt = mntget(root.mnt->mnt_ns->root);
-		if (ns_root.mnt)
-			ns_root.dentry = dget(ns_root.mnt->mnt_root);
-		spin_unlock(&vfsmount_lock);
+		struct path ns_root = {.mnt = NULL, .dentry = NULL};
+
 		spin_lock(&dcache_lock);
-		tmp = ns_root;
-		sp = __d_path(path, &tmp, newname, newname_len);
+		/* go to whatever namespace root we are under */
+		sp = __d_path(path, &ns_root, newname, newname_len);
 		spin_unlock(&dcache_lock);
-		path_put(&root);
-		path_put(&ns_root);
 		/* Prepend "/proc" prefix if using internal proc vfs mount. */
-		if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
+		if (!IS_ERR(sp) && (path->mnt->mnt_flags & MNT_INTERNAL) &&
 		    (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
 			sp -= 5;
 			if (sp >= newname)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b8faa6dc5abe..ff6da6f386d1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1893,6 +1893,9 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
 
 	if (!bdl_pos_adj[chip->dev_index])
 		return 1; /* no delayed ack */
+	if (WARN_ONCE(!azx_dev->period_bytes,
+		      "hda-intel: zero azx_dev->period_bytes"))
+		return 0; /* this shouldn't happen! */
 	if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
 		return 0; /* NG - it's below the period boundary */
 	return 1; /* OK, it's fine */
@@ -2347,7 +2350,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
  */
 static struct snd_pci_quirk msi_black_list[] __devinitdata = {
 	SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0),	/* Athlon64 X2 + nvidia */
-	SND_PCI_QUIRK(0x1043, 0x829c, "ASUS", 0),	/* nvidia */
+	SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0),	/* nvidia */
 	{}
 };
 
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ddc584b64871..4b91d8cf00ec 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -705,7 +705,7 @@ static void print_mapped_keys(void)
 	fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
 
 	fprintf(stdout,
-		"\t[K] hide kernel_symbols symbols. \t(%s)\n",
+		"\t[K] hide kernel_symbols symbols. \t(%s)\n",
 		hide_kernel_symbols ? "yes" : "no");
 	fprintf(stdout,
 		"\t[U] hide user symbols. \t(%s)\n",
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index bb0fd6da2d56..8a9e6baa3099 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -295,10 +295,10 @@ void thread__find_addr_location(struct thread *self,
 	al->thread = self;
 	al->addr = addr;
 
-	if (cpumode & PERF_RECORD_MISC_KERNEL) {
+	if (cpumode == PERF_RECORD_MISC_KERNEL) {
 		al->level = 'k';
 		mg = &session->kmaps;
-	} else if (cpumode & PERF_RECORD_MISC_USER)
+	} else if (cpumode == PERF_RECORD_MISC_USER)
 		al->level = '.';
 	else {
 		al->level = 'H';
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 29465d440043..fde17b090a47 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -272,6 +272,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 	int ret;
 
 	pp->probes[0] = buf = zalloc(MAX_CMDLEN);
+	pp->found = 1;
 	if (!buf)
 		die("Failed to allocate memory by zalloc.");
 	if (pp->offset) {
@@ -294,6 +295,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
 error:
 		free(pp->probes[0]);
 		pp->probes[0] = NULL;
+		pp->found = 0;
 	}
 	return ret;
 }
@@ -455,6 +457,7 @@ void show_perf_probe_events(void)
 	struct strlist *rawlist;
 	struct str_node *ent;
 
+	memset(&pp, 0, sizeof(pp));
 	fd = open_kprobe_events(O_RDONLY, 0);
 	rawlist = get_trace_kprobe_event_rawlist(fd);
 	close(fd);