-rw-r--r--  Documentation/DMA-API-HOWTO.txt | 85
-rw-r--r--  Documentation/SubmittingDrivers | 5
-rw-r--r--  Documentation/cgroups/cgroups.txt | 2
-rw-r--r--  Documentation/cgroups/memory.txt | 326
-rw-r--r--  Documentation/filesystems/Locking | 5
-rw-r--r--  Documentation/filesystems/squashfs.txt | 32
-rw-r--r--  Documentation/vm/numa | 186
-rw-r--r--  MAINTAINERS | 18
-rw-r--r--  arch/alpha/Kconfig | 3
-rw-r--r--  arch/alpha/include/asm/scatterlist.h | 19
-rw-r--r--  arch/alpha/math-emu/sfp-util.h | 5
-rw-r--r--  arch/arm/include/asm/scatterlist.h | 3
-rw-r--r--  arch/arm/mach-davinci/include/mach/mmc.h | 3
-rw-r--r--  arch/arm/mach-omap2/board-3430sdp.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-ldp.c | 3
-rw-r--r--  arch/arm/mach-omap2/board-omap3evm.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-omap3pandora.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-omap3touchbook.c | 3
-rw-r--r--  arch/arm/plat-omap/gpio.c | 104
-rw-r--r--  arch/avr32/include/asm/scatterlist.h | 20
-rw-r--r--  arch/blackfin/include/asm/scatterlist.h | 22
-rw-r--r--  arch/blackfin/kernel/ptrace.c | 33
-rw-r--r--  arch/cris/include/asm/scatterlist.h | 17
-rw-r--r--  arch/frv/include/asm/scatterlist.h | 40
-rw-r--r--  arch/frv/kernel/ptrace.c | 20
-rw-r--r--  arch/frv/kernel/sysctl.c | 18
-rw-r--r--  arch/h8300/include/asm/scatterlist.h | 12
-rw-r--r--  arch/ia64/Kconfig | 11
-rw-r--r--  arch/ia64/include/asm/scatterlist.h | 4
-rw-r--r--  arch/ia64/include/asm/topology.h | 5
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/ia64/kernel/ptrace.c | 4
-rw-r--r--  arch/ia64/kernel/smpboot.c | 7
-rw-r--r--  arch/m32r/include/asm/scatterlist.h | 15
-rw-r--r--  arch/m68k/include/asm/scatterlist.h | 16
-rw-r--r--  arch/microblaze/include/asm/scatterlist.h | 2
-rw-r--r--  arch/mips/include/asm/scatterlist.h | 22
-rw-r--r--  arch/mn10300/include/asm/scatterlist.h | 39
-rw-r--r--  arch/parisc/Kconfig | 3
-rw-r--r--  arch/parisc/include/asm/scatterlist.h | 20
-rw-r--r--  arch/powerpc/Kconfig | 3
-rw-r--r--  arch/powerpc/include/asm/scatterlist.h | 28
-rw-r--r--  arch/powerpc/include/asm/sfp-machine.h | 6
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c | 4
-rw-r--r--  arch/powerpc/kernel/dma.c | 12
-rw-r--r--  arch/powerpc/sysdev/fsl_rio.c | 376
-rw-r--r--  arch/s390/include/asm/scatterlist.h | 2
-rw-r--r--  arch/s390/include/asm/sfp-util.h | 2
-rw-r--r--  arch/s390/kernel/smp.c | 6
-rw-r--r--  arch/score/include/asm/scatterlist.h | 2
-rw-r--r--  arch/sh/Kconfig | 3
-rw-r--r--  arch/sh/kernel/ptrace_32.c | 23
-rw-r--r--  arch/sh/math-emu/sfp-util.h | 4
-rw-r--r--  arch/sparc/Kconfig | 3
-rw-r--r--  arch/sparc/include/asm/scatterlist.h | 5
-rw-r--r--  arch/sparc/math-emu/sfp-util_32.h | 6
-rw-r--r--  arch/sparc/math-emu/sfp-util_64.h | 6
-rw-r--r--  arch/x86/Kconfig | 7
-rw-r--r--  arch/x86/boot/compressed/relocs.c | 4
-rw-r--r--  arch/x86/include/asm/scatterlist.h | 5
-rw-r--r--  arch/x86/include/asm/topology.h | 26
-rw-r--r--  arch/x86/kernel/cpu/common.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 2
-rw-r--r--  arch/x86/kernel/cpuid.c | 2
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 4
-rw-r--r--  arch/x86/mm/numa.c | 17
-rw-r--r--  arch/x86/mm/numa_64.c | 9
-rw-r--r--  arch/xtensa/include/asm/scatterlist.h | 23
-rw-r--r--  drivers/base/topology.c | 2
-rw-r--r--  drivers/char/Kconfig | 7
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/agp/amd64-agp.c | 28
-rw-r--r--  drivers/char/applicom.c | 11
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 15
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 468
-rw-r--r--  drivers/char/ppdev.c | 4
-rw-r--r--  drivers/char/ramoops.c | 162
-rw-r--r--  drivers/char/vt.c | 10
-rw-r--r--  drivers/edac/i5000_edac.c | 20
-rw-r--r--  drivers/edac/i5400_edac.c | 20
-rw-r--r--  drivers/edac/i82443bxgx_edac.c | 22
-rw-r--r--  drivers/gpio/Kconfig | 11
-rw-r--r--  drivers/gpio/cs5535-gpio.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 49
-rw-r--r--  drivers/gpio/it8761e_gpio.c | 5
-rw-r--r--  drivers/gpio/langwell_gpio.c | 83
-rw-r--r--  drivers/gpio/max732x.c | 368
-rw-r--r--  drivers/gpio/pca953x.c | 2
-rw-r--r--  drivers/gpio/pl061.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/hid/Kconfig | 8
-rw-r--r--  drivers/hid/Makefile | 1
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-debug.c | 2
-rw-r--r--  drivers/hid/hid-gyration.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-roccat-kone.c | 73
-rw-r--r--  drivers/hid/hid-roccat-kone.h | 9
-rw-r--r--  drivers/hid/hid-roccat.c | 428
-rw-r--r--  drivers/hid/hid-roccat.h | 31
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 2
-rw-r--r--  drivers/input/joydev.c | 10
-rw-r--r--  drivers/input/misc/Kconfig | 10
-rw-r--r--  drivers/input/misc/Makefile | 1
-rw-r--r--  drivers/input/misc/max8925_onkey.c | 148
-rw-r--r--  drivers/input/misc/twl4030-vibra.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 4
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 4
-rw-r--r--  drivers/input/touchscreen/s3c2410_ts.c | 2
-rw-r--r--  drivers/input/touchscreen/usbtouchscreen.c | 7
-rw-r--r--  drivers/isdn/mISDN/timerdev.c | 2
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/message/i2o/i2o_config.c | 11
-rw-r--r--  drivers/misc/lkdtm.c | 20
-rw-r--r--  drivers/mmc/core/core.c | 3
-rw-r--r--  drivers/mmc/core/sd_ops.c | 2
-rw-r--r--  drivers/mmc/core/sdio_io.c | 30
-rw-r--r--  drivers/mmc/host/Kconfig | 20
-rw-r--r--  drivers/mmc/host/Makefile | 2
-rw-r--r--  drivers/mmc/host/at91_mci.c | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 64
-rw-r--r--  drivers/mmc/host/au1xmmc.c | 2
-rw-r--r--  drivers/mmc/host/bfin_sdh.c | 2
-rw-r--r--  drivers/mmc/host/cb710-mmc.c | 2
-rw-r--r--  drivers/mmc/host/davinci_mmc.c | 111
-rw-r--r--  drivers/mmc/host/imxmmc.c | 2
-rw-r--r--  drivers/mmc/host/mmci.c | 2
-rw-r--r--  drivers/mmc/host/msm_sdcc.c | 2
-rw-r--r--  drivers/mmc/host/mvsdio.c | 2
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 2
-rw-r--r--  drivers/mmc/host/omap.c | 64
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 279
-rw-r--r--  drivers/mmc/host/pxamci.c | 2
-rw-r--r--  drivers/mmc/host/s3cmci.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-of-core.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-of-hlwd.c | 12
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pltfm.c | 26
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 10
-rw-r--r--  drivers/mmc/host/sdhci-spear.c | 298
-rw-r--r--  drivers/mmc/host/sdhci.c | 25
-rw-r--r--  drivers/mmc/host/sdhci.h | 42
-rw-r--r--  drivers/mmc/host/sdricoh_cs.c | 2
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 965
-rw-r--r--  drivers/mmc/host/tifm_sd.c | 2
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 2
-rw-r--r--  drivers/mmc/host/via-sdmmc.c | 2
-rw-r--r--  drivers/mmc/host/wbsd.c | 2
-rw-r--r--  drivers/rapidio/Kconfig | 24
-rw-r--r--  drivers/rapidio/Makefile | 4
-rw-r--r--  drivers/rapidio/rio-scan.c | 424
-rw-r--r--  drivers/rapidio/rio.c | 431
-rw-r--r--  drivers/rapidio/rio.h | 44
-rw-r--r--  drivers/rapidio/switches/Kconfig | 28
-rw-r--r--  drivers/rapidio/switches/Makefile | 9
-rw-r--r--  drivers/rapidio/switches/idtcps.c | 137
-rw-r--r--  drivers/rapidio/switches/tsi500.c | 20
-rw-r--r--  drivers/rapidio/switches/tsi568.c | 146
-rw-r--r--  drivers/rapidio/switches/tsi57x.c | 315
-rw-r--r--  drivers/rtc/Kconfig | 7
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-ab8500.c | 363
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 6
-rw-r--r--  drivers/scsi/osst.c | 9
-rw-r--r--  drivers/scsi/st.c | 1
-rw-r--r--  drivers/staging/go7007/saa7134-go7007.c | 8
-rw-r--r--  drivers/telephony/ixj.c | 15
-rw-r--r--  drivers/video/bf54x-lq043fb.c | 7
-rw-r--r--  drivers/video/bfin-t350mcqb-fb.c | 7
-rw-r--r--  drivers/video/s3fb.c | 101
-rw-r--r--  drivers/video/via/viafbdev.c | 11
-rw-r--r--  fs/affs/namei.c | 2
-rw-r--r--  fs/aio.c | 65
-rw-r--r--  fs/autofs/root.c | 1
-rw-r--r--  fs/autofs4/dev-ioctl.c | 13
-rw-r--r--  fs/compat.c | 132
-rw-r--r--  fs/exec.c | 195
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 2
-rw-r--r--  fs/fscache/object-list.c | 2
-rw-r--r--  fs/isofs/dir.c | 1
-rw-r--r--  fs/ncpfs/dir.c | 1
-rw-r--r--  fs/proc/array.c | 4
-rw-r--r--  fs/proc/base.c | 16
-rw-r--r--  fs/proc/generic.c | 15
-rw-r--r--  fs/proc/kcore.c | 2
-rw-r--r--  fs/proc/root.c | 1
-rw-r--r--  fs/qnx4/dir.c | 1
-rw-r--r--  fs/read_write.c | 17
-rw-r--r--  fs/reiserfs/dir.c | 1
-rw-r--r--  fs/smbfs/dir.c | 1
-rw-r--r--  fs/squashfs/Kconfig | 11
-rw-r--r--  fs/squashfs/Makefile | 2
-rw-r--r--  fs/squashfs/inode.c | 92
-rw-r--r--  fs/squashfs/namei.c | 6
-rw-r--r--  fs/squashfs/squashfs.h | 12
-rw-r--r--  fs/squashfs/squashfs_fs.h | 76
-rw-r--r--  fs/squashfs/squashfs_fs_i.h | 3
-rw-r--r--  fs/squashfs/squashfs_fs_sb.h | 3
-rw-r--r--  fs/squashfs/super.c | 30
-rw-r--r--  fs/squashfs/symlink.c | 11
-rw-r--r--  fs/squashfs/xattr.c | 323
-rw-r--r--  fs/squashfs/xattr.h | 46
-rw-r--r--  fs/squashfs/xattr_id.c | 100
-rw-r--r--  fs/udf/dir.c | 1
-rw-r--r--  fs/ufs/super.c | 2
-rw-r--r--  fs/ufs/ufs_fs.h | 1
-rw-r--r--  include/asm-generic/dma-mapping-common.h | 20
-rw-r--r--  include/asm-generic/gpio.h | 11
-rw-r--r--  include/asm-generic/scatterlist.h | 17
-rw-r--r--  include/asm-generic/topology.h | 3
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 8
-rw-r--r--  include/linux/aio.h | 5
-rw-r--r--  include/linux/bitmap.h | 1
-rw-r--r--  include/linux/byteorder/big_endian.h | 3
-rw-r--r--  include/linux/byteorder/little_endian.h | 3
-rw-r--r--  include/linux/cgroup.h | 2
-rw-r--r--  include/linux/compat.h | 4
-rw-r--r--  include/linux/cpuset.h | 6
-rw-r--r--  include/linux/cred.h | 1
-rw-r--r--  include/linux/dma-mapping.h | 25
-rw-r--r--  include/linux/fs.h | 1
-rw-r--r--  include/linux/gpio.h | 5
-rw-r--r--  include/linux/i2c/max732x.h | 3
-rw-r--r--  include/linux/i2c/pca953x.h | 2
-rw-r--r--  include/linux/init_task.h | 13
-rw-r--r--  include/linux/input.h | 12
-rw-r--r--  include/linux/joystick.h | 4
-rw-r--r--  include/linux/kmod.h | 64
-rw-r--r--  include/linux/memcontrol.h | 6
-rw-r--r--  include/linux/mmc/host.h | 2
-rw-r--r--  include/linux/mmc/sdhci-spear.h | 42
-rw-r--r--  include/linux/mmc/sdio_func.h | 3
-rw-r--r--  include/linux/mmc/sh_mmcif.h | 39
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/nodemask.h | 8
-rw-r--r--  include/linux/notifier.h | 5
-rw-r--r--  include/linux/page_cgroup.h | 5
-rw-r--r--  include/linux/random.h | 28
-rw-r--r--  include/linux/rio.h | 49
-rw-r--r--  include/linux/rio_drv.h | 6
-rw-r--r--  include/linux/rio_ids.h | 14
-rw-r--r--  include/linux/rio_regs.h | 80
-rw-r--r--  include/linux/sched.h | 17
-rw-r--r--  include/linux/sdhci-pltfm.h | 35
-rw-r--r--  include/linux/sem.h | 4
-rw-r--r--  include/linux/swap.h | 5
-rw-r--r--  include/linux/swiotlb.h | 10
-rw-r--r--  include/linux/threads.h | 9
-rw-r--r--  include/linux/topology.h | 112
-rw-r--r--  include/linux/uinput.h | 10
-rw-r--r--  ipc/sem.c | 322
-rw-r--r--  kernel/cgroup.c | 1
-rw-r--r--  kernel/cpu.c | 104
-rw-r--r--  kernel/cpuset.c | 20
-rw-r--r--  kernel/cred.c | 60
-rw-r--r--  kernel/exit.c | 40
-rw-r--r--  kernel/fork.c | 55
-rw-r--r--  kernel/kmod.c | 193
-rw-r--r--  kernel/padata.c | 4
-rw-r--r--  kernel/panic.c | 1
-rw-r--r--  kernel/pid.c | 7
-rw-r--r--  kernel/posix-cpu-timers.c | 12
-rw-r--r--  kernel/profile.c | 8
-rw-r--r--  kernel/ptrace.c | 26
-rw-r--r--  kernel/relay.c | 2
-rw-r--r--  kernel/sched.c | 8
-rw-r--r--  kernel/sched_debug.c | 10
-rw-r--r--  kernel/signal.c | 23
-rw-r--r--  kernel/smp.c | 2
-rw-r--r--  kernel/softirq.c | 2
-rw-r--r--  kernel/sys.c | 6
-rw-r--r--  kernel/timer.c | 7
-rw-r--r--  kernel/workqueue.c | 9
-rw-r--r--  lib/Kconfig.debug | 12
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/bitmap.c | 2
-rw-r--r--  lib/cpu-notifier-error-inject.c | 63
-rw-r--r--  lib/crc32.c | 2
-rw-r--r--  lib/idr.c | 5
-rw-r--r--  lib/radix-tree.c | 4
-rw-r--r--  lib/random32.c | 38
-rw-r--r--  lib/swiotlb.c | 31
-rw-r--r--  mm/filemap.c | 6
-rw-r--r--  mm/memcontrol.c | 689
-rw-r--r--  mm/mempolicy.c | 3
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/nommu.c | 32
-rw-r--r--  mm/oom_kill.c | 5
-rw-r--r--  mm/page_alloc.c | 50
-rw-r--r--  mm/shmem.c | 64
-rw-r--r--  mm/slab.c | 47
-rw-r--r--  net/iucv/iucv.c | 9
-rw-r--r--  scripts/gen_initramfs_list.sh | 1
-rw-r--r--  security/keys/internal.h | 1
-rw-r--r--  security/keys/keyctl.c | 2
-rw-r--r--  security/keys/process_keys.c | 3
-rw-r--r--  security/keys/request_key.c | 32
-rw-r--r--  sound/core/pcm_lib.c | 13
-rw-r--r--  sound/core/pcm_native.c | 39
-rw-r--r--  sound/pci/aw2/aw2-alsa.c | 11
-rw-r--r--  sound/pci/emu10k1/emufx.c | 36
-rw-r--r--  sound/pci/hda/hda_intel.c | 9
-rw-r--r--  sound/pci/hda/patch_realtek.c | 84
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 2
-rw-r--r--  sound/usb/caiaq/input.c | 2
-rw-r--r--  sound/usb/midi.c | 110
-rw-r--r--  sound/usb/midi.h | 2
-rw-r--r--  sound/usb/quirks-table.h | 11
-rw-r--r--  sound/usb/quirks.c | 1
-rw-r--r--  sound/usb/usbaudio.h | 1
-rw-r--r--  usr/Makefile | 5
-rw-r--r--  usr/initramfs_data.lzo.S | 29
322 files changed, 9561 insertions, 2695 deletions
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 2e435adfbd6b..98ce51796f71 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -639,6 +639,36 @@ is planned to completely remove virt_to_bus() and bus_to_virt() as
 they are entirely deprecated.  Some ports already do not provide these
 as it is impossible to correctly support them.
 
+		Handling Errors
+
+DMA address space is limited on some architectures and an allocation
+failure can be determined by:
+
+- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
+
+- checking the returned dma_addr_t of dma_map_single and dma_map_page
+  by using dma_mapping_error():
+
+	dma_addr_t dma_handle;
+
+	dma_handle = dma_map_single(dev, addr, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
+		/*
+		 * reduce current DMA mapping usage,
+		 * delay and try again later or
+		 * reset driver.
+		 */
+	}
+
+Networking drivers must call dev_kfree_skb to free the socket buffer
+and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook
+(ndo_start_xmit). This means that the socket buffer is just dropped in
+the failure case.
+
+SCSI drivers must return SCSI_MLQUEUE_HOST_BUSY if the DMA mapping
+fails in the queuecommand hook. This means that the SCSI subsystem
+passes the command to the driver again later.
+
 		Optimizing Unmap State Space Consumption
 
 On many platforms, dma_unmap_{single,page}() is simply a nop.
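As a rough illustration of the networking rule just added (this sketch is not part of the patch: the driver, its foo_priv state and its dma_dev field are hypothetical names), a transmit hook following this advice might look like:

	/* Hypothetical driver-private state; dma_dev is the struct device
	 * used for DMA mappings. */
	struct foo_priv {
		struct device *dma_dev;
	};

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);
		dma_addr_t dma_handle;

		dma_handle = dma_map_single(priv->dma_dev, skb->data,
					    skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dma_dev, dma_handle)) {
			/* Mapping failed: drop the skb and report success,
			 * exactly as described above. */
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* ... otherwise hand dma_handle to the hardware TX ring ... */
		return NETDEV_TX_OK;
	}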
@@ -703,42 +733,25 @@ to "Closing".
 
 1) Struct scatterlist requirements.
 
-   Struct scatterlist must contain, at a minimum, the following
-   members:
-
-	struct page *page;
-	unsigned int offset;
-	unsigned int length;
-
-   The base address is specified by a "page+offset" pair.
-
-   Previous versions of struct scatterlist contained a "void *address"
-   field that was sometimes used instead of page+offset.  As of Linux
-   2.5., page+offset is always used, and the "address" field has been
-   deleted.
-
-2) More to come...
-
-		Handling Errors
-
-DMA address space is limited on some architectures and an allocation
-failure can be determined by:
-
-- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
-
-- checking the returned dma_addr_t of dma_map_single and dma_map_page
-  by using dma_mapping_error():
-
-	dma_addr_t dma_handle;
-
-	dma_handle = dma_map_single(dev, addr, size, direction);
-	if (dma_mapping_error(dev, dma_handle)) {
-		/*
-		 * reduce current DMA mapping usage,
-		 * delay and try again later or
-		 * reset driver.
-		 */
-	}
+   Don't invent the architecture specific struct scatterlist; just use
+   <asm-generic/scatterlist.h>. You need to enable
+   CONFIG_NEED_SG_DMA_LENGTH if the architecture supports IOMMUs
+   (including software IOMMU).
+
+2) ARCH_KMALLOC_MINALIGN
+
+   Architectures must ensure that kmalloc'ed buffer is
+   DMA-safe. Drivers and subsystems depend on it. If an architecture
+   isn't fully DMA-coherent (i.e. hardware doesn't ensure that data in
+   the CPU cache is identical to data in main memory),
+   ARCH_KMALLOC_MINALIGN must be set so that the memory allocator
+   makes sure that kmalloc'ed buffer doesn't share a cache line with
+   the others. See arch/arm/include/asm/cache.h as an example.
+
+   Note that ARCH_KMALLOC_MINALIGN is about DMA memory alignment
+   constraints. You don't need to worry about the architecture data
+   alignment constraints (e.g. the alignment constraints about 64-bit
+   objects).
 
 		Closing
 
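For orientation, the generic scatterlist this advice points at looks roughly like the sketch below; treat it as an approximation of <asm-generic/scatterlist.h> from this era rather than a verbatim copy. The #define is the arch/arm/include/asm/cache.h example the text refers to.

	struct scatterlist {
	#ifdef CONFIG_DEBUG_SG
		unsigned long	sg_magic;
	#endif
		unsigned long	page_link;	/* page, plus chain/end markers */
		unsigned int	offset;
		unsigned int	length;
		dma_addr_t	dma_address;
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
		unsigned int	dma_length;	/* filled in by (sw-)IOMMU mapping */
	#endif
	};

	/* arch/arm/include/asm/cache.h (abridged): kmalloc'ed buffers must
	 * not share a cache line on a not-fully-coherent architecture. */
	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES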
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 99e72a81fa2f..4947fd8fb182 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -130,6 +130,8 @@ Linux kernel master tree:
 	ftp.??.kernel.org:/pub/linux/kernel/...
 	?? == your country code, such as "us", "uk", "fr", etc.
 
+	http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git
+
 Linux kernel mailing list:
 	linux-kernel@vger.kernel.org
 	[mail majordomo@vger.kernel.org to subscribe]
@@ -160,3 +162,6 @@ How to NOT write kernel driver by Arjan van de Ven:
 
 Kernel Janitor:
 	http://janitor.kernelnewbies.org/
+
+GIT, Fast Version Control System:
+	http://git-scm.com/
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 57444c2609fc..b34823ff1646 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -339,7 +339,7 @@ To mount a cgroup hierarchy with all available subsystems, type:
 The "xxx" is not interpreted by the cgroup code, but will appear in
 /proc/mounts so may be any useful identifying string that you like.
 
-To mount a cgroup hierarchy with just the cpuset and numtasks
+To mount a cgroup hierarchy with just the cpuset and memory
 subsystems, type:
 # mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
 
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 6cab1f29da4c..7781857dc940 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,18 +1,15 @@
 Memory Resource Controller
 
 NOTE: The Memory Resource Controller has generically been referred
-to as the memory controller in this document. Do not confuse memory controller
-used here with the memory controller that is used in hardware.
+      to as the memory controller in this document. Do not confuse the memory
+      controller used here with the memory controller that is used in hardware.
 
-Salient features
-
-a. Enable control of Anonymous, Page Cache (mapped and unmapped) and
-   Swap Cache memory pages.
-b. The infrastructure allows easy addition of other types of memory to control
-c. Provides *zero overhead* for non memory controller users
-d. Provides a double LRU: global memory pressure causes reclaim from the
-   global LRU; a cgroup on hitting a limit, reclaims from the per
-   cgroup LRU
+(For editors)
+In this document:
+      When we mention a cgroup (cgroupfs's directory) with the memory controller,
+      we call it "memory cgroup". When you read git logs and source code, you'll
+      see that patch titles and function names tend to use "memcg".
+      In this document, we avoid using it.
 
 Benefits and Purpose of the memory controller
 
@@ -33,6 +30,45 @@ d. A CD/DVD burner could control the amount of memory used by the
 e. There are several other use cases, find one or use the controller just
    for fun (to learn and hack on the VM subsystem).
 
+Current Status: linux-2.6.34-mmotm (development version of April 2010)
+
+Features:
+ - accounting of anonymous pages, file caches, swap caches usage and limiting them.
+ - private LRU and reclaim routine. (the system's global LRU and the private LRU
+   work independently from each other)
+ - optionally, memory+swap usage can be accounted and limited.
+ - hierarchical accounting
+ - soft limit
+ - moving (recharging) of accounts when moving a task is selectable.
+ - usage threshold notifier
+ - oom-killer disable knob and oom-notifier
+ - Root cgroup has no limit controls.
+
+   Kernel memory and Hugepages are not under control yet. We just manage
+   pages on LRU. To add more controls, we have to take care of performance.
+
+Brief summary of control files.
+
+ tasks				 # attach a task(thread) and show list of threads
+ cgroup.procs			 # show list of processes
+ cgroup.event_control		 # an interface for event_fd()
+ memory.usage_in_bytes		 # show current memory(RSS+Cache) usage.
+ memory.memsw.usage_in_bytes	 # show current memory+Swap usage
+ memory.limit_in_bytes		 # set/show limit of memory usage
+ memory.memsw.limit_in_bytes	 # set/show limit of memory+Swap usage
+ memory.failcnt			 # show the number of times memory usage hit limits
+ memory.memsw.failcnt		 # show the number of memory+Swap limit hits
+ memory.max_usage_in_bytes	 # show max memory usage recorded
+ memory.memsw.max_usage_in_bytes # show max memory+Swap usage recorded
+ memory.soft_limit_in_bytes	 # set/show soft limit of memory usage
+ memory.stat			 # show various statistics
+ memory.use_hierarchy		 # set/show hierarchical account enabled
+ memory.force_empty		 # trigger forced move charge to parent
+ memory.swappiness		 # set/show swappiness parameter of vmscan
+				   (See sysctl's vm.swappiness)
+ memory.move_charge_at_immigrate # set/show controls of moving charges
+ memory.oom_control		 # set/show oom controls.
+
 1. History
 
 The memory controller has a long history. A request for comments for the memory
@@ -106,14 +142,14 @@ the necessary data structures and check if the cgroup that is being charged
 is over its limit. If it is, then reclaim is invoked on the cgroup.
 More details can be found in the reclaim section of this document.
 If everything goes well, a page meta-data-structure called page_cgroup is
-allocated and associated with the page. This routine also adds the page to
-the per cgroup LRU.
+updated. page_cgroup has its own LRU on cgroup.
+(*) page_cgroup structure is allocated at boot/memory-hotplug time.
 
 2.2.1 Accounting details
 
 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-(some pages which never be reclaimable and will not be on global LRU
- are not accounted. we just accounts pages under usual vm management.)
+Some pages which are never reclaimable and will not be on the global LRU
+are not accounted. We just account pages under usual VM management.
 
 RSS pages are accounted at page_fault unless they've already been accounted
 for earlier. A file page will be accounted for as Page Cache when it's
@@ -121,12 +157,19 @@ inserted into inode (radix-tree). While it's mapped into the page tables of
 processes, duplicate accounting is carefully avoided.
 
 An RSS page is unaccounted when it's fully unmapped. A PageCache page is
-unaccounted when it's removed from radix-tree.
+unaccounted when it's removed from radix-tree. Even if RSS pages are fully
+unmapped (by kswapd), they may exist as SwapCache in the system until they
+are really freed. Such SwapCaches are also accounted.
+A swapped-in page is not accounted until it's mapped.
+
+Note: The kernel does swapin-readahead and reads multiple swaps at once.
+This means swapped-in pages may contain pages for tasks other than the task
+causing the page fault. So, we avoid accounting at swap-in I/O.
 
 At page migration, accounting information is kept.
 
-Note: we just account pages-on-lru because our purpose is to control amount
-of used pages. not-on-lru pages are tend to be out-of-control from vm view.
+Note: we just account pages-on-LRU because our purpose is to control amount
+of used pages; not-on-LRU pages tend to be out-of-control from the VM's view.
 
 2.3 Shared Page Accounting
 
@@ -143,6 +186,7 @@ caller of swapoff rather than the users of shmem.
 
 
 2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
+
 Swap Extension allows you to record charge for swap. A swapped-in page is
 charged back to original page allocator if possible.
 
@@ -150,13 +194,20 @@ When swap is accounted, following files are added.
  - memory.memsw.usage_in_bytes.
  - memory.memsw.limit_in_bytes.
 
-usage of mem+swap is limited by memsw.limit_in_bytes.
+memsw means memory+swap. Usage of memory+swap is limited by
+memsw.limit_in_bytes.
 
-* why 'mem+swap' rather than swap.
+Example: Assume a system with 4G of swap. A task which allocates 6G of memory
+(by mistake) under a 2G memory limitation will use all swap.
+In this case, setting memsw.limit_in_bytes=3G will prevent bad use of swap.
+By using the memsw limit, you can avoid system OOM which can be caused by swap
+shortage.
+
+* why 'memory+swap' rather than swap.
 The global LRU(kswapd) can swap out arbitrary pages. Swap-out means
 to move account from memory to swap...there is no change in usage of
-mem+swap. In other words, when we want to limit the usage of swap without
-affecting global LRU, mem+swap limit is better than just limiting swap from
+memory+swap. In other words, when we want to limit the usage of swap without
+affecting global LRU, a memory+swap limit is better than just limiting swap from
 OS point of view.
 
 * What happens when a cgroup hits memory.memsw.limit_in_bytes
@@ -168,12 +219,12 @@ it by cgroup.
 
 2.5 Reclaim
 
-Each cgroup maintains a per cgroup LRU that consists of an active
-and inactive list. When a cgroup goes over its limit, we first try
+Each cgroup maintains a per cgroup LRU which has the same structure as
+global VM. When a cgroup goes over its limit, we first try
 to reclaim memory from the cgroup so as to make space for the new
 pages that the cgroup has touched. If the reclaim is unsuccessful,
 an OOM routine is invoked to select and kill the bulkiest task in the
-cgroup.
+cgroup. (See 10. OOM Control below.)
 
 The reclaim algorithm has not been modified for cgroups, except that
 pages that are selected for reclaiming come from the per cgroup LRU
@@ -184,13 +235,22 @@ limits on the root cgroup.
 
 Note2: When panic_on_oom is set to "2", the whole system will panic.
 
-2. Locking
+When the oom event notifier is registered, events will be delivered.
+(See the oom_control section)
+
+2.6 Locking
 
-The memory controller uses the following hierarchy
+   lock_page_cgroup()/unlock_page_cgroup() should not be called under
+   mapping->tree_lock.
 
-1. zone->lru_lock is used for selecting pages to be isolated
-2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone)
-3. lock_page_cgroup() is used to protect page->page_cgroup
+   Other lock order is following:
+   PG_locked.
+   mm->page_table_lock
+   zone->lru_lock
+   lock_page_cgroup.
+   In many cases, just lock_page_cgroup() is called.
+   The per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
+   zone->lru_lock; it has no lock of its own.
 
 3. User Interface
 
@@ -199,6 +259,7 @@ The memory controller uses the following hierarchy
 a. Enable CONFIG_CGROUPS
 b. Enable CONFIG_RESOURCE_COUNTERS
 c. Enable CONFIG_CGROUP_MEM_RES_CTLR
+d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
 
 1. Prepare the cgroups
 # mkdir -p /cgroups
@@ -206,31 +267,28 @@ c. Enable CONFIG_CGROUP_MEM_RES_CTLR
 
 2. Make the new group and move bash into it
 # mkdir /cgroups/0
 # echo $$ > /cgroups/0/tasks
 
-Since now we're in the 0 cgroup,
-We can alter the memory limit:
+Since now we're in the 0 cgroup, we can alter the memory limit:
 # echo 4M > /cgroups/0/memory.limit_in_bytes
 
 NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
-mega or gigabytes.
+mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
+
 NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
 NOTE: We cannot set limits on the root cgroup any more.
 
 # cat /cgroups/0/memory.limit_in_bytes
 4194304
 
-NOTE: The interface has now changed to display the usage in bytes
-instead of pages
-
 We can check the usage:
 # cat /cgroups/0/memory.usage_in_bytes
 1216512
 
 A successful write to this file does not guarantee a successful set of
 this limit to the value written into the file. This can be due to a
 number of factors, such as rounding up to page boundaries or the total
 availability of memory on the system. The user is required to re-read
 this file after a write to guarantee the value committed by the kernel.
 
 # echo 1 > memory.limit_in_bytes
@@ -245,15 +303,23 @@ caches, RSS and Active pages/Inactive pages are shown.
 
 4. Testing
 
-Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
-Apart from that v6 has been tested with several applications and regular
-daily use. The controller has also been tested on the PPC64, x86_64 and
-UML platforms.
+For testing features and implementation, see memcg_test.txt.
+
+Performance testing is also important. To see the pure memory controller's
+overhead, testing on tmpfs will give you good numbers for its small overheads.
+Example: do a kernel make on tmpfs.
+
+Page-fault scalability is also important. When measuring parallel
+page fault performance, a multi-process test may be better than a multi-thread
+test because the latter has noise from shared objects/status.
+
+But the above two test extreme situations.
+Trying your usual workload under the memory controller is always helpful.
 
 4.1 Troubleshooting
 
 Sometimes a user might find that the application under a cgroup is
-terminated. There are several causes for this:
+terminated by the OOM killer. There are several causes for this:
 
 1. The cgroup limit is too low (just too low to do anything useful)
 2. The user is using anonymous memory and swap is turned off or too low
@@ -261,6 +327,9 @@ terminated. There are several causes for this:
 A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
 some of the pages cached in the cgroup (page cache pages).
 
+To see what is going on, it is helpful to disable the OOM killer as described
+in 10. OOM Control (see below) and observe what happens.
+
 4.2 Task migration
 
 When a task migrates from one cgroup to another, its charge is not
@@ -268,16 +337,19 @@ carried forward by default. The pages allocated from the original cgroup still
 remain charged to it, the charge is dropped when the page is freed or
 reclaimed.
 
-Note: You can move charges of a task along with task migration. See 8.
+You can move charges of a task along with task migration.
+See 8. "Move charges at task migration"
 
 4.3 Removing a cgroup
 
 A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
 cgroup might have some charge associated with it, even though all
-tasks have migrated away from it.
-Such charges are freed(at default) or moved to its parent. When moved,
-both of RSS and CACHES are moved to parent.
-If both of them are busy, rmdir() returns -EBUSY. See 5.1 Also.
+tasks have migrated away from it. (This is because we charge against pages,
+not against tasks.)
+
+Such charges are freed or moved to their parent. At moving, both RSS
+and CACHES are moved to the parent.
+rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
 
 Charges recorded in swap information are not updated at removal of a cgroup.
 Recorded information is discarded and a cgroup which uses swap (swapcache)
@@ -293,10 +365,10 @@ will be charged as a new owner of it.
 
   # echo 0 > memory.force_empty
 
-  Almost all pages tracked by this memcg will be unmapped and freed. Some of
-  pages cannot be freed because it's locked or in-use. Such pages are moved
-  to parent and this cgroup will be empty. But this may return -EBUSY in
-  some too busy case.
+  Almost all pages tracked by this memory cgroup will be unmapped and freed.
+  Some pages cannot be freed because they are locked or in-use. Such pages are
+  moved to parent and this cgroup will be empty. This may return -EBUSY if the
+  VM is too busy to free/move all pages immediately.
 
   The typical use case of this interface is calling it just before rmdir().
   Because rmdir() moves all pages to parent, some out-of-use page caches can be
@@ -306,19 +378,41 @@ will be charged as a new owner of it.
 
 memory.stat file includes following statistics
 
+# per-memory cgroup local status
 cache		- # of bytes of page cache memory.
 rss		- # of bytes of anonymous and swap cache memory.
+mapped_file	- # of bytes of mapped file (includes tmpfs/shmem)
 pgpgin		- # of pages paged in (equivalent to # of charging events).
 pgpgout		- # of pages paged out (equivalent to # of uncharging events).
-active_anon	- # of bytes of anonymous and swap cache memory on active
-		  lru list.
+swap		- # of bytes of swap usage
 inactive_anon	- # of bytes of anonymous memory and swap cache memory on
-		  inactive lru list.
-active_file	- # of bytes of file-backed memory on active lru list.
-inactive_file	- # of bytes of file-backed memory on inactive lru list.
+		  inactive LRU list.
+active_anon	- # of bytes of anonymous and swap cache memory on active
+		  LRU list.
+inactive_file	- # of bytes of file-backed memory on inactive LRU list.
+active_file	- # of bytes of file-backed memory on active LRU list.
 unevictable	- # of bytes of memory that cannot be reclaimed (mlocked etc).
 
-The following additional stats are dependent on CONFIG_DEBUG_VM.
+# status considering hierarchy (see memory.use_hierarchy settings)
+
+hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy
+			    under which the memory cgroup is
+hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
+			    hierarchy under which the memory cgroup is.
+
+total_cache		- sum of all children's "cache"
+total_rss		- sum of all children's "rss"
+total_mapped_file	- sum of all children's "mapped_file"
+total_pgpgin		- sum of all children's "pgpgin"
+total_pgpgout		- sum of all children's "pgpgout"
+total_swap		- sum of all children's "swap"
+total_inactive_anon	- sum of all children's "inactive_anon"
+total_active_anon	- sum of all children's "active_anon"
+total_inactive_file	- sum of all children's "inactive_file"
+total_active_file	- sum of all children's "active_file"
+total_unevictable	- sum of all children's "unevictable"
+
+# The following additional stats are dependent on CONFIG_DEBUG_VM.
 
 inactive_ratio		- VM internal parameter. (see mm/page_alloc.c)
 recent_rotated_anon	- VM internal parameter. (see mm/vmscan.c)
@@ -327,24 +421,37 @@ recent_scanned_anon - VM internal parameter. (see mm/vmscan.c)
 recent_scanned_file	- VM internal parameter. (see mm/vmscan.c)
 
 Memo:
-	recent_rotated means recent frequency of lru rotation.
-	recent_scanned means recent # of scans to lru.
+	recent_rotated means recent frequency of LRU rotation.
+	recent_scanned means recent # of scans to LRU.
 	These are shown for better debugging; please see the code for meanings.
 
 Note:
 	Only anonymous and swap cache memory is listed as part of 'rss' stat.
 	This should not be confused with the true 'resident set size' or the
-	amount of physical memory used by the cgroup. Per-cgroup rss
-	accounting is not done yet.
+	amount of physical memory used by the cgroup.
+	'rss + mapped_file' will give you the resident set size of the cgroup.
+	(Note: file and shmem may be shared among other cgroups. In that case,
+	mapped_file is accounted only when the memory cgroup is owner of the
+	page cache.)
 
 5.3 swappiness
-	Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
 
-	Following cgroups' swappiness can't be changed.
-	- root cgroup (uses /proc/sys/vm/swappiness).
-	- a cgroup which uses hierarchy and it has child cgroup.
-	- a cgroup which uses hierarchy and not the root of hierarchy.
+Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
 
+The swappiness of the following cgroups can't be changed.
+- root cgroup (uses /proc/sys/vm/swappiness).
+- a cgroup which uses hierarchy and has other cgroup(s) below it.
+- a cgroup which uses hierarchy and is not the root of the hierarchy.
+
+5.4 failcnt
+
+A memory cgroup provides memory.failcnt and memory.memsw.failcnt files.
+This failcnt (== failure count) shows the number of times that a usage counter
+hit its limit. When a memory cgroup hits a limit, failcnt increases and
+memory under it will be reclaimed.
+
+You can reset failcnt by writing 0 to the failcnt file.
+# echo 0 > .../memory.failcnt
 
 6. Hierarchy support
 
@@ -363,13 +470,13 @@ hierarchy
 
 In the diagram above, with hierarchical accounting enabled, all memory
 usage of e is accounted to its ancestors up until the root (i.e., c and root),
 that has memory.use_hierarchy enabled. If one of the ancestors goes over its
 limit, the reclaim algorithm reclaims from the tasks in the ancestor and the
 children of the ancestor.
 
 6.1 Enabling hierarchical accounting and reclaim
 
-The memory controller by default disables the hierarchy feature. Support
+A memory cgroup by default disables the hierarchy feature. Support
 can be enabled by writing 1 to memory.use_hierarchy file of the root cgroup
 
 # echo 1 > memory.use_hierarchy
@@ -379,10 +486,10 @@ The feature can be disabled by
 # echo 0 > memory.use_hierarchy
 
 NOTE1: Enabling/disabling will fail if the cgroup already has other
-cgroups created below it.
+       cgroups created below it.
 
 NOTE2: When panic_on_oom is set to "2", the whole system will panic in
-case of an oom event in any cgroup.
+       case of an OOM event in any cgroup.
 
 7. Soft limits
 
@@ -392,7 +499,7 @@ is to allow control groups to use as much of the memory as needed, provided
 a. There is no memory contention
 b. They do not exceed their hard limit
 
-When the system detects memory contention or low memory control groups
+When the system detects memory contention or low memory, control groups
 are pushed back to their soft limits. If the soft limit of each control
 group is very high, they are pushed back as much as possible to make
 sure that one control group does not starve the others of memory.
@@ -406,7 +513,7 @@ it gets invoked from balance_pgdat (kswapd).
 7.1 Interface
 
 Soft limits can be set up by using the following commands (in this example we
-assume a soft limit of 256 megabytes)
+assume a soft limit of 256 MiB)
 
 # echo 256M > memory.soft_limit_in_bytes
 
@@ -442,7 +549,7 @@ Note: Charges are moved only when you move mm->owner, IOW, a leader of a thread
 Note: If we cannot find enough space for the task in the destination cgroup, we
       try to make space by reclaiming memory. Task migration may fail if we
       cannot make enough space.
-Note: It can take several seconds if you move charges in giga bytes order.
+Note: It can take several seconds if you move a large amount of charges.
 
 And if you want to disable it again:
 
@@ -451,21 +558,27 @@ And if you want to disable it again:
 8.2 Type of charges which can be moved
 
 Each bit of move_charge_at_immigrate has its own meaning about what type of
-charges should be moved.
+charges should be moved. But in any case, it must be noted that an account of
+a page or a swap can be moved only when it is charged to the task's current
+(old) memory cgroup.
 
   bit | what type of charges would be moved ?
  -----+------------------------------------------------------------------------
   0   | A charge of an anonymous page (or swap of it) used by the target task.
       | Those pages and swaps must be used only by the target task. You must
       | enable Swap Extension (see 2.4) to enable move of swap charges.
-
-Note: Those pages and swaps must be charged to the old cgroup.
-Note: More type of pages(e.g. file cache, shmem,) will be supported by other
-      bits in future.
+ -----+------------------------------------------------------------------------
+  1   | A charge of file pages (normal file, tmpfs file (e.g. ipc shared memory)
+      | and swaps of tmpfs file) mmapped by the target task. Unlike the case of
+      | anonymous pages, file pages (and swaps) in the range mmapped by the task
+      | will be moved even if the task hasn't done a page fault, i.e. they might
+      | not be the task's "RSS", but another task's "RSS" that maps the same file.
+      | The mapcount of the page is ignored (the page can be moved even if
+      | page_mapcount(page) > 1). You must enable Swap Extension (see 2.4) to
+      | enable move of swap charges.
 
 8.3 TODO
 
-- Add support for other types of pages(e.g. file cache, shmem, etc.).
 - Implement madvise(2) to let users decide the vma to be moved or not to be
   moved.
 - All of moving charge operations are done under cgroup_mutex. It's not good
@@ -473,22 +586,61 @@ Note: More type of pages(e.g. file cache, shmem,) will be supported by other
 
 9. Memory thresholds
 
-Memory controler implements memory thresholds using cgroups notification
+Memory cgroup implements memory thresholds using the cgroups notification
 API (see cgroups.txt). It allows you to register multiple memory and memsw
 thresholds and get notifications when they are crossed.
 
 To register a threshold, an application needs to:
-  - create an eventfd using eventfd(2);
-  - open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
-  - write string like "<event_fd> <memory.usage_in_bytes> <threshold>" to
-    cgroup.event_control.
+- create an eventfd using eventfd(2);
+- open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
+- write a string like "<event_fd> <fd of memory.usage_in_bytes> <threshold>" to
+  cgroup.event_control.
 
 The application will be notified through eventfd when memory usage crosses
 the threshold in any direction.
 
 It's applicable for root and non-root cgroups.
 
-10. TODO
+10. OOM Control
+
+The memory.oom_control file is for OOM notification and other controls.
+
+Memory cgroup implements an OOM notifier using the cgroup notification
+API (see cgroups.txt). It allows you to register multiple OOM notification
+deliveries and get a notification when an OOM happens.
+
+To register a notifier, an application needs to:
+ - create an eventfd using eventfd(2)
+ - open the memory.oom_control file
+ - write a string like "<event_fd> <fd of memory.oom_control>" to
+   cgroup.event_control
+
+The application will be notified through eventfd when an OOM happens.
+OOM notification doesn't work for the root cgroup.
+
+You can disable the OOM-killer by writing "1" to the memory.oom_control file, as:
+
+	# echo 1 > memory.oom_control
+
+This operation is only allowed to the top cgroup of a sub-hierarchy.
+If the OOM-killer is disabled, tasks under the cgroup will hang/sleep
+in the memory cgroup's OOM-waitqueue when they request accountable memory.
+
+To make them run again, you have to relax the memory cgroup's OOM status by
+	* enlarging the limit or reducing usage.
+To reduce usage,
+	* kill some tasks.
+	* move some tasks to another group with account migration.
+	* remove some files (on tmpfs?)
+
+Then, stopped tasks will work again.
+
+Reading the file shows the current status of OOM.
+	oom_kill_disable 0 or 1 (if 1, the oom-killer is disabled)
+	under_oom	 0 or 1 (if 1, the memory cgroup is under OOM, tasks may
+				 be stopped.)
+
+11. TODO
 
 1. Add support for accounting huge pages (as a separate controller)
 2. Make per-cgroup scanner reclaim not-shared pages first
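The registration procedure described in sections 9 and 10 can be exercised from userspace in a few lines of C. The sketch below is illustrative and not part of the patch; it assumes the /cgroups/0 group created in section 3 and an arbitrary 32M threshold, and it abbreviates error handling.

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/eventfd.h>

	int main(void)
	{
		char buf[64];
		uint64_t count;
		int efd = eventfd(0, 0);
		int ufd = open("/cgroups/0/memory.usage_in_bytes", O_RDONLY);
		int cfd = open("/cgroups/0/cgroup.event_control", O_WRONLY);

		if (efd < 0 || ufd < 0 || cfd < 0) {
			perror("open");
			return 1;
		}

		/* Register: notify efd when usage crosses 32M (33554432 bytes)
		 * in either direction. */
		snprintf(buf, sizeof(buf), "%d %d 33554432", efd, ufd);
		if (write(cfd, buf, strlen(buf)) < 0) {
			perror("cgroup.event_control");
			return 1;
		}

		/* Blocks until the threshold is crossed. */
		if (read(efd, &count, sizeof(count)) == sizeof(count))
			printf("memory threshold crossed %llu time(s)\n",
			       (unsigned long long)count);
		return 0;
	}

Registering an OOM notifier works the same way, except that memory.oom_control is opened instead of memory.usage_in_bytes and no threshold argument is written.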
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index af1608070cd5..61c98f03baa1 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -429,8 +429,9 @@ check_flags: no
 implementations. If your fs is not using generic_file_llseek, you
 need to acquire and release the appropriate locks in your ->llseek().
 For many filesystems, it is probably safe to acquire the inode
-mutex. Note some filesystems (i.e. remote ones) provide no
-protection for i_size so you will need to use the BKL.
+mutex or just to use i_size_read() instead.
+Note: this does not protect the file->f_pos against concurrent modifications
+since this is something userspace has to take care of.
 
 Note: ext2_release() was *the* source of contention on fs-intensive
 loads and dropping BKL on ->release() helps to get rid of that (we still
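A minimal sketch of the i_size_read() route mentioned above follows; example_llseek is an illustrative name, not an in-tree function, and a real implementation would also decide how to serialize f_pos if that matters to it.

	static loff_t example_llseek(struct file *file, loff_t offset, int origin)
	{
		struct inode *inode = file->f_mapping->host;

		switch (origin) {
		case SEEK_END:
			/* i_size_read() reads i_size consistently without
			 * taking the inode mutex. */
			offset += i_size_read(inode);
			break;
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		}
		if (offset < 0)
			return -EINVAL;
		/* f_pos itself is not protected against concurrent lseek();
		 * userspace has to take care of that. */
		file->f_pos = offset;
		return offset;
	}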
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
index b324c033035a..203f7202cc9e 100644
--- a/Documentation/filesystems/squashfs.txt
+++ b/Documentation/filesystems/squashfs.txt
@@ -38,7 +38,8 @@ Hard link support: yes no
 Real inode numbers:		yes		no
 32-bit uids/gids:		yes		no
 File creation time:		yes		no
-Xattr and ACL support:		no		no
+Xattr support:			yes		no
+ACL support:			no		no
 
 Squashfs compresses data, inodes and directories. In addition, inode and
 directory data are highly compacted, and packed on byte boundaries. Each
@@ -58,7 +59,7 @@ obtained from this site also.
 3. SQUASHFS FILESYSTEM DESIGN
 -----------------------------
 
-A squashfs filesystem consists of seven parts, packed together on a byte
+A squashfs filesystem consists of a maximum of eight parts, packed together on a byte
 alignment:
 
 	 ---------------
@@ -80,6 +81,9 @@ alignment:
 	|---------------|
 	|    uid/gid    |
 	|  lookup table |
+	|---------------|
+	|     xattr     |
+	|     table     |
 	 ---------------
 
 Compressed data blocks are written to the filesystem as files are read from
@@ -192,6 +196,26 @@ This table is stored compressed into metadata blocks. A second index table is
 used to locate these. This second index table, for speed of access (and because
 it is small), is read at mount time and cached in memory.
 
+3.7 Xattr table
+---------------
+
+The xattr table contains extended attributes for each inode. The xattrs
+for each inode are stored in a list, each list entry containing a type,
+name and value field. The type field encodes the xattr prefix
+("user.", "trusted." etc) and it also encodes how the name/value fields
+should be interpreted. Currently the type indicates whether the value
+is stored inline (in which case the value field contains the xattr value),
+or if it is stored out of line (in which case the value field stores a
+reference to where the actual value is stored). This allows large values
+to be stored out of line, improving scanning and lookup performance, and it
+also allows values to be de-duplicated, the value being stored once, and
+all other occurrences holding an out of line reference to that value.
+
+The xattr lists are packed into compressed 8K metadata blocks.
+To reduce overhead in inodes, rather than storing the on-disk
+location of the xattr list inside each inode, a 32-bit xattr id
+is stored. This xattr id is mapped into the location of the xattr
+list using a second xattr id lookup table.
 
 4. TODOS AND OUTSTANDING ISSUES
 -------------------------------
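A purely illustrative sketch of such a list entry is shown below; the field names are hypothetical, and fs/squashfs/squashfs_fs.h is the authoritative on-disk layout.

	struct xattr_entry {		/* hypothetical layout */
		__le16	type;		/* encodes the prefix ("user." etc)
					 * and an inline vs. out-of-line flag */
		__le16	size;		/* length of the name that follows */
		char	name[0];	/* name, then an inline value or a
					 * reference to an out-of-line value */
	};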
@@ -199,9 +223,7 @@ it is small) is read at mount time and cached in memory.
 4.1 Todo list
 -------------
 
-Implement Xattr and ACL support. The Squashfs 4.0 filesystem layout has hooks
-for these but the code has not been written. Once the code has been written
-the existing layout should not require modification.
+Implement ACL support.
 
 4.2 Squashfs internal cache
 ---------------------------
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index e93ad9425e2a..a200a386429d 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -1,41 +1,149 @@
 Started Nov 1999 by Kanoj Sarcar <kanoj@sgi.com>
 
-The intent of this file is to have an uptodate, running commentary
-from different people about NUMA specific code in the Linux vm.
-
-What is NUMA? It is an architecture where the memory access times
-for different regions of memory from a given processor varies
-according to the "distance" of the memory region from the processor.
-Each region of memory to which access times are the same from any
-cpu, is called a node. On such architectures, it is beneficial if
-the kernel tries to minimize inter node communications. Schemes
-for this range from kernel text and read-only data replication
-across nodes, and trying to house all the data structures that
-key components of the kernel need on memory on that node.
-
-Currently, all the numa support is to provide efficient handling
-of widely discontiguous physical memory, so architectures which
-are not NUMA but can have huge holes in the physical address space
-can use the same code. All this code is bracketed by CONFIG_DISCONTIGMEM.
-
-The initial port includes NUMAizing the bootmem allocator code by
-encapsulating all the pieces of information into a bootmem_data_t
-structure. Node specific calls have been added to the allocator.
-In theory, any platform which uses the bootmem allocator should
-be able to put the bootmem and mem_map data structures anywhere
-it deems best.
-
-Each node's page allocation data structures have also been encapsulated
-into a pg_data_t. The bootmem_data_t is just one part of this. To
-make the code look uniform between NUMA and regular UMA platforms,
-UMA platforms have a statically allocated pg_data_t too (contig_page_data).
-For the sake of uniformity, the function num_online_nodes() is also defined
-for all platforms. As we run benchmarks, we might decide to NUMAize
+What is NUMA?
+
+This question can be answered from a couple of perspectives: the
+hardware view and the Linux software view.
+
+From the hardware perspective, a NUMA system is a computer platform that
+comprises multiple components or assemblies each of which may contain 0
+or more CPUs, local memory, and/or IO buses. For brevity and to
+disambiguate the hardware view of these physical components/assemblies
+from the software abstraction thereof, we'll call the components/assemblies
+'cells' in this document.
+
+Each of the 'cells' may be viewed as an SMP [symmetric multi-processor] subset
+of the system--although some components necessary for a stand-alone SMP system
+may not be populated on any given cell. The cells of the NUMA system are
+connected together with some sort of system interconnect--e.g., a crossbar or
+point-to-point link are common types of NUMA system interconnects. Both of
+these types of interconnects can be aggregated to create NUMA platforms with
+cells at multiple distances from other cells.
+
+For Linux, the NUMA platforms of interest are primarily what is known as Cache
+Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible
+to and accessible from any CPU attached to any cell and cache coherency
+is handled in hardware by the processor caches and/or the system interconnect.
+
+Memory access time and effective memory bandwidth varies depending on how far
+away the cell containing the CPU or IO bus making the memory access is from the
+cell containing the target memory. For example, access to memory by CPUs
+attached to the same cell will experience faster access times and higher
+bandwidths than accesses to memory on other, remote cells. NUMA platforms
+can have cells at multiple remote distances from any given cell.
33for all platforms. As we run benchmarks, we might decide to NUMAize 33can have cells at multiple remote distances from any given cell.
34more variables like low_on_memory, nr_free_pages etc into the pg_data_t. 34
35 35Platform vendors don't build NUMA systems just to make software developers'
36The NUMA aware page allocation code currently tries to allocate pages 36lives interesting. Rather, this architecture is a means to provide scalable
37from different nodes in a round robin manner. This will be changed to 37memory bandwidth. However, to achieve scalable memory bandwidth, system and
38do concentratic circle search, starting from current node, once the 38application software must arrange for a large majority of the memory references
39NUMA port achieves more maturity. The call alloc_pages_node has been 39[cache misses] to be to "local" memory--memory on the same cell, if any--or
40added, so that drivers can make the call and not worry about whether 40to the closest cell with memory.
41it is running on a NUMA or UMA platform. 41
42This leads to the Linux software view of a NUMA system:
43
44Linux divides the system's hardware resources into multiple software
45abstractions called "nodes". Linux maps the nodes onto the physical cells
46of the hardware platform, abstracting away some of the details for some
47architectures. As with physical cells, software nodes may contain 0 or more
48CPUs, memory and/or IO buses. And, again, memory accesses to memory on
49"closer" nodes--nodes that map to closer cells--will generally experience
50faster access times and higher effective bandwidth than accesses to more
51remote cells.
52
53For some architectures, such as x86, Linux will "hide" any node representing a
54physical cell that has no memory attached, and reassign any CPUs attached to
55that cell to a node representing a cell that does have memory. Thus, on
56these architectures, one cannot assume that all CPUs that Linux associates with
57a given node will see the same local memory access times and bandwidth.
58
59In addition, for some architectures, again x86 is an example, Linux supports
60the emulation of additional nodes. For NUMA emulation, linux will carve up
61the existing nodes--or the system memory for non-NUMA platforms--into multiple
62nodes. Each emulated node will manage a fraction of the underlying cells'
63physical memory. NUMA emluation is useful for testing NUMA kernel and
64application features on non-NUMA platforms, and as a sort of memory resource
65management mechanism when used together with cpusets.
66[see Documentation/cgroups/cpusets.txt]
67
68For each node with memory, Linux constructs an independent memory management
69subsystem, complete with its own free page lists, in-use page lists, usage
70statistics and locks to mediate access. In addition, Linux constructs for
71each memory zone [one or more of DMA, DMA32, NORMAL, HIGH_MEMORY, MOVABLE],
72an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
73selected zone/node cannot satisfy the allocation request. This situation,
74when a zone has no available memory to satisfy a request, is called
75"overflow" or "fallback".
76
77Because some nodes contain multiple zones containing different types of
78memory, Linux must decide whether to order the zonelists such that allocations
79fall back to the same zone type on a different node, or to a different zone
80type on the same node. This is an important consideration because some zones,
81such as DMA or DMA32, represent relatively scarce resources. Linux chooses
82a default zonelist order based on the sizes of the various zone types relative
83to the total memory of the node and the total memory of the system. The
84default zonelist order may be overridden using the numa_zonelist_order kernel
85boot parameter or sysctl. [see Documentation/kernel-parameters.txt and
86Documentation/sysctl/vm.txt]
87
88By default, Linux will attempt to satisfy memory allocation requests from the
89node to which the CPU that executes the request is assigned. Specifically,
90Linux will attempt to allocate from the first node in the appropriate zonelist
91for the node where the request originates. This is called "local allocation."
92If the "local" node cannot satisfy the request, the kernel will examine other
93nodes' zones in the selected zonelist looking for the first zone in the list
94that can satisfy the request.
95
96Local allocation will tend to keep subsequent access to the allocated memory
97"local" to the underlying physical resources and off the system interconnect--
98as long as the task on whose behalf the kernel allocated some memory does not
99later migrate away from that memory. The Linux scheduler is aware of the
100NUMA topology of the platform--embodied in the "scheduling domains" data
101structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler
102attempts to minimize task migration to distant scheduling domains. However,
103the scheduler does not take a task's NUMA footprint into account directly.
104Thus, under sufficient imbalance, tasks can migrate between nodes, remote
105from their initial node and kernel data structures.
106
107System administrators and application designers can restrict a task's migration
108to improve NUMA locality using various CPU affinity command line interfaces,
109such as taskset(1) and numactl(1), and program interfaces such as
110sched_setaffinity(2). Further, one can modify the kernel's default local
111allocation behavior using Linux NUMA memory policy.
112[see Documentation/vm/numa_memory_policy.]
113
114System administrators can restrict the CPUs and nodes' memories that a non-
115privileged user can specify in the scheduling or NUMA commands and functions
116using control groups and CPUsets. [see Documentation/cgroups/CPUsets.txt]
117
118On architectures that do not hide memoryless nodes, Linux will include only
119zones [nodes] with memory in the zonelists. This means that for a memoryless
120node the "local memory node"--the node of the first zone in CPU's node's
121zonelist--will not be the node itself. Rather, it will be the node that the
122kernel selected as the nearest node with memory when it built the zonelists.
123So, default, local allocations will succeed with the kernel supplying the
124closest available memory. This is a consequence of the same mechanism that
125allows such allocations to fallback to other nearby nodes when a node that
126does contain memory overflows.
127
128Some kernel allocations do not want or cannot tolerate this allocation fallback
129behavior. Rather they want to be sure they get memory from the specified node
130or get notified that the node has no free memory. This is usually the case when
131a subsystem allocates per CPU memory resources, for example.
132
133A typical model for making such an allocation is to obtain the node id of the
134node to which the "current CPU" is attached using one of the kernel's
135numa_node_id() or CPU_to_node() functions and then request memory from only
136the node id returned. When such an allocation fails, the requesting subsystem
137may revert to its own fallback path. The slab kernel memory allocator is an
138example of this. Or, the subsystem may choose to disable or not to enable
139itself on allocation failure. The kernel profiling subsystem is an example of
140this.
141
142If the architecture supports--does not hide--memoryless nodes, then CPUs
143attached to memoryless nodes would always incur the fallback path overhead
144or some subsystems would fail to initialize if they attempted to allocated
145memory exclusively from a node without memory. To support such
146architectures transparently, kernel subsystems can use the numa_mem_id()
147or cpu_to_mem() function to locate the "local memory node" for the calling or
148specified CPU. Again, this is the same node from which default, local page
149allocations will be attempted.
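
As a concrete illustration of the allocation model described above, here is a minimal sketch of the per-CPU pattern. The function name and fallback policy are invented for illustration; numa_node_id(), numa_mem_id(), alloc_pages_node() and __GFP_THISNODE are the real interfaces the text refers to.

	/*
	 * Sketch of "allocate on the local node, fall back by hand".
	 */
	#include <linux/gfp.h>
	#include <linux/topology.h>

	static struct page *example_alloc_local_page(void)
	{
		int nid = numa_node_id();	/* node of the executing CPU */
		struct page *page;

		/* Try the local node only; forbid zonelist fallback. */
		page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
		if (page)
			return page;

		/*
		 * Subsystem-private fallback: numa_mem_id() names the
		 * nearest node with memory, which matters on systems
		 * with memoryless nodes.
		 */
		return alloc_pages_node(numa_mem_id(), GFP_KERNEL, 0);
	}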
diff --git a/MAINTAINERS b/MAINTAINERS
index a8fe9b461e09..8b7eba2de603 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -969,6 +969,18 @@ M: Wan ZongShun <mcuos.com@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.mcuos.com
 S:	Maintained
+F:	arch/arm/mach-w90x900/
+F:	arch/arm/mach-nuc93x/
+F:	drivers/input/keyboard/w90p910_keypad.c
+F:	drivers/input/touchscreen/w90p910_ts.c
+F:	drivers/watchdog/nuc900_wdt.c
+F:	drivers/net/arm/w90p910_ether.c
+F:	drivers/mtd/nand/w90p910_nand.c
+F:	drivers/rtc/rtc-nuc900.c
+F:	drivers/spi/spi_nuc900.c
+F:	drivers/usb/host/ehci-w90x900.c
+F:	drivers/video/nuc900fb.c
+F:	drivers/sound/soc/nuc900/
 
 ARM/U300 MACHINE SUPPORT
 M:	Linus Walleij <linus.walleij@stericsson.com>
@@ -4992,6 +5004,12 @@ L: linux-mmc@vger.kernel.org
 S:	Maintained
 F:	drivers/mmc/host/sdhci-s3c.c
 
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
+M:	Viresh Kumar <viresh.kumar@st.com>
+L:	linux-mmc@vger.kernel.org
+S:	Maintained
+F:	drivers/mmc/host/sdhci-spear.c
+
 SECURITY SUBSYSTEM
 M:	James Morris <jmorris@namei.org>
 L:	linux-security-module@vger.kernel.org (suggested Cc:)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 24efdfe277fc..3e2e540a0f2a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -61,6 +61,9 @@ config ZONE_DMA
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config GENERIC_ISA_DMA
 	bool
 	default y
diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h
index 440747ca6349..5728c52a7412 100644
--- a/arch/alpha/include/asm/scatterlist.h
+++ b/arch/alpha/include/asm/scatterlist.h
@@ -1,24 +1,7 @@
 #ifndef _ALPHA_SCATTERLIST_H
 #define _ALPHA_SCATTERLIST_H
 
-#include <asm/page.h>
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-
-	unsigned int length;
-
-	dma_addr_t dma_address;
-	__u32 dma_length;
-};
-
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->dma_length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (~0UL)
 
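
For reference, the consolidated header these architectures now pull in looks approximately like the sketch below. This is paraphrased from the asm-generic header of this period rather than quoted verbatim; treat include/asm-generic/scatterlist.h in the tree as authoritative.

	struct scatterlist {
	#ifdef CONFIG_DEBUG_SG
		unsigned long	sg_magic;
	#endif
		unsigned long	page_link;
		unsigned int	offset;
		unsigned int	length;
		dma_addr_t	dma_address;
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
		unsigned int	dma_length;
	#endif
	};

	#define sg_dma_address(sg)	((sg)->dma_address)
	#ifdef CONFIG_NEED_SG_DMA_LENGTH
	#define sg_dma_len(sg)		((sg)->dma_length)
	#else
	#define sg_dma_len(sg)		((sg)->length)
	#endif

This is why the architectures that track a separate mapped DMA length (alpha here, and ia64, parisc and powerpc in later hunks) select NEED_SG_DMA_LENGTH in their Kconfig changes.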
diff --git a/arch/alpha/math-emu/sfp-util.h b/arch/alpha/math-emu/sfp-util.h
index d4c6ae7fee47..f53707f77455 100644
--- a/arch/alpha/math-emu/sfp-util.h
+++ b/arch/alpha/math-emu/sfp-util.h
@@ -28,3 +28,8 @@ extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long,
 #define UDIV_NEEDS_NORMALIZATION 1
 
 #define abort()			goto bad_insn
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN -1
+#endif
+#define __BYTE_ORDER __LITTLE_ENDIAN
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index bcda59f39941..2f87870d9347 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -3,9 +3,6 @@
 
 #include <asm/memory.h>
 #include <asm/types.h>
-
 #include <asm-generic/scatterlist.h>
 
-#undef ARCH_HAS_SG_CHAIN
-
 #endif /* _ASMARM_SCATTERLIST_H */
diff --git a/arch/arm/mach-davinci/include/mach/mmc.h b/arch/arm/mach-davinci/include/mach/mmc.h
index 5a85e24f3673..d4f1e9675069 100644
--- a/arch/arm/mach-davinci/include/mach/mmc.h
+++ b/arch/arm/mach-davinci/include/mach/mmc.h
@@ -22,6 +22,9 @@ struct davinci_mmc_config {
 
 	/* Version of the MMC/SD controller */
 	u8	version;
+
+	/* Number of sg segments */
+	u8	nr_sg;
 };
 void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
 
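
A board file would populate the new field alongside the existing ones. The values in this fragment are invented for illustration, and MMC_CTLR_VERSION_2 is assumed to be one of the controller-version constants from the same header:

	/* Hypothetical board setup -- illustrative values only. */
	static struct davinci_mmc_config example_mmc_config = {
		.wires		= 4,
		.version	= MMC_CTLR_VERSION_2,
		.nr_sg		= 16,	/* cap scatter-gather segments */
	};

The board init code then registers it with davinci_setup_mmc(0, &example_mmc_config);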
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index e7d629b3c76a..f474a80b8867 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -137,9 +137,7 @@ static void ads7846_dev_init(void)
 	}
 
 	gpio_direction_input(ts_gpio);
-
-	omap_set_gpio_debounce(ts_gpio, 1);
-	omap_set_gpio_debounce_time(ts_gpio, 0xa);
+	gpio_set_debounce(ts_gpio, 310);
 }
 
 static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 5fcb52e71298..fefd7e6e9779 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -209,8 +209,7 @@ static void ads7846_dev_init(void)
 	}
 
 	gpio_direction_input(ts_gpio);
-	omap_set_gpio_debounce(ts_gpio, 1);
-	omap_set_gpio_debounce_time(ts_gpio, 0xa);
+	gpio_set_debounce(ts_gpio, 310);
 }
 
 static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 81bba194b030..b95261013812 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -579,9 +579,7 @@ static void ads7846_dev_init(void)
 		printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
 
 	gpio_direction_input(OMAP3_EVM_TS_GPIO);
-
-	omap_set_gpio_debounce(OMAP3_EVM_TS_GPIO, 1);
-	omap_set_gpio_debounce_time(OMAP3_EVM_TS_GPIO, 0xa);
+	gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310);
 }
 
 static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 395d049bf010..db06dc910ba7 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -130,8 +130,8 @@ static struct platform_device pandora_keys_gpio = {
 static void __init pandora_keys_gpio_init(void)
 {
 	/* set debounce time for GPIO banks 4 and 6 */
-	omap_set_gpio_debounce_time(32 * 3, GPIO_DEBOUNCE_TIME);
-	omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME);
+	gpio_set_debounce(32 * 3, GPIO_DEBOUNCE_TIME);
+	gpio_set_debounce(32 * 5, GPIO_DEBOUNCE_TIME);
 }
 
 static int board_keymap[] = {
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 2504d41f923e..2f5f8233dd5b 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -328,8 +328,7 @@ static void __init omap3_ads7846_init(void)
 	}
 
 	gpio_direction_input(OMAP3_TS_GPIO);
-	omap_set_gpio_debounce(OMAP3_TS_GPIO, 1);
-	omap_set_gpio_debounce_time(OMAP3_TS_GPIO, 0xa);
+	gpio_set_debounce(OMAP3_TS_GPIO, 310);
 }
 
 static struct ads7846_platform_data ads7846_config = {
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index dc2ac42d6319..393e9219a5b6 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -624,79 +624,58 @@ do { \
 	__raw_writel(l, base + reg); \
 } while(0)
 
-void omap_set_gpio_debounce(int gpio, int enable)
+/**
+ * _set_gpio_debounce - low level gpio debounce time
+ * @bank: the gpio bank we're acting upon
+ * @gpio: the gpio number on this @gpio
+ * @debounce: debounce time to use
+ *
+ * OMAP's debounce time is in 31us steps so we need
+ * to convert and round up to the closest unit.
+ */
+static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+		unsigned debounce)
 {
-	struct gpio_bank *bank;
-	void __iomem *reg;
-	unsigned long flags;
-	u32 val, l = 1 << get_gpio_index(gpio);
+	void __iomem *reg = bank->base;
+	u32 val;
+	u32 l;
+
+	if (debounce < 32)
+		debounce = 0x01;
+	else if (debounce > 7936)
+		debounce = 0xff;
+	else
+		debounce = (debounce / 0x1f) - 1;
 
-	if (cpu_class_is_omap1())
-		return;
+	l = 1 << get_gpio_index(gpio);
 
-	bank = get_gpio_bank(gpio);
-	reg = bank->base;
+	if (cpu_is_omap44xx())
+		reg += OMAP4_GPIO_DEBOUNCINGTIME;
+	else
+		reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
+
+	__raw_writel(debounce, reg);
 
+	reg = bank->base;
 	if (cpu_is_omap44xx())
 		reg += OMAP4_GPIO_DEBOUNCENABLE;
 	else
 		reg += OMAP24XX_GPIO_DEBOUNCE_EN;
 
-	if (!(bank->mod_usage & l)) {
-		printk(KERN_ERR "GPIO %d not requested\n", gpio);
-		return;
-	}
-
-	spin_lock_irqsave(&bank->lock, flags);
 	val = __raw_readl(reg);
 
-	if (enable && !(val & l))
+	if (debounce) {
 		val |= l;
-	else if (!enable && (val & l))
-		val &= ~l;
-	else
-		goto done;
-
-	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
-		bank->dbck_enable_mask = val;
-		if (enable)
+		if (cpu_is_omap34xx() || cpu_is_omap44xx())
 			clk_enable(bank->dbck);
-		else
+	} else {
+		val &= ~l;
+		if (cpu_is_omap34xx() || cpu_is_omap44xx())
 			clk_disable(bank->dbck);
 	}
 
 	__raw_writel(val, reg);
-done:
-	spin_unlock_irqrestore(&bank->lock, flags);
 }
-EXPORT_SYMBOL(omap_set_gpio_debounce);
-
-void omap_set_gpio_debounce_time(int gpio, int enc_time)
-{
-	struct gpio_bank *bank;
-	void __iomem *reg;
-
-	if (cpu_class_is_omap1())
-		return;
-
-	bank = get_gpio_bank(gpio);
-	reg = bank->base;
-
-	if (!bank->mod_usage) {
-		printk(KERN_ERR "GPIO not requested\n");
-		return;
-	}
-
-	enc_time &= 0xff;
-
-	if (cpu_is_omap44xx())
-		reg += OMAP4_GPIO_DEBOUNCINGTIME;
-	else
-		reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
-
-	__raw_writel(enc_time, reg);
-}
-EXPORT_SYMBOL(omap_set_gpio_debounce_time);
 
 #ifdef CONFIG_ARCH_OMAP2PLUS
 static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
@@ -1656,6 +1635,20 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
 	return 0;
 }
 
+static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
+			 unsigned debounce)
+{
+	struct gpio_bank *bank;
+	unsigned long flags;
+
+	bank = container_of(chip, struct gpio_bank, chip);
+	spin_lock_irqsave(&bank->lock, flags);
+	_set_gpio_debounce(bank, offset, debounce);
+	spin_unlock_irqrestore(&bank->lock, flags);
+
+	return 0;
+}
+
 static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
 	struct gpio_bank *bank;
@@ -1909,6 +1902,7 @@ static int __init _omap_gpio_init(void)
 		bank->chip.direction_input = gpio_input;
 		bank->chip.get = gpio_get;
 		bank->chip.direction_output = gpio_output;
+		bank->chip.set_debounce = gpio_debounce;
 		bank->chip.set = gpio_set;
 		bank->chip.to_irq = gpio_2irq;
 		if (bank_is_mpuio(bank)) {
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
index 377320e3bd17..06394e5ead6c 100644
--- a/arch/avr32/include/asm/scatterlist.h
+++ b/arch/avr32/include/asm/scatterlist.h
@@ -1,25 +1,7 @@
 #ifndef __ASM_AVR32_SCATTERLIST_H
 #define __ASM_AVR32_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	dma_addr_t dma_address;
-	unsigned int length;
-};
-
-/* These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0xffffffff)
 
diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h
index 04f448711cd0..64d41d34ab0b 100644
--- a/arch/blackfin/include/asm/scatterlist.h
+++ b/arch/blackfin/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
 #ifndef _BLACKFIN_SCATTERLIST_H
 #define _BLACKFIN_SCATTERLIST_H
 
-#include <linux/mm.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	dma_addr_t dma_address;
-	unsigned int length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0xffffffff)
 
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 43eb969405d1..6ec77685df52 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -292,28 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 	}
 
-#ifdef CONFIG_BINFMT_ELF_FDPIC
-	case PTRACE_GETFDPIC: {
-		unsigned long tmp = 0;
-
-		switch (addr) {
-		case_PTRACE_GETFDPIC_EXEC:
-		case PTRACE_GETFDPIC_EXEC:
-			tmp = child->mm->context.exec_fdpic_loadmap;
-			break;
-		case_PTRACE_GETFDPIC_INTERP:
-		case PTRACE_GETFDPIC_INTERP:
-			tmp = child->mm->context.interp_fdpic_loadmap;
-			break;
-		default:
-			break;
-		}
-
-		ret = put_user(tmp, datap);
-		break;
-	}
-#endif
-
 	/* when I and D space are separate, this will have to be fixed. */
 	case PTRACE_POKEDATA:
 		pr_debug("ptrace: PTRACE_PEEKDATA\n");
@@ -357,8 +335,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case PTRACE_PEEKUSR:
 		switch (addr) {
 #ifdef CONFIG_BINFMT_ELF_FDPIC	/* backwards compat */
-		case PT_FDPIC_EXEC:   goto case_PTRACE_GETFDPIC_EXEC;
-		case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP;
+		case PT_FDPIC_EXEC:
+			request = PTRACE_GETFDPIC;
+			addr = PTRACE_GETFDPIC_EXEC;
+			goto case_default;
+		case PT_FDPIC_INTERP:
+			request = PTRACE_GETFDPIC;
+			addr = PTRACE_GETFDPIC_INTERP;
+			goto case_default;
 #endif
 		default:
 			ret = get_reg(child, addr, datap);
@@ -385,6 +369,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 					  0, sizeof(struct pt_regs),
 					  (const void __user *)data);
 
+	case_default:
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
diff --git a/arch/cris/include/asm/scatterlist.h b/arch/cris/include/asm/scatterlist.h
index faff53ad1f96..249a7842ff5f 100644
--- a/arch/cris/include/asm/scatterlist.h
+++ b/arch/cris/include/asm/scatterlist.h
@@ -1,22 +1,7 @@
 #ifndef __ASM_CRIS_SCATTERLIST_H
 #define __ASM_CRIS_SCATTERLIST_H
 
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	char *  address;    /* Location data is to be transferred to */
-	unsigned int length;
-
-	/* The following is i386 highmem junk - not used by us */
-	unsigned long page_link;
-	unsigned int offset;/* for highmem, page offset */
-
-};
-
-#define sg_dma_address(sg)	((sg)->address)
-#define sg_dma_len(sg)		((sg)->length)
-/* i386 junk */
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x1fffffff)
 
diff --git a/arch/frv/include/asm/scatterlist.h b/arch/frv/include/asm/scatterlist.h
index 4bca8a28546c..1614bfd7e3a4 100644
--- a/arch/frv/include/asm/scatterlist.h
+++ b/arch/frv/include/asm/scatterlist.h
@@ -1,45 +1,7 @@
 #ifndef _ASM_SCATTERLIST_H
 #define _ASM_SCATTERLIST_H
 
-#include <asm/types.h>
-
-/*
- * Drivers must set either ->address or (preferred) page and ->offset
- * to indicate where data must be transferred to/from.
- *
- * Using page is recommended since it handles highmem data as well as
- * low mem. ->address is restricted to data which has a virtual mapping, and
- * it will go away in the future. Updating to page can be automated very
- * easily -- something like
- *
- *	sg->address = some_ptr;
- *
- * can be rewritten as
- *
- *	sg_set_buf(sg, some_ptr, length);
- *
- * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
- */
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;		/* for highmem, page offset */
-
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0xffffffffUL)
 
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index 60eeed3694c0..fac028936a04 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -344,26 +344,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 					  0, sizeof(child->thread.user->f),
 					  (const void __user *)data);
 
-	case PTRACE_GETFDPIC:
-		tmp = 0;
-		switch (addr) {
-		case PTRACE_GETFDPIC_EXEC:
-			tmp = child->mm->context.exec_fdpic_loadmap;
-			break;
-		case PTRACE_GETFDPIC_INTERP:
-			tmp = child->mm->context.interp_fdpic_loadmap;
-			break;
-		default:
-			break;
-		}
-
-		ret = 0;
-		if (put_user(tmp, (unsigned long *) data)) {
-			ret = -EFAULT;
-			break;
-		}
-		break;
-
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c
index 71abd1510a59..6c155d69da29 100644
--- a/arch/frv/kernel/sysctl.c
+++ b/arch/frv/kernel/sysctl.c
@@ -46,8 +46,9 @@ static void frv_change_dcache_mode(unsigned long newmode)
 /*
  * handle requests to dynamically switch the write caching mode delivered by /proc
  */
-static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
-				 void __user *buffer, size_t *lenp, loff_t *ppos)
+static int procctl_frv_cachemode(ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp,
+				 loff_t *ppos)
 {
 	unsigned long hsr0;
 	char buff[8];
@@ -84,7 +85,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
 	}
 
 	/* read the state */
-	if (filp->f_pos > 0) {
+	if (*ppos > 0) {
 		*lenp = 0;
 		return 0;
 	}
@@ -110,7 +111,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
 		return -EFAULT;
 
 	*lenp = len;
-	filp->f_pos = len;
+	*ppos = len;
 	return 0;
 
 } /* end procctl_frv_cachemode() */
@@ -120,8 +121,9 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
  * permit the mm_struct the nominated process is using have its MMU context ID pinned
  */
 #ifdef CONFIG_MMU
-static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+static int procctl_frv_pin_cxnr(ctl_table *table, int write,
+				void __user *buffer, size_t *lenp,
+				loff_t *ppos)
 {
 	pid_t pid;
 	char buff[16], *p;
@@ -150,7 +152,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
 	}
 
 	/* read the currently pinned CXN */
-	if (filp->f_pos > 0) {
+	if (*ppos > 0) {
 		*lenp = 0;
 		return 0;
 	}
@@ -163,7 +165,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
 		return -EFAULT;
 
 	*lenp = len;
-	filp->f_pos = len;
+	*ppos = len;
 	return 0;
 
 } /* end procctl_frv_pin_cxnr() */
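
These hunks track the removal of the struct file argument from sysctl proc handlers; the read position now travels exclusively through *ppos. A minimal read-only handler under the new signature looks roughly like this (the name and contents are invented for illustration):

	#include <linux/sysctl.h>
	#include <linux/uaccess.h>

	static int example_proc_handler(ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
	{
		char buf[16];
		size_t len;

		if (write)
			return -EPERM;

		if (*ppos > 0) {	/* everything returned on first read */
			*lenp = 0;
			return 0;
		}

		len = snprintf(buf, sizeof(buf), "example\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;

		*lenp = len;
		*ppos = len;
		return 0;
	}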
diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
index d3ecdd87ac90..de08a4a2cc1c 100644
--- a/arch/h8300/include/asm/scatterlist.h
+++ b/arch/h8300/include/asm/scatterlist.h
@@ -1,17 +1,7 @@
 #ifndef _H8300_SCATTERLIST_H
 #define _H8300_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	dma_addr_t dma_address;
-	unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0xffffffff)
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9676100b83ee..95610820041e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -56,6 +56,9 @@ config MMU
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config SWIOTLB
 	bool
 
@@ -495,6 +498,14 @@ config HAVE_ARCH_NODEDATA_EXTENSION
 	def_bool y
 	depends on NUMA
 
+config USE_PERCPU_NUMA_NODE_ID
+	def_bool y
+	depends on NUMA
+
+config HAVE_MEMORYLESS_NODES
+	def_bool y
+	depends on NUMA
+
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 	depends on PROC_KCORE
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
index d8e98961dec7..f299a4fb25c8 100644
--- a/arch/ia64/include/asm/scatterlist.h
+++ b/arch/ia64/include/asm/scatterlist.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_IA64_SCATTERLIST_H
 #define _ASM_IA64_SCATTERLIST_H
 
+#include <asm-generic/scatterlist.h>
 /*
  * It used to be that ISA_DMA_THRESHOLD had something to do with the
  * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
@@ -10,7 +11,6 @@
  * that's 4GB - 1.
  */
 #define ISA_DMA_THRESHOLD	0xffffffff
-
-#include <asm-generic/scatterlist.h>
+#define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d323071d0f91..09f646753d1a 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -26,11 +26,6 @@
 #define RECLAIM_DISTANCE 15
 
 /*
- * Returns the number of the node containing CPU 'cpu'
- */
-#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
-
-/*
  * Returns a bitmask of CPUs on Node 'node'.
  */
 #define cpumask_of_node(node) ((node) == -1 ?	\
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 3095654f9ab3..d9485d952ed0 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@ struct dma_map_ops swiotlb_dma_ops = {
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.dma_supported = swiotlb_dma_supported,
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 0dec7f702448..7c7909f9bc93 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -638,7 +638,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
 	 */
 
 	read_lock(&tasklist_lock);
-	if (child->signal) {
+	if (child->sighand) {
 		spin_lock_irq(&child->sighand->siglock);
 		if (child->state == TASK_STOPPED &&
 		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
@@ -662,7 +662,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
 	 * job control stop, so that SIGCONT can be used to wake it up.
 	 */
 	read_lock(&tasklist_lock);
-	if (child->signal) {
+	if (child->sighand) {
 		spin_lock_irq(&child->sighand->siglock);
 		if (child->state == TASK_TRACED &&
 		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e5230b2ff2c5..518e876a410d 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -390,6 +390,12 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
+	/*
+	 * numa_node_id() works after this.
+	 */
+	set_numa_node(cpu_to_node_map[cpuid]);
+	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
+
 	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
@@ -632,6 +638,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callin_map);
+	set_numa_node(cpu_to_node_map[smp_processor_id()]);
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 	paravirt_post_smp_prepare_boot_cpu();
 }
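
With CONFIG_USE_PERCPU_NUMA_NODE_ID selected in the Kconfig hunk above, the node id of the executing CPU lives in a per-CPU variable that these set_numa_node()/set_numa_mem() calls initialize. The accessors then reduce to roughly the following; this is a simplified paraphrase of the generic topology helpers, not the exact source:

	/* Simplified paraphrase of the generic per-CPU node-id accessors. */
	DECLARE_PER_CPU(int, numa_node);

	static inline int numa_node_id(void)
	{
		return __this_cpu_read(numa_node);
	}

	static inline void set_numa_node(int node)
	{
		__this_cpu_write(numa_node, node);
	}

	#ifdef CONFIG_HAVE_MEMORYLESS_NODES
	DECLARE_PER_CPU(int, _numa_mem_);	/* nearest node with memory */

	static inline int numa_mem_id(void)
	{
		return __this_cpu_read(_numa_mem_);
	}
	#endif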
diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h
index 1ed372c73d0b..aeeddd8dac17 100644
--- a/arch/m32r/include/asm/scatterlist.h
+++ b/arch/m32r/include/asm/scatterlist.h
@@ -1,20 +1,7 @@
 #ifndef _ASM_M32R_SCATTERLIST_H
 #define _ASM_M32R_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	char *  address;    /* Location data is to be transferred to, NULL for
-			     * highmem page */
-	unsigned long page_link;
-	unsigned int offset;/* for highmem, page offset */
-
-	dma_addr_t dma_address;
-	unsigned int length;
-};
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x1fffffff)
 
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
index e27ad902b1cf..175da06c6b95 100644
--- a/arch/m68k/include/asm/scatterlist.h
+++ b/arch/m68k/include/asm/scatterlist.h
@@ -1,23 +1,9 @@
 #ifndef _M68K_SCATTERLIST_H
 #define _M68K_SCATTERLIST_H
 
-#include <linux/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	unsigned int length;
-
-	dma_addr_t dma_address;	/* A place to hang host-specific addresses at. */
-};
+#include <asm-generic/scatterlist.h>
 
 /* This is bogus and should go away. */
 #define ISA_DMA_THRESHOLD (0x00ffffff)
 
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
-
 #endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/microblaze/include/asm/scatterlist.h b/arch/microblaze/include/asm/scatterlist.h
index 35d786fe93ae..dc4a8900cc80 100644
--- a/arch/microblaze/include/asm/scatterlist.h
+++ b/arch/microblaze/include/asm/scatterlist.h
@@ -1 +1,3 @@
 #include <asm-generic/scatterlist.h>
+
+#define ISA_DMA_THRESHOLD	(~0UL)
diff --git a/arch/mips/include/asm/scatterlist.h b/arch/mips/include/asm/scatterlist.h
index 83d69fe17c9f..9af65e79be36 100644
--- a/arch/mips/include/asm/scatterlist.h
+++ b/arch/mips/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
 #ifndef __ASM_SCATTERLIST_H
 #define __ASM_SCATTERLIST_H
 
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
-
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x00ffffffUL)
 
diff --git a/arch/mn10300/include/asm/scatterlist.h b/arch/mn10300/include/asm/scatterlist.h
index 67535901b9ff..7bd00b9e030d 100644
--- a/arch/mn10300/include/asm/scatterlist.h
+++ b/arch/mn10300/include/asm/scatterlist.h
@@ -11,45 +11,8 @@
 #ifndef _ASM_SCATTERLIST_H
 #define _ASM_SCATTERLIST_H
 
-#include <asm/types.h>
-
-/*
- * Drivers must set either ->address or (preferred) page and ->offset
- * to indicate where data must be transferred to/from.
- *
- * Using page is recommended since it handles highmem data as well as
- * low mem. ->address is restricted to data which has a virtual mapping, and
- * it will go away in the future. Updating to page can be automated very
- * easily -- something like
- *
- *	sg->address = some_ptr;
- *
- * can be rewritten as
- *
- *	sg_set_page(virt_to_page(some_ptr));
- *	sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;
- *
- * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
- */
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long	sg_magic;
-#endif
-	unsigned long	page_link;
-	unsigned int	offset;		/* for highmem, page offset */
-	dma_addr_t	dma_address;
-	unsigned int	length;
-};
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (0x00ffffff)
 
-/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->length)
-
 #endif /* _ASM_SCATTERLIST_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9c4da3d63bfb..05a366a5c4d5 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -98,6 +98,9 @@ config STACKTRACE_SUPPORT
 config NEED_DMA_MAP_STATE
 	def_bool y
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config ISA_DMA_API
 	bool
 
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
index 62269b31ebf4..2c3b79b54b28 100644
--- a/arch/parisc/include/asm/scatterlist.h
+++ b/arch/parisc/include/asm/scatterlist.h
@@ -3,25 +3,9 @@
 
 #include <asm/page.h>
 #include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-
-	unsigned int length;
-
-	/* an IOVA can be 64-bits on some PA-Risc platforms. */
-	dma_addr_t iova;	/* I/O Virtual Address */
-	__u32      iova_length; /* bytes mapped */
-};
-
-#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
-#define sg_dma_address(sg) ((sg)->iova)
-#define sg_dma_len(sg)     ((sg)->iova_length)
+#include <asm-generic/scatterlist.h>
 
 #define ISA_DMA_THRESHOLD (~0UL)
+#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
 
 #endif /* _ASM_PARISC_SCATTERLIST_H */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c4c4549c22bb..66a315e06dce 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -663,6 +663,9 @@ config ZONE_DMA
 config NEED_DMA_MAP_STATE
 	def_bool (PPC64 || NOT_COHERENT_CACHE)
 
+config NEED_SG_DMA_LENGTH
+	def_bool y
+
 config GENERIC_ISA_DMA
 	bool
 	depends on PPC64 || POWER4 || 6xx && !CPM2
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
index 912bf597870f..34cc78fd0ef4 100644
--- a/arch/powerpc/include/asm/scatterlist.h
+++ b/arch/powerpc/include/asm/scatterlist.h
@@ -9,38 +9,12 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifdef __KERNEL__
-#include <linux/types.h>
 #include <asm/dma.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	unsigned int length;
-
-	/* For TCE or SWIOTLB support */
-	dma_addr_t dma_address;
-	u32 dma_length;
-};
-
-/*
- * These macros should be used after a dma_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg)	((sg)->dma_address)
-#define sg_dma_len(sg)		((sg)->dma_length)
+#include <asm-generic/scatterlist.h>
 
 #ifdef __powerpc64__
 #define ISA_DMA_THRESHOLD	(~0UL)
 #endif
-
 #define ARCH_HAS_SG_CHAIN
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 8b8fab91ad1e..3a7a67a0d006 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -353,6 +353,12 @@
 #define abort()							\
 	return 0
 
+#ifdef __BIG_ENDIAN
+#define __BYTE_ORDER __BIG_ENDIAN
+#else
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
 /* Exception flags. */
 #define EFLAG_INVALID		(1 << (31 - 2))
 #define EFLAG_OVERFLOW		(1 << (31 - 3))
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ff4da2c238b..e7fe218b8697 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -39,8 +39,8 @@ struct dma_map_ops swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.mapping_error = swiotlb_dma_mapping_error,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6c1df5757cd6..8d1de6f31d5a 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -127,11 +127,11 @@ static inline void dma_direct_sync_sg(struct device *dev,
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
-static inline void dma_direct_sync_single_range(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
+static inline void dma_direct_sync_single(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 #endif
 
@@ -144,8 +144,8 @@ struct dma_map_ops dma_direct_ops = {
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	.sync_single_range_for_cpu = dma_direct_sync_single_range,
-	.sync_single_range_for_device = dma_direct_sync_single_range,
+	.sync_single_for_cpu = dma_direct_sync_single,
+	.sync_single_for_device = dma_direct_sync_single,
 	.sync_sg_for_cpu = dma_direct_sync_sg,
 	.sync_sg_for_device = dma_direct_sync_sg,
 #endif
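
These conversions follow the removal of the partial-buffer sync_single_range_* callbacks from struct dma_map_ops: a ranged sync is now expressed through the plain single-buffer hooks, with the offset folded into the handle by the DMA core. From a driver's point of view the call keeps its familiar shape; in this sketch dev, handle, offset and len are placeholders:

	/*
	 * A partial sync is just dma_sync_single_for_cpu() on
	 * (handle + offset) after the dma_map_ops simplification.
	 */
	dma_sync_single_for_cpu(dev, handle + offset, len, DMA_FROM_DEVICE);
	/* ... CPU reads the freshly DMA'd bytes ... */
	dma_sync_single_for_device(dev, handle + offset, len, DMA_FROM_DEVICE);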
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 6a1fde0d22b0..cd37e49e7034 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1,6 +1,15 @@
 /*
  * Freescale MPC85xx/MPC86xx RapidIO support
  *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll <thomas.moll@sysgo.com>
+ * - fixed maintenance access routines, check for aligned access
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ * - Added Port-Write message handling
+ * - Added Machine Check exception handling
+ *
  * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
  * Zhang Wei <wei.zhang@freescale.com>
  *
@@ -24,19 +33,30 @@
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/kfifo.h>
 
 #include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/uaccess.h>
+
+#undef DEBUG_PW	/* Port-Write debugging */
 
 /* RapidIO definition irq, which read from OF-tree */
 #define IRQ_RIO_BELL(m)	(((struct rio_priv *)(m->priv))->bellirq)
 #define IRQ_RIO_TX(m)	(((struct rio_priv *)(m->priv))->txirq)
 #define IRQ_RIO_RX(m)	(((struct rio_priv *)(m->priv))->rxirq)
+#define IRQ_RIO_PW(m)	(((struct rio_priv *)(m->priv))->pwirq)
 
 #define RIO_ATMU_REGS_OFFSET	0x10c00
 #define RIO_P_MSG_REGS_OFFSET	0x11000
 #define RIO_S_MSG_REGS_OFFSET	0x13000
 #define RIO_ESCSR		0x158
 #define RIO_CCSR		0x15c
+#define RIO_LTLEDCSR		0x0608
+#define RIO_LTLEDCSR_IER	0x80000000
+#define RIO_LTLEDCSR_PRT	0x01000000
+#define RIO_LTLEECSR		0x060c
+#define RIO_EPWISR		0x10010
 #define RIO_ISR_AACR		0x10120
 #define RIO_ISR_AACR_AA		0x1	/* Accept All ID */
 #define RIO_MAINT_WIN_SIZE	0x400000
@@ -55,6 +75,18 @@
 #define RIO_MSG_ISR_QFI		0x00000010
 #define RIO_MSG_ISR_DIQI	0x00000001
 
+#define RIO_IPWMR_SEN		0x00100000
+#define RIO_IPWMR_QFIE		0x00000100
+#define RIO_IPWMR_EIE		0x00000020
+#define RIO_IPWMR_CQ		0x00000002
+#define RIO_IPWMR_PWE		0x00000001
+
+#define RIO_IPWSR_QF		0x00100000
+#define RIO_IPWSR_TE		0x00000080
+#define RIO_IPWSR_QFI		0x00000010
+#define RIO_IPWSR_PWD		0x00000008
+#define RIO_IPWSR_PWB		0x00000004
+
 #define RIO_MSG_DESC_SIZE	32
 #define RIO_MSG_BUFFER_SIZE	4096
 #define RIO_MIN_TX_RING_SIZE	2
@@ -121,7 +153,7 @@ struct rio_msg_regs {
 	u32 pad10[26];
 	u32 pwmr;
 	u32 pwsr;
-	u32 pad11;
+	u32 epwqbar;
 	u32 pwqbar;
 };
 
@@ -160,6 +192,14 @@ struct rio_msg_rx_ring {
 	void *dev_id;
 };
 
+struct rio_port_write_msg {
+	void *virt;
+	dma_addr_t phys;
+	u32 msg_count;
+	u32 err_count;
+	u32 discard_count;
+};
+
 struct rio_priv {
 	struct device *dev;
 	void __iomem *regs_win;
@@ -172,11 +212,64 @@ struct rio_priv {
172 struct rio_dbell_ring dbell_ring; 212 struct rio_dbell_ring dbell_ring;
173 struct rio_msg_tx_ring msg_tx_ring; 213 struct rio_msg_tx_ring msg_tx_ring;
174 struct rio_msg_rx_ring msg_rx_ring; 214 struct rio_msg_rx_ring msg_rx_ring;
215 struct rio_port_write_msg port_write_msg;
175 int bellirq; 216 int bellirq;
176 int txirq; 217 int txirq;
177 int rxirq; 218 int rxirq;
219 int pwirq;
220 struct work_struct pw_work;
221 struct kfifo pw_fifo;
222 spinlock_t pw_fifo_lock;
178}; 223};
179 224
225#define __fsl_read_rio_config(x, addr, err, op) \
226 __asm__ __volatile__( \
227 "1: "op" %1,0(%2)\n" \
228 " eieio\n" \
229 "2:\n" \
230 ".section .fixup,\"ax\"\n" \
231 "3: li %1,-1\n" \
232 " li %0,%3\n" \
233 " b 2b\n" \
234 ".section __ex_table,\"a\"\n" \
235 " .align 2\n" \
236 " .long 1b,3b\n" \
237 ".text" \
238 : "=r" (err), "=r" (x) \
239 : "b" (addr), "i" (-EFAULT), "0" (err))
240
241static void __iomem *rio_regs_win;
242
243static int (*saved_mcheck_exception)(struct pt_regs *regs);
244
245static int fsl_rio_mcheck_exception(struct pt_regs *regs)
246{
247 const struct exception_table_entry *entry = NULL;
248 unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
249
250 if (reason & MCSR_BUS_RBERR) {
251 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
252 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
253 /* Check if we are prepared to handle this fault */
254 entry = search_exception_tables(regs->nip);
255 if (entry) {
256 pr_debug("RIO: %s - MC Exception handled\n",
257 __func__);
258 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
259 0);
260 regs->msr |= MSR_RI;
261 regs->nip = entry->fixup;
262 return 1;
263 }
264 }
265 }
266
267 if (saved_mcheck_exception)
268 return saved_mcheck_exception(regs);
269 else
270 return cur_cpu_spec->machine_check(regs);
271}
272
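The __fsl_read_rio_config macro above wraps a single PowerPC load in an exception-table entry: if the load takes a machine check (for example because the remote RapidIO device does not respond), execution resumes at the fixup stub, which forces the result to -1 and the error to -EFAULT, and fsl_rio_mcheck_exception() is what steers a faulting access to that fixup. A minimal sketch of how a caller uses it, assuming kernel context and the macro as defined above:

	/* Sketch only: a guarded 32-bit maintenance-space read.
	 * "lwz" is the PowerPC 32-bit load the macro inlines. */
	static int guarded_read32(u8 *addr, u32 *val)
	{
		u32 rval;
		u32 err = 0;

		__fsl_read_rio_config(rval, addr, err, "lwz");
		if (err)
			return err;	/* -EFAULT: the load faulted, the fixup ran */
		*val = rval;
		return 0;
	}
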
180/** 273/**
181 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message 274 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
182 * @mport: RapidIO master port info 275 * @mport: RapidIO master port info
@@ -277,27 +370,44 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
277{ 370{
278 struct rio_priv *priv = mport->priv; 371 struct rio_priv *priv = mport->priv;
279 u8 *data; 372 u8 *data;
373 u32 rval, err = 0;
280 374
281 pr_debug 375 pr_debug
282 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", 376 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
283 index, destid, hopcount, offset, len); 377 index, destid, hopcount, offset, len);
378
 379	/* Accesses must stay within the 16MB maintenance space */
 380	/* and be naturally aligned to their length */
381 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
382 return -EINVAL;
383
284 out_be32(&priv->maint_atmu_regs->rowtar, 384 out_be32(&priv->maint_atmu_regs->rowtar,
285 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); 385 (destid << 22) | (hopcount << 12) | (offset >> 12));
386 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
286 387
287 data = (u8 *) priv->maint_win + offset; 388 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
288 switch (len) { 389 switch (len) {
289 case 1: 390 case 1:
290 *val = in_8((u8 *) data); 391 __fsl_read_rio_config(rval, data, err, "lbz");
291 break; 392 break;
292 case 2: 393 case 2:
293 *val = in_be16((u16 *) data); 394 __fsl_read_rio_config(rval, data, err, "lhz");
294 break; 395 break;
295 default: 396 case 4:
296 *val = in_be32((u32 *) data); 397 __fsl_read_rio_config(rval, data, err, "lwz");
297 break; 398 break;
399 default:
400 return -EINVAL;
298 } 401 }
299 402
300 return 0; 403 if (err) {
404 pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
405 err, destid, hopcount, offset);
406 }
407
408 *val = rval;
409
410 return err;
301} 411}
302 412
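The new guard (repeated in fsl_rio_config_write() below) rejects maintenance accesses that would fall outside the 16MB maintenance space or that are not naturally aligned for their length. A standalone userspace sketch of the same check; the IS_ALIGNED definition here mirrors the kernel's power-of-two form and is an assumption of this example:

	#include <assert.h>
	#include <stdint.h>

	#define MAINT_SPACE 0x1000000u			/* 16MB maintenance space */
	#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)	/* a: power of two */

	static int maint_access_ok(uint32_t offset, uint32_t len)
	{
		/* len is 1, 2 or 4; reject accesses that leave the space
		 * or straddle a naturally aligned boundary. */
		return offset <= MAINT_SPACE - len && IS_ALIGNED(offset, len);
	}

	int main(void)
	{
		assert(maint_access_ok(0x100, 4));	/* aligned, in range */
		assert(!maint_access_ok(0x101, 4));	/* misaligned 32-bit access */
		assert(!maint_access_ok(0xfffffe, 4));	/* would run past 16MB */
		return 0;
	}
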
303/** 413/**
@@ -322,10 +432,17 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
322 pr_debug 432 pr_debug
323 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", 433 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
324 index, destid, hopcount, offset, len, val); 434 index, destid, hopcount, offset, len, val);
435
 436	/* Accesses must stay within the 16MB maintenance space */
 437	/* and be naturally aligned to their length */
438 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
439 return -EINVAL;
440
325 out_be32(&priv->maint_atmu_regs->rowtar, 441 out_be32(&priv->maint_atmu_regs->rowtar,
326 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); 442 (destid << 22) | (hopcount << 12) | (offset >> 12));
443 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
327 444
328 data = (u8 *) priv->maint_win + offset; 445 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
329 switch (len) { 446 switch (len) {
330 case 1: 447 case 1:
331 out_8((u8 *) data, val); 448 out_8((u8 *) data, val);
@@ -333,9 +450,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
333 case 2: 450 case 2:
334 out_be16((u16 *) data, val); 451 out_be16((u16 *) data, val);
335 break; 452 break;
336 default: 453 case 4:
337 out_be32((u32 *) data, val); 454 out_be32((u32 *) data, val);
338 break; 455 break;
456 default:
457 return -EINVAL;
339 } 458 }
340 459
341 return 0; 460 return 0;
@@ -930,6 +1049,223 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
930 return rc; 1049 return rc;
931} 1050}
932 1051
1052/**
1053 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
1054 * @irq: Linux interrupt number
1055 * @dev_instance: Pointer to interrupt-specific data
1056 *
1057 * Handles port write interrupts: updates error/discard counters and
1058 * queues received port-write messages for deferred processing.
1059 */
1060static irqreturn_t
1061fsl_rio_port_write_handler(int irq, void *dev_instance)
1062{
1063 u32 ipwmr, ipwsr;
1064 struct rio_mport *port = (struct rio_mport *)dev_instance;
1065 struct rio_priv *priv = port->priv;
1066 u32 epwisr, tmp;
1067
1068 ipwmr = in_be32(&priv->msg_regs->pwmr);
1069 ipwsr = in_be32(&priv->msg_regs->pwsr);
1070
1071 epwisr = in_be32(priv->regs_win + RIO_EPWISR);
1072 if (epwisr & 0x80000000) {
1073 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1074 pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
1075 out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
1076 }
1077
1078 if (!(epwisr & 0x00000001))
1079 return IRQ_HANDLED;
1080
1081#ifdef DEBUG_PW
1082 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
1083 if (ipwsr & RIO_IPWSR_QF)
1084 pr_debug(" QF");
1085 if (ipwsr & RIO_IPWSR_TE)
1086 pr_debug(" TE");
1087 if (ipwsr & RIO_IPWSR_QFI)
1088 pr_debug(" QFI");
1089 if (ipwsr & RIO_IPWSR_PWD)
1090 pr_debug(" PWD");
1091 if (ipwsr & RIO_IPWSR_PWB)
1092 pr_debug(" PWB");
1093 pr_debug(" )\n");
1094#endif
1095 out_be32(&priv->msg_regs->pwsr,
1096 ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1097
1098 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
1099 priv->port_write_msg.err_count++;
1100 pr_info("RIO: Port-Write Transaction Err (%d)\n",
1101 priv->port_write_msg.err_count);
1102 }
1103 if (ipwsr & RIO_IPWSR_PWD) {
1104 priv->port_write_msg.discard_count++;
1105 pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
1106 priv->port_write_msg.discard_count);
1107 }
1108
1109 /* Schedule deferred processing if PW was received */
1110 if (ipwsr & RIO_IPWSR_QFI) {
1111 /* Save PW message (if there is room in FIFO),
1112 * otherwise discard it.
1113 */
1114 if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
1115 priv->port_write_msg.msg_count++;
1116 kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
1117 RIO_PW_MSG_SIZE);
1118 } else {
1119 priv->port_write_msg.discard_count++;
1120 pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
1121 priv->port_write_msg.discard_count);
1122 }
1123 schedule_work(&priv->pw_work);
1124 }
1125
1126 /* Issue Clear Queue command. This allows another
1127 * port-write to be received.
1128 */
1129 out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
1130
1131 return IRQ_HANDLED;
1132}
1133
1134static void fsl_pw_dpc(struct work_struct *work)
1135{
1136 struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
1137 unsigned long flags;
1138 u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
1139
1140 /*
1141 * Process port-write messages
1142 */
1143 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1144 while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
1145 RIO_PW_MSG_SIZE)) {
1146 /* Process one message */
1147 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1148#ifdef DEBUG_PW
1149 {
1150 u32 i;
1151 pr_debug("%s : Port-Write Message:", __func__);
1152 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
1153 if ((i%4) == 0)
1154 pr_debug("\n0x%02x: 0x%08x", i*4,
1155 msg_buffer[i]);
1156 else
1157 pr_debug(" 0x%08x", msg_buffer[i]);
1158 }
1159 pr_debug("\n");
1160 }
1161#endif
1162 /* Pass the port-write message to RIO core for processing */
1163 rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
1164 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1165 }
1166 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1167}
1168
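Together, fsl_rio_port_write_handler() and fsl_pw_dpc() form a classic top-half/bottom-half pair: the interrupt handler copies each port-write message into a kfifo and schedules a work item, and the work function drains the FIFO in process context, dropping the lock while each record is handled. A minimal sketch of that handoff under the assumption of a single IRQ-context producer; the names are hypothetical, and setup (kfifo_alloc, INIT_WORK, spin_lock_init) is assumed done elsewhere:

	#include <linux/kfifo.h>
	#include <linux/workqueue.h>
	#include <linux/spinlock.h>

	#define MSG_SIZE 64	/* assumption: fixed-size records, like RIO_PW_MSG_SIZE */

	struct pw_demo {
		struct kfifo fifo;
		spinlock_t lock;
		struct work_struct work;
	};

	/* Producer (interrupt context): queue a record if it fits, else drop it. */
	static void pw_demo_enqueue(struct pw_demo *d, const void *msg)
	{
		if (kfifo_avail(&d->fifo) >= MSG_SIZE)
			kfifo_in(&d->fifo, msg, MSG_SIZE);
		schedule_work(&d->work);	/* consumer runs in process context */
	}

	/* Consumer (workqueue): drain whole records, releasing the lock
	 * around each one so the ISR can keep enqueueing. */
	static void pw_demo_drain(struct work_struct *work)
	{
		struct pw_demo *d = container_of(work, struct pw_demo, work);
		unsigned char buf[MSG_SIZE];
		unsigned long flags;

		spin_lock_irqsave(&d->lock, flags);
		while (kfifo_out(&d->fifo, buf, MSG_SIZE) == MSG_SIZE) {
			spin_unlock_irqrestore(&d->lock, flags);
			/* process buf here */
			spin_lock_irqsave(&d->lock, flags);
		}
		spin_unlock_irqrestore(&d->lock, flags);
	}
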
1169/**
1170 * fsl_rio_pw_enable - enable/disable the port-write interface
1171 * @mport: Master port implementing the port write unit
1172 * @enable: 1=enable; 0=disable port-write message handling
1173 */
1174static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
1175{
1176 struct rio_priv *priv = mport->priv;
1177 u32 rval;
1178
1179 rval = in_be32(&priv->msg_regs->pwmr);
1180
1181 if (enable)
1182 rval |= RIO_IPWMR_PWE;
1183 else
1184 rval &= ~RIO_IPWMR_PWE;
1185
1186 out_be32(&priv->msg_regs->pwmr, rval);
1187
1188 return 0;
1189}
1190
1191/**
1192 * fsl_rio_port_write_init - MPC85xx port write interface init
1193 * @mport: Master port implementing the port write unit
1194 *
1195 * Initializes port write unit hardware and DMA buffer
1196 * ring. Called from fsl_rio_setup(). Returns %0 on success
1197 * or %-ENOMEM on failure.
1198 */
1199static int fsl_rio_port_write_init(struct rio_mport *mport)
1200{
1201 struct rio_priv *priv = mport->priv;
1202 int rc = 0;
1203
1204	/* The following configuration requires a disabled port write controller */
1205 out_be32(&priv->msg_regs->pwmr,
1206 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
1207
1208 /* Initialize port write */
1209 priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
1210 RIO_PW_MSG_SIZE,
1211 &priv->port_write_msg.phys, GFP_KERNEL);
1212 if (!priv->port_write_msg.virt) {
1213		pr_err("RIO: unable to allocate port write queue\n");
1214 return -ENOMEM;
1215 }
1216
1217 priv->port_write_msg.err_count = 0;
1218 priv->port_write_msg.discard_count = 0;
1219
1220 /* Point dequeue/enqueue pointers at first entry */
1221 out_be32(&priv->msg_regs->epwqbar, 0);
1222 out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
1223
1224 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
1225 in_be32(&priv->msg_regs->epwqbar),
1226 in_be32(&priv->msg_regs->pwqbar));
1227
1228 /* Clear interrupt status IPWSR */
1229 out_be32(&priv->msg_regs->pwsr,
1230 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1231
1232	/* Configure port write controller: enable snooping and all error
1233	   reporting, clear the queue-full condition */
1234 out_be32(&priv->msg_regs->pwmr,
1235 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
1236
1237
1238 /* Hook up port-write handler */
1239 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
1240 "port-write", (void *)mport);
1241 if (rc < 0) {
1242		pr_err("MPC85xx RIO: unable to request port-write irq\n");
1243 goto err_out;
1244 }
1245
1246 INIT_WORK(&priv->pw_work, fsl_pw_dpc);
1247 spin_lock_init(&priv->pw_fifo_lock);
1248 if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
1249 pr_err("FIFO allocation failed\n");
1250 rc = -ENOMEM;
1251 goto err_out_irq;
1252 }
1253
1254 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
1255 in_be32(&priv->msg_regs->pwmr),
1256 in_be32(&priv->msg_regs->pwsr));
1257
1258 return rc;
1259
1260err_out_irq:
1261 free_irq(IRQ_RIO_PW(mport), (void *)mport);
1262err_out:
1263 dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
1264 priv->port_write_msg.virt,
1265 priv->port_write_msg.phys);
1266 return rc;
1267}
1268
933static char *cmdline = NULL; 1269static char *cmdline = NULL;
934 1270
935static int fsl_rio_get_hdid(int index) 1271static int fsl_rio_get_hdid(int index)
@@ -1057,7 +1393,7 @@ int fsl_rio_setup(struct of_device *dev)
1057 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", 1393 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
1058 law_start, law_size); 1394 law_start, law_size);
1059 1395
1060 ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL); 1396 ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
1061 if (!ops) { 1397 if (!ops) {
1062 rc = -ENOMEM; 1398 rc = -ENOMEM;
1063 goto err_ops; 1399 goto err_ops;
@@ -1067,6 +1403,7 @@ int fsl_rio_setup(struct of_device *dev)
1067 ops->cread = fsl_rio_config_read; 1403 ops->cread = fsl_rio_config_read;
1068 ops->cwrite = fsl_rio_config_write; 1404 ops->cwrite = fsl_rio_config_write;
1069 ops->dsend = fsl_rio_doorbell_send; 1405 ops->dsend = fsl_rio_doorbell_send;
1406 ops->pwenable = fsl_rio_pw_enable;
1070 1407
1071 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); 1408 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
1072 if (!port) { 1409 if (!port) {
@@ -1089,11 +1426,12 @@ int fsl_rio_setup(struct of_device *dev)
1089 port->iores.flags = IORESOURCE_MEM; 1426 port->iores.flags = IORESOURCE_MEM;
1090 port->iores.name = "rio_io_win"; 1427 port->iores.name = "rio_io_win";
1091 1428
1429	priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
1092 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); 1430 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
1093 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); 1431 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
1094 priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); 1432 priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
1095 dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq, 1433 dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq: %d\n",
1096 priv->txirq, priv->rxirq); 1434 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
1097 1435
1098 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); 1436 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
1099 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); 1437 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
@@ -1109,6 +1447,7 @@ int fsl_rio_setup(struct of_device *dev)
1109 rio_register_mport(port); 1447 rio_register_mport(port);
1110 1448
1111 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); 1449 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
1450 rio_regs_win = priv->regs_win;
1112 1451
1113 /* Probe the master port phy type */ 1452 /* Probe the master port phy type */
1114 ccsr = in_be32(priv->regs_win + RIO_CCSR); 1453 ccsr = in_be32(priv->regs_win + RIO_CCSR);
@@ -1166,7 +1505,8 @@ int fsl_rio_setup(struct of_device *dev)
1166 1505
1167 /* Configure maintenance transaction window */ 1506 /* Configure maintenance transaction window */
1168 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); 1507 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
1169 out_be32(&priv->maint_atmu_regs->rowar, 0x80077015); /* 4M */ 1508 out_be32(&priv->maint_atmu_regs->rowar,
1509 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
1170 1510
1171 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); 1511 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
1172 1512
@@ -1175,6 +1515,12 @@ int fsl_rio_setup(struct of_device *dev)
1175 (law_start + RIO_MAINT_WIN_SIZE) >> 12); 1515 (law_start + RIO_MAINT_WIN_SIZE) >> 12);
1176 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ 1516 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */
1177 fsl_rio_doorbell_init(port); 1517 fsl_rio_doorbell_init(port);
1518 fsl_rio_port_write_init(port);
1519
1520 saved_mcheck_exception = ppc_md.machine_check_exception;
1521 ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
1522 /* Ensure that RFXE is set */
1523 mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
1178 1524
1179 return 0; 1525 return 0;
1180err: 1526err:
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 35d786fe93ae..be44d94cba54 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1 +1,3 @@
1#define ISA_DMA_THRESHOLD (~0UL)
2
1#include <asm-generic/scatterlist.h> 3#include <asm-generic/scatterlist.h>
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 7d43fee17e32..0addc6466d95 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -73,3 +73,5 @@ extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
73#define UDIV_NEEDS_NORMALIZATION 0 73#define UDIV_NEEDS_NORMALIZATION 0
74 74
75#define abort() return 0 75#define abort() return 0
76
77#define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e4d98de83dd8..541053ed234e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -944,21 +944,21 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
944 struct cpu *c = &per_cpu(cpu_devices, cpu); 944 struct cpu *c = &per_cpu(cpu_devices, cpu);
945 struct sys_device *s = &c->sysdev; 945 struct sys_device *s = &c->sysdev;
946 struct s390_idle_data *idle; 946 struct s390_idle_data *idle;
947 int err = 0;
947 948
948 switch (action) { 949 switch (action) {
949 case CPU_ONLINE: 950 case CPU_ONLINE:
950 case CPU_ONLINE_FROZEN: 951 case CPU_ONLINE_FROZEN:
951 idle = &per_cpu(s390_idle, cpu); 952 idle = &per_cpu(s390_idle, cpu);
952 memset(idle, 0, sizeof(struct s390_idle_data)); 953 memset(idle, 0, sizeof(struct s390_idle_data));
953 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) 954 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
954 return NOTIFY_BAD;
955 break; 955 break;
956 case CPU_DEAD: 956 case CPU_DEAD:
957 case CPU_DEAD_FROZEN: 957 case CPU_DEAD_FROZEN:
958 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 958 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
959 break; 959 break;
960 } 960 }
961 return NOTIFY_OK; 961 return notifier_from_errno(err);
962} 962}
963 963
964static struct notifier_block __cpuinitdata smp_cpu_nb = { 964static struct notifier_block __cpuinitdata smp_cpu_nb = {
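The s390 hotplug notifier now funnels its result through notifier_from_errno(), which returns NOTIFY_OK for 0 and an errno-carrying stop value otherwise, so callers can recover the actual error instead of a bare NOTIFY_BAD. A hedged sketch of the same shape; do_setup() and do_teardown() are hypothetical helpers returning 0 or -errno:

	#include <linux/notifier.h>
	#include <linux/cpu.h>

	static int do_setup(void) { return 0; }	/* hypothetical */
	static void do_teardown(void) { }	/* hypothetical */

	static int demo_cpu_notify(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		int err = 0;

		switch (action) {
		case CPU_ONLINE:
			err = do_setup();
			break;
		case CPU_DEAD:
			do_teardown();
			break;
		}
		/* NOTIFY_OK on 0; otherwise a stop value encoding -errno. */
		return notifier_from_errno(err);
	}
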
diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h
index 9f533b8362c7..4fa1a6658215 100644
--- a/arch/score/include/asm/scatterlist.h
+++ b/arch/score/include/asm/scatterlist.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_SCORE_SCATTERLIST_H 1#ifndef _ASM_SCORE_SCATTERLIST_H
2#define _ASM_SCORE_SCATTERLIST_H 2#define _ASM_SCORE_SCATTERLIST_H
3 3
4#define ISA_DMA_THRESHOLD (~0UL)
5
4#include <asm-generic/scatterlist.h> 6#include <asm-generic/scatterlist.h>
5 7
6#endif /* _ASM_SCORE_SCATTERLIST_H */ 8#endif /* _ASM_SCORE_SCATTERLIST_H */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0e318c905eea..c5ee4ce60b57 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -186,6 +186,9 @@ config DMA_NONCOHERENT
186config NEED_DMA_MAP_STATE 186config NEED_DMA_MAP_STATE
187 def_bool DMA_NONCOHERENT 187 def_bool DMA_NONCOHERENT
188 188
189config NEED_SG_DMA_LENGTH
190 def_bool y
191
189source "init/Kconfig" 192source "init/Kconfig"
190 193
191source "kernel/Kconfig.freezer" 194source "kernel/Kconfig.freezer"
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index d4104ce9fe53..6c4bbba2a675 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -436,29 +436,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
436 0, sizeof(struct pt_dspregs), 436 0, sizeof(struct pt_dspregs),
437 (const void __user *)data); 437 (const void __user *)data);
438#endif 438#endif
439#ifdef CONFIG_BINFMT_ELF_FDPIC
440 case PTRACE_GETFDPIC: {
441 unsigned long tmp = 0;
442
443 switch (addr) {
444 case PTRACE_GETFDPIC_EXEC:
445 tmp = child->mm->context.exec_fdpic_loadmap;
446 break;
447 case PTRACE_GETFDPIC_INTERP:
448 tmp = child->mm->context.interp_fdpic_loadmap;
449 break;
450 default:
451 break;
452 }
453
454 ret = 0;
455 if (put_user(tmp, datap)) {
456 ret = -EFAULT;
457 break;
458 }
459 break;
460 }
461#endif
462 default: 439 default:
463 ret = ptrace_request(child, request, addr, data); 440 ret = ptrace_request(child, request, addr, data);
464 break; 441 break;
diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h
index e8526021892f..8ae1bd310ad0 100644
--- a/arch/sh/math-emu/sfp-util.h
+++ b/arch/sh/math-emu/sfp-util.h
@@ -66,3 +66,7 @@
66 } while (0) 66 } while (0)
67 67
68#define abort() return 0 68#define abort() return 0
69
70#define __BYTE_ORDER __LITTLE_ENDIAN
71
72
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index d6781ce687e2..6f1470baa314 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -133,6 +133,9 @@ config ZONE_DMA
133config NEED_DMA_MAP_STATE 133config NEED_DMA_MAP_STATE
134 def_bool y 134 def_bool y
135 135
136config NEED_SG_DMA_LENGTH
137 def_bool y
138
136config GENERIC_ISA_DMA 139config GENERIC_ISA_DMA
137 bool 140 bool
138 default y if SPARC32 141 default y if SPARC32
diff --git a/arch/sparc/include/asm/scatterlist.h b/arch/sparc/include/asm/scatterlist.h
index d1120257b033..433e45f05fd4 100644
--- a/arch/sparc/include/asm/scatterlist.h
+++ b/arch/sparc/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
1#ifndef _SPARC_SCATTERLIST_H 1#ifndef _SPARC_SCATTERLIST_H
2#define _SPARC_SCATTERLIST_H 2#define _SPARC_SCATTERLIST_H
3 3
4#define sg_dma_len(sg) ((sg)->dma_length)
5
6#include <asm-generic/scatterlist.h> 4#include <asm-generic/scatterlist.h>
7 5
6#define ISA_DMA_THRESHOLD (~0UL)
7#define ARCH_HAS_SG_CHAIN
8
8#endif /* !(_SPARC_SCATTERLIST_H) */ 9#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/arch/sparc/math-emu/sfp-util_32.h b/arch/sparc/math-emu/sfp-util_32.h
index 0ea35afbb914..d1b2aff3c259 100644
--- a/arch/sparc/math-emu/sfp-util_32.h
+++ b/arch/sparc/math-emu/sfp-util_32.h
@@ -107,3 +107,9 @@
107 107
108#define abort() \ 108#define abort() \
109 return 0 109 return 0
110
111#ifdef __BIG_ENDIAN
112#define __BYTE_ORDER __BIG_ENDIAN
113#else
114#define __BYTE_ORDER __LITTLE_ENDIAN
115#endif
diff --git a/arch/sparc/math-emu/sfp-util_64.h b/arch/sparc/math-emu/sfp-util_64.h
index d17c9bc72181..425d3cf01af4 100644
--- a/arch/sparc/math-emu/sfp-util_64.h
+++ b/arch/sparc/math-emu/sfp-util_64.h
@@ -112,3 +112,9 @@
112 112
113#define abort() \ 113#define abort() \
114 return 0 114 return 0
115
116#ifdef __BIG_ENDIAN
117#define __BYTE_ORDER __BIG_ENDIAN
118#else
119#define __BYTE_ORDER __LITTLE_ENDIAN
120#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e0c619c55b4e..dcb0593b4a66 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -109,6 +109,9 @@ config SBUS
109config NEED_DMA_MAP_STATE 109config NEED_DMA_MAP_STATE
110 def_bool (X86_64 || DMAR || DMA_API_DEBUG) 110 def_bool (X86_64 || DMAR || DMA_API_DEBUG)
111 111
112config NEED_SG_DMA_LENGTH
113 def_bool y
114
112config GENERIC_ISA_DMA 115config GENERIC_ISA_DMA
113 def_bool y 116 def_bool y
114 117
@@ -1703,6 +1706,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
1703 def_bool X86_64 1706 def_bool X86_64
1704 depends on NUMA 1707 depends on NUMA
1705 1708
1709config USE_PERCPU_NUMA_NODE_ID
1710 def_bool X86_64
1711 depends on NUMA
1712
1706menu "Power management and ACPI options" 1713menu "Power management and ACPI options"
1707 1714
1708config ARCH_HIBERNATION_HEADER 1715config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 7b1aaa20c7b5..89bbf4e4d05d 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -195,11 +195,11 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
195 195
196 196
197 197
198#if __BYTE_ORDER == __LITTLE_ENDIAN 198#if BYTE_ORDER == LITTLE_ENDIAN
199#define le16_to_cpu(val) (val) 199#define le16_to_cpu(val) (val)
200#define le32_to_cpu(val) (val) 200#define le32_to_cpu(val) (val)
201#endif 201#endif
202#if __BYTE_ORDER == __BIG_ENDIAN 202#if BYTE_ORDER == BIG_ENDIAN
203#define le16_to_cpu(val) bswap_16(val) 203#define le16_to_cpu(val) bswap_16(val)
204#define le32_to_cpu(val) bswap_32(val) 204#define le32_to_cpu(val) bswap_32(val)
205#endif 205#endif
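relocs.c is compiled for the build host, where <endian.h> provides both the underscored and the BSD-style spellings; the tool now uses the BSD names. A standalone sketch of the idiom, assuming a glibc host for <endian.h> and <byteswap.h>:

	#include <endian.h>
	#include <byteswap.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Convert a little-endian on-disk value to host byte order. */
	static uint32_t le32_to_host(uint32_t v)
	{
	#if BYTE_ORDER == LITTLE_ENDIAN
		return v;
	#else
		return bswap_32(v);
	#endif
	}

	int main(void)
	{
		printf("0x%08x\n", le32_to_host(0x12345678u));
		return 0;
	}
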
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 75af592677ec..fb0b1874396f 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef _ASM_X86_SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define _ASM_X86_SCATTERLIST_H
3 3
4#define ISA_DMA_THRESHOLD (0x00ffffff)
5
6#include <asm-generic/scatterlist.h> 4#include <asm-generic/scatterlist.h>
7 5
6#define ISA_DMA_THRESHOLD (0x00ffffff)
7#define ARCH_HAS_SG_CHAIN
8
8#endif /* _ASM_X86_SCATTERLIST_H */ 9#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c5087d796587..21899cc31e52 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -53,33 +53,29 @@
53extern int cpu_to_node_map[]; 53extern int cpu_to_node_map[];
54 54
55/* Returns the number of the node containing CPU 'cpu' */ 55/* Returns the number of the node containing CPU 'cpu' */
56static inline int cpu_to_node(int cpu) 56static inline int __cpu_to_node(int cpu)
57{ 57{
58 return cpu_to_node_map[cpu]; 58 return cpu_to_node_map[cpu];
59} 59}
60#define early_cpu_to_node(cpu) cpu_to_node(cpu) 60#define early_cpu_to_node __cpu_to_node
61#define cpu_to_node __cpu_to_node
61 62
62#else /* CONFIG_X86_64 */ 63#else /* CONFIG_X86_64 */
63 64
64/* Mappings between logical cpu number and node number */ 65/* Mappings between logical cpu number and node number */
65DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); 66DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
66 67
67/* Returns the number of the current Node. */
68DECLARE_PER_CPU(int, node_number);
69#define numa_node_id() percpu_read(node_number)
70
71#ifdef CONFIG_DEBUG_PER_CPU_MAPS 68#ifdef CONFIG_DEBUG_PER_CPU_MAPS
72extern int cpu_to_node(int cpu); 69/*
70 * override generic percpu implementation of cpu_to_node
71 */
72extern int __cpu_to_node(int cpu);
73#define cpu_to_node __cpu_to_node
74
73extern int early_cpu_to_node(int cpu); 75extern int early_cpu_to_node(int cpu);
74 76
75#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 77#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
76 78
77/* Returns the number of the node containing CPU 'cpu' */
78static inline int cpu_to_node(int cpu)
79{
80 return per_cpu(x86_cpu_to_node_map, cpu);
81}
82
83/* Same function but used if called before per_cpu areas are setup */ 79/* Same function but used if called before per_cpu areas are setup */
84static inline int early_cpu_to_node(int cpu) 80static inline int early_cpu_to_node(int cpu)
85{ 81{
@@ -170,6 +166,10 @@ static inline int numa_node_id(void)
170{ 166{
171 return 0; 167 return 0;
172} 168}
169/*
170 * indicate override:
171 */
172#define numa_node_id numa_node_id
173 173
174static inline int early_cpu_to_node(int cpu) 174static inline int early_cpu_to_node(int cpu)
175{ 175{
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cc83a002786e..68e4a6f2211e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1121,9 +1121,9 @@ void __cpuinit cpu_init(void)
1121 oist = &per_cpu(orig_ist, cpu); 1121 oist = &per_cpu(orig_ist, cpu);
1122 1122
1123#ifdef CONFIG_NUMA 1123#ifdef CONFIG_NUMA
1124 if (cpu != 0 && percpu_read(node_number) == 0 && 1124 if (cpu != 0 && percpu_read(numa_node) == 0 &&
1125 cpu_to_node(cpu) != NUMA_NO_NODE) 1125 early_cpu_to_node(cpu) != NUMA_NO_NODE)
1126 percpu_write(node_number, cpu_to_node(cpu)); 1126 set_numa_node(early_cpu_to_node(cpu));
1127#endif 1127#endif
1128 1128
1129 me = current; 1129 me = current;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 81c499eceb21..e1a0a3bf9716 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -190,7 +190,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
190 mutex_unlock(&therm_cpu_lock); 190 mutex_unlock(&therm_cpu_lock);
191 break; 191 break;
192 } 192 }
193 return err ? NOTIFY_BAD : NOTIFY_OK; 193 return notifier_from_errno(err);
194} 194}
195 195
196static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = 196static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8b862d5900fe..1b7b31ab7d86 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -170,7 +170,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
170 cpuid_device_destroy(cpu); 170 cpuid_device_destroy(cpu);
171 break; 171 break;
172 } 172 }
173 return err ? NOTIFY_BAD : NOTIFY_OK; 173 return notifier_from_errno(err);
174} 174}
175 175
176static struct notifier_block __refdata cpuid_class_cpu_notifier = 176static struct notifier_block __refdata cpuid_class_cpu_notifier =
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 4d4468e9f47c..7bf2dc4c8f70 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -230,7 +230,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
230 msr_device_destroy(cpu); 230 msr_device_destroy(cpu);
231 break; 231 break;
232 } 232 }
233 return err ? NOTIFY_BAD : NOTIFY_OK; 233 return notifier_from_errno(err);
234} 234}
235 235
236static struct notifier_block __refdata msr_class_cpu_notifier = { 236static struct notifier_block __refdata msr_class_cpu_notifier = {
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 7d2829dde20e..a5bc528d4328 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
31 .free_coherent = swiotlb_free_coherent, 31 .free_coherent = swiotlb_free_coherent,
32 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 32 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
33 .sync_single_for_device = swiotlb_sync_single_for_device, 33 .sync_single_for_device = swiotlb_sync_single_for_device,
34 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
35 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
36 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 34 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
37 .sync_sg_for_device = swiotlb_sync_sg_for_device, 35 .sync_sg_for_device = swiotlb_sync_sg_for_device,
38 .map_sg = swiotlb_map_sg_attrs, 36 .map_sg = swiotlb_map_sg_attrs,
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b00e70..a867940a6dfc 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -265,10 +265,10 @@ void __init setup_per_cpu_areas(void)
265 265
266#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) 266#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
267 /* 267 /*
268 * make sure boot cpu node_number is right, when boot cpu is on the 268 * make sure boot cpu numa_node is right, when boot cpu is on the
269 * node that doesn't have mem installed 269 * node that doesn't have mem installed
270 */ 270 */
271 per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id); 271 set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
272#endif 272#endif
273 273
274 /* Setup node to cpumask map */ 274 /* Setup node to cpumask map */
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 550df481accd..10c27bb1e95f 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -2,6 +2,7 @@
2#include <linux/topology.h> 2#include <linux/topology.h>
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/bootmem.h> 4#include <linux/bootmem.h>
5#include <linux/random.h>
5 6
6#ifdef CONFIG_DEBUG_PER_CPU_MAPS 7#ifdef CONFIG_DEBUG_PER_CPU_MAPS
7# define DBG(x...) printk(KERN_DEBUG x) 8# define DBG(x...) printk(KERN_DEBUG x)
@@ -65,3 +66,19 @@ const struct cpumask *cpumask_of_node(int node)
65} 66}
66EXPORT_SYMBOL(cpumask_of_node); 67EXPORT_SYMBOL(cpumask_of_node);
67#endif 68#endif
69
70/*
71 * Return the bit number of a random bit set in the nodemask.
72 * (returns -1 if nodemask is empty)
73 */
74int __node_random(const nodemask_t *maskp)
75{
76 int w, bit = -1;
77
78 w = nodes_weight(*maskp);
79 if (w)
80 bit = bitmap_ord_to_pos(maskp->bits,
81 get_random_int() % w, MAX_NUMNODES);
82 return bit;
83}
84EXPORT_SYMBOL(__node_random);
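A possible caller of the new helper, sketched under the assumption that a prototype for __node_random() is visible and that the candidate mask may be empty (the function returns -1 in that case, per its comment):

	#include <linux/nodemask.h>
	#include <linux/topology.h>

	extern int __node_random(const nodemask_t *maskp);

	/* Pick a random node from 'candidates', falling back to the local node. */
	static int pick_node(const nodemask_t *candidates)
	{
		int nid = __node_random(candidates);

		return (nid >= 0) ? nid : numa_node_id();
	}
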
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8948f47fde05..a7bcc23ef96c 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -33,9 +33,6 @@ int numa_off __initdata;
33static unsigned long __initdata nodemap_addr; 33static unsigned long __initdata nodemap_addr;
34static unsigned long __initdata nodemap_size; 34static unsigned long __initdata nodemap_size;
35 35
36DEFINE_PER_CPU(int, node_number) = 0;
37EXPORT_PER_CPU_SYMBOL(node_number);
38
39/* 36/*
40 * Map cpu index to node index 37 * Map cpu index to node index
41 */ 38 */
@@ -809,7 +806,7 @@ void __cpuinit numa_set_node(int cpu, int node)
809 per_cpu(x86_cpu_to_node_map, cpu) = node; 806 per_cpu(x86_cpu_to_node_map, cpu) = node;
810 807
811 if (node != NUMA_NO_NODE) 808 if (node != NUMA_NO_NODE)
812 per_cpu(node_number, cpu) = node; 809 set_cpu_numa_node(cpu, node);
813} 810}
814 811
815void __cpuinit numa_clear_node(int cpu) 812void __cpuinit numa_clear_node(int cpu)
@@ -867,7 +864,7 @@ void __cpuinit numa_remove_cpu(int cpu)
867 numa_set_cpumask(cpu, 0); 864 numa_set_cpumask(cpu, 0);
868} 865}
869 866
870int cpu_to_node(int cpu) 867int __cpu_to_node(int cpu)
871{ 868{
872 if (early_per_cpu_ptr(x86_cpu_to_node_map)) { 869 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
873 printk(KERN_WARNING 870 printk(KERN_WARNING
@@ -877,7 +874,7 @@ int cpu_to_node(int cpu)
877 } 874 }
878 return per_cpu(x86_cpu_to_node_map, cpu); 875 return per_cpu(x86_cpu_to_node_map, cpu);
879} 876}
880EXPORT_SYMBOL(cpu_to_node); 877EXPORT_SYMBOL(__cpu_to_node);
881 878
882/* 879/*
883 * Same function as cpu_to_node() but used if called before the 880 * Same function as cpu_to_node() but used if called before the
diff --git a/arch/xtensa/include/asm/scatterlist.h b/arch/xtensa/include/asm/scatterlist.h
index 810080bb0a2b..b1f9fdc1d5ba 100644
--- a/arch/xtensa/include/asm/scatterlist.h
+++ b/arch/xtensa/include/asm/scatterlist.h
@@ -11,28 +11,7 @@
11#ifndef _XTENSA_SCATTERLIST_H 11#ifndef _XTENSA_SCATTERLIST_H
12#define _XTENSA_SCATTERLIST_H 12#define _XTENSA_SCATTERLIST_H
13 13
14#include <asm/types.h> 14#include <asm-generic/scatterlist.h>
15
16struct scatterlist {
17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
21 unsigned int offset;
22 dma_addr_t dma_address;
23 unsigned int length;
24};
25
26/*
27 * These macros should be used after a pci_map_sg call has been done
28 * to get bus addresses of each of the SG entries and their lengths.
29 * You should only work with the number of sg entries pci_map_sg
30 * returns, or alternatively stop on the first sg_dma_len(sg) which
31 * is 0.
32 */
33#define sg_dma_address(sg) ((sg)->dma_address)
34#define sg_dma_len(sg) ((sg)->length)
35
36 15
37#define ISA_DMA_THRESHOLD (~0UL) 16#define ISA_DMA_THRESHOLD (~0UL)
38 17
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d00..9fc630ce1ddb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
162 topology_remove_dev(cpu); 162 topology_remove_dev(cpu);
163 break; 163 break;
164 } 164 }
165 return rc ? NOTIFY_BAD : NOTIFY_OK; 165 return notifier_from_errno(rc);
166} 166}
167 167
168static int __cpuinit topology_sysfs_init(void) 168static int __cpuinit topology_sysfs_init(void)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e21175be25d0..f09fc0e2062d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1121,5 +1121,12 @@ config DEVPORT
1121 1121
1122source "drivers/s390/char/Kconfig" 1122source "drivers/s390/char/Kconfig"
1123 1123
1124config RAMOOPS
1125 tristate "Log panic/oops to a RAM buffer"
1126 default n
1127 help
1128 This enables panic and oops messages to be logged to a circular
1129	  buffer in RAM where they can be read back at some later point.
1130
1124endmenu 1131endmenu
1125 1132
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d39be4cf1f5d..88d6eac69754 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
108obj-$(CONFIG_TCG_TPM) += tpm/ 108obj-$(CONFIG_TCG_TPM) += tpm/
109 109
110obj-$(CONFIG_PS3_FLASH) += ps3flash.o 110obj-$(CONFIG_PS3_FLASH) += ps3flash.o
111obj-$(CONFIG_RAMOOPS) += ramoops.o
111 112
112obj-$(CONFIG_JS_RTC) += js-rtc.o 113obj-$(CONFIG_JS_RTC) += js-rtc.o
113js-rtc-y = rtc.o 114js-rtc-y = rtc.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 67ea3a60de74..70312da4c968 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -384,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
384{ 384{
385 u32 httfea,baseaddr,enuscr; 385 u32 httfea,baseaddr,enuscr;
386 struct pci_dev *dev1; 386 struct pci_dev *dev1;
387 int i; 387 int i, ret;
388 unsigned size = amd64_fetch_size(); 388 unsigned size = amd64_fetch_size();
389 389
390 dev_info(&pdev->dev, "setting up ULi AGP\n"); 390 dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -400,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
400 400
401 if (i == ARRAY_SIZE(uli_sizes)) { 401 if (i == ARRAY_SIZE(uli_sizes)) {
402 dev_info(&pdev->dev, "no ULi size found for %d\n", size); 402 dev_info(&pdev->dev, "no ULi size found for %d\n", size);
403 return -ENODEV; 403 ret = -ENODEV;
404 goto put;
404 } 405 }
405 406
406 /* shadow x86-64 registers into ULi registers */ 407 /* shadow x86-64 registers into ULi registers */
407 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); 408 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
408 409
409 /* if x86-64 aperture base is beyond 4G, exit here */ 410 /* if x86-64 aperture base is beyond 4G, exit here */
410 if ((httfea & 0x7fff) >> (32 - 25)) 411 if ((httfea & 0x7fff) >> (32 - 25)) {
411 return -ENODEV; 412 ret = -ENODEV;
413 goto put;
414 }
412 415
413 httfea = (httfea& 0x7fff) << 25; 416 httfea = (httfea& 0x7fff) << 25;
414 417
@@ -420,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
420 enuscr= httfea+ (size * 1024 * 1024) - 1; 423 enuscr= httfea+ (size * 1024 * 1024) - 1;
421 pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); 424 pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
422 pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); 425 pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
423 426 ret = 0;
427put:
424 pci_dev_put(dev1); 428 pci_dev_put(dev1);
425 return 0; 429 return ret;
426} 430}
427 431
428 432
@@ -441,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
441{ 445{
442 u32 tmp, apbase, apbar, aplimit; 446 u32 tmp, apbase, apbar, aplimit;
443 struct pci_dev *dev1; 447 struct pci_dev *dev1;
444 int i; 448 int i, ret;
445 unsigned size = amd64_fetch_size(); 449 unsigned size = amd64_fetch_size();
446 450
447 dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); 451 dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -458,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
458 462
459 if (i == ARRAY_SIZE(nforce3_sizes)) { 463 if (i == ARRAY_SIZE(nforce3_sizes)) {
460 dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); 464 dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
461 return -ENODEV; 465 ret = -ENODEV;
466 goto put;
462 } 467 }
463 468
464 pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); 469 pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -472,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
472 /* if x86-64 aperture base is beyond 4G, exit here */ 477 /* if x86-64 aperture base is beyond 4G, exit here */
473 if ( (apbase & 0x7fff) >> (32 - 25) ) { 478 if ( (apbase & 0x7fff) >> (32 - 25) ) {
474 dev_info(&pdev->dev, "aperture base > 4G\n"); 479 dev_info(&pdev->dev, "aperture base > 4G\n");
475 return -ENODEV; 480 ret = -ENODEV;
481 goto put;
476 } 482 }
477 483
478 apbase = (apbase & 0x7fff) << 25; 484 apbase = (apbase & 0x7fff) << 25;
@@ -488,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
488 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); 494 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
489 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); 495 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
490 496
497 ret = 0;
498put:
491 pci_dev_put(dev1); 499 pci_dev_put(dev1);
492 500
493 return 0; 501 return ret;
494} 502}
495 503
496static int __devinit agp_amd64_probe(struct pci_dev *pdev, 504static int __devinit agp_amd64_probe(struct pci_dev *pdev,
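Both the ULi and NForce3 init paths had the same leak: pci_dev_put(dev1) only ran on the success path, so every early return after the device lookup leaked a reference. The patch routes all exits through a single label. A reduced sketch of the pattern; the lookup and validity check are simplified stand-ins for the real code:

	#include <linux/pci.h>

	static bool size_is_valid(unsigned size)	/* hypothetical stand-in */
	{
		return size != 0;
	}

	static int demo_agp_init(struct pci_dev *pdev, unsigned size)
	{
		struct pci_dev *dev1 = pci_dev_get(pdev); /* real code looks up a companion device */
		int ret;

		if (!size_is_valid(size)) {
			ret = -ENODEV;
			goto put;	/* was: return -ENODEV (leaked dev1) */
		}
		/* ... program aperture registers ... */
		ret = 0;
	put:
		pci_dev_put(dev1);	/* balances the get on every path */
		return ret;
	}
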
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 63313a33ba5f..f4ae0e0fb631 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -703,14 +703,9 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
703 /* In general, the device is only openable by root anyway, so we're not 703 /* In general, the device is only openable by root anyway, so we're not
704 particularly concerned that bogus ioctls can flood the console. */ 704 particularly concerned that bogus ioctls can flood the console. */
705 705
706 adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL); 706 adgl = memdup_user(argp, sizeof(struct st_ram_io));
707 if (!adgl) 707 if (IS_ERR(adgl))
708 return -ENOMEM; 708 return PTR_ERR(adgl);
709
710 if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
711 kfree(adgl);
712 return -EFAULT;
713 }
714 709
715 lock_kernel(); 710 lock_kernel();
716 IndexCard = adgl->num_card-1; 711 IndexCard = adgl->num_card-1;
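memdup_user() collapses the kmalloc + copy_from_user + error-unwind sequence into one call that returns either the kernel copy or an ERR_PTR. A minimal sketch of the idiom in a hypothetical ioctl helper:

	#include <linux/string.h>
	#include <linux/slab.h>
	#include <linux/err.h>

	static long demo_ioctl_copy(void __user *argp, size_t size)
	{
		void *buf = memdup_user(argp, size);

		if (IS_ERR(buf))
			return PTR_ERR(buf); /* -ENOMEM or -EFAULT, already distinguished */
		/* ... operate on buf ... */
		kfree(buf);
		return 0;
	}
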
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad4234378d..4f3f8c9ec262 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2505 return rv; 2505 return rv;
2506 } 2506 }
2507 2507
2508 printk(KERN_INFO 2508 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2509 "ipmi: Found new BMC (man_id: 0x%6.6x, " 2509 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2510 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2510 bmc->id.manufacturer_id,
2511 bmc->id.manufacturer_id, 2511 bmc->id.product_id,
2512 bmc->id.product_id, 2512 bmc->id.device_id);
2513 bmc->id.device_id);
2514 } 2513 }
2515 2514
2516 /* 2515 /*
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void)
4037 4036
4038static struct timer_list ipmi_timer; 4037static struct timer_list ipmi_timer;
4039 4038
4040/* Call every ~100 ms. */ 4039/* Call every ~1000 ms. */
4041#define IPMI_TIMEOUT_TIME 100 4040#define IPMI_TIMEOUT_TIME 1000
4042 4041
4043/* How many jiffies does it take to get to the timeout time. */ 4042/* How many jiffies does it take to get to the timeout time. */
4044#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 4043#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
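The slower tick still converts through the same jiffies formula; for example, with HZ = 250 the timer now fires every 250 jiffies (about one second) instead of every 25. A quick standalone check of that arithmetic; the HZ value is an assumption of the example:

	#include <assert.h>

	#define HZ 250			/* assumed config value */
	#define IPMI_TIMEOUT_TIME 1000
	#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)

	int main(void)
	{
		assert(IPMI_TIMEOUT_JIFFIES == 250);	/* ~1s between timer runs */
		return 0;
	}
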
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 47ffe4a90a95..35603dd4e6c5 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@ enum si_type {
107}; 107};
108static char *si_to_str[] = { "kcs", "smic", "bt" }; 108static char *si_to_str[] = { "kcs", "smic", "bt" };
109 109
110enum ipmi_addr_src {
111 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
112 SI_PCI, SI_DEVICETREE, SI_DEFAULT
113};
114static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
115 "ACPI", "SMBIOS", "PCI",
116 "device-tree", "default" };
117
110#define DEVICE_NAME "ipmi_si" 118#define DEVICE_NAME "ipmi_si"
111 119
112static struct platform_driver ipmi_driver = { 120static struct platform_driver ipmi_driver = {
@@ -188,7 +196,7 @@ struct smi_info {
188 int (*irq_setup)(struct smi_info *info); 196 int (*irq_setup)(struct smi_info *info);
189 void (*irq_cleanup)(struct smi_info *info); 197 void (*irq_cleanup)(struct smi_info *info);
190 unsigned int io_size; 198 unsigned int io_size;
191 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ 199 enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
192 void (*addr_source_cleanup)(struct smi_info *info); 200 void (*addr_source_cleanup)(struct smi_info *info);
193 void *addr_source_data; 201 void *addr_source_data;
194 202
@@ -300,6 +308,7 @@ static int num_max_busy_us;
300 308
301static int unload_when_empty = 1; 309static int unload_when_empty = 1;
302 310
311static int add_smi(struct smi_info *smi);
303static int try_smi_init(struct smi_info *smi); 312static int try_smi_init(struct smi_info *smi);
304static void cleanup_one_si(struct smi_info *to_clean); 313static void cleanup_one_si(struct smi_info *to_clean);
305 314
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
314{ 323{
315 /* Deliver the message to the upper layer with the lock 324 /* Deliver the message to the upper layer with the lock
316 released. */ 325 released. */
317 spin_unlock(&(smi_info->si_lock)); 326
318 ipmi_smi_msg_received(smi_info->intf, msg); 327 if (smi_info->run_to_completion) {
319 spin_lock(&(smi_info->si_lock)); 328 ipmi_smi_msg_received(smi_info->intf, msg);
329 } else {
330 spin_unlock(&(smi_info->si_lock));
331 ipmi_smi_msg_received(smi_info->intf, msg);
332 spin_lock(&(smi_info->si_lock));
333 }
320} 334}
321 335
322static void return_hosed_msg(struct smi_info *smi_info, int cCode) 336static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
445 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 459 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
446 start_disable_irq(smi_info); 460 start_disable_irq(smi_info);
447 smi_info->interrupt_disabled = 1; 461 smi_info->interrupt_disabled = 1;
462 if (!atomic_read(&smi_info->stop_operation))
463 mod_timer(&smi_info->si_timer,
464 jiffies + SI_TIMEOUT_JIFFIES);
448 } 465 }
449} 466}
450 467
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
576 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); 593 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
577 if (msg[2] != 0) { 594 if (msg[2] != 0) {
578 /* Error clearing flags */ 595 /* Error clearing flags */
579 printk(KERN_WARNING 596 dev_warn(smi_info->dev,
580 "ipmi_si: Error clearing flags: %2.2x\n", 597 "Error clearing flags: %2.2x\n", msg[2]);
581 msg[2]);
582 } 598 }
583 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) 599 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
584 start_enable_irq(smi_info); 600 start_enable_irq(smi_info);
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
670 /* We got the flags from the SMI, now handle them. */ 686 /* We got the flags from the SMI, now handle them. */
671 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 687 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
672 if (msg[2] != 0) { 688 if (msg[2] != 0) {
673 printk(KERN_WARNING 689 dev_warn(smi_info->dev, "Could not enable interrupts"
674 "ipmi_si: Could not enable interrupts" 690 ", failed get, using polled mode.\n");
675 ", failed get, using polled mode.\n");
676 smi_info->si_state = SI_NORMAL; 691 smi_info->si_state = SI_NORMAL;
677 } else { 692 } else {
678 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 693 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
693 708
694 /* We got the flags from the SMI, now handle them. */ 709 /* We got the flags from the SMI, now handle them. */
695 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 710 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
696 if (msg[2] != 0) { 711 if (msg[2] != 0)
697 printk(KERN_WARNING 712 dev_warn(smi_info->dev, "Could not enable interrupts"
698 "ipmi_si: Could not enable interrupts" 713 ", failed set, using polled mode.\n");
699 ", failed set, using polled mode.\n"); 714 else
700 } 715 smi_info->interrupt_disabled = 0;
701 smi_info->si_state = SI_NORMAL; 716 smi_info->si_state = SI_NORMAL;
702 break; 717 break;
703 } 718 }
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
709 /* We got the flags from the SMI, now handle them. */ 724 /* We got the flags from the SMI, now handle them. */
710 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 725 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
711 if (msg[2] != 0) { 726 if (msg[2] != 0) {
712 printk(KERN_WARNING 727 dev_warn(smi_info->dev, "Could not disable interrupts"
713 "ipmi_si: Could not disable interrupts" 728 ", failed get.\n");
714 ", failed get.\n");
715 smi_info->si_state = SI_NORMAL; 729 smi_info->si_state = SI_NORMAL;
716 } else { 730 } else {
717 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 731 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
733 /* We got the flags from the SMI, now handle them. */ 747 /* We got the flags from the SMI, now handle them. */
734 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 748 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
735 if (msg[2] != 0) { 749 if (msg[2] != 0) {
736 printk(KERN_WARNING 750 dev_warn(smi_info->dev, "Could not disable interrupts"
737 "ipmi_si: Could not disable interrupts" 751 ", failed set.\n");
738 ", failed set.\n");
739 } 752 }
740 smi_info->si_state = SI_NORMAL; 753 smi_info->si_state = SI_NORMAL;
741 break; 754 break;
@@ -877,6 +890,11 @@ static void sender(void *send_info,
877 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 890 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
878#endif 891#endif
879 892
893 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
894
895 if (smi_info->thread)
896 wake_up_process(smi_info->thread);
897
880 if (smi_info->run_to_completion) { 898 if (smi_info->run_to_completion) {
881 /* 899 /*
882 * If we are running to completion, then throw it in 900 * If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data)
997 ; /* do nothing */ 1015 ; /* do nothing */
998 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) 1016 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
999 schedule(); 1017 schedule();
1018 else if (smi_result == SI_SM_IDLE)
1019 schedule_timeout_interruptible(100);
1000 else 1020 else
1001 schedule_timeout_interruptible(0); 1021 schedule_timeout_interruptible(0);
1002 } 1022 }
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data)
1039 unsigned long flags; 1059 unsigned long flags;
1040 unsigned long jiffies_now; 1060 unsigned long jiffies_now;
1041 long time_diff; 1061 long time_diff;
1062 long timeout;
1042#ifdef DEBUG_TIMING 1063#ifdef DEBUG_TIMING
1043 struct timeval t; 1064 struct timeval t;
1044#endif 1065#endif
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data)
1059 1080
1060 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 1081 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1061 /* Running with interrupts, only do long timeouts. */ 1082 /* Running with interrupts, only do long timeouts. */
1062 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1083 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1063 smi_inc_stat(smi_info, long_timeouts); 1084 smi_inc_stat(smi_info, long_timeouts);
1064 goto do_add_timer; 1085 goto do_mod_timer;
1065 } 1086 }
1066 1087
1067 /* 1088 /*
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data)
1070 */ 1091 */
1071 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1092 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1072 smi_inc_stat(smi_info, short_timeouts); 1093 smi_inc_stat(smi_info, short_timeouts);
1073 smi_info->si_timer.expires = jiffies + 1; 1094 timeout = jiffies + 1;
1074 } else { 1095 } else {
1075 smi_inc_stat(smi_info, long_timeouts); 1096 smi_inc_stat(smi_info, long_timeouts);
1076 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1097 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1077 } 1098 }
1078 1099
1079 do_add_timer: 1100 do_mod_timer:
1080 add_timer(&(smi_info->si_timer)); 1101 if (smi_result != SI_SM_IDLE)
1102 mod_timer(&(smi_info->si_timer), timeout);
1081} 1103}
1082 1104
1083static irqreturn_t si_irq_handler(int irq, void *data) 1105static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info,
1144 new_smi->thread = kthread_run(ipmi_thread, new_smi, 1166 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1145 "kipmi%d", new_smi->intf_num); 1167 "kipmi%d", new_smi->intf_num);
1146 if (IS_ERR(new_smi->thread)) { 1168 if (IS_ERR(new_smi->thread)) {
1147 printk(KERN_NOTICE "ipmi_si_intf: Could not start" 1169 dev_notice(new_smi->dev, "Could not start"
1148 " kernel thread due to error %ld, only using" 1170 " kernel thread due to error %ld, only using"
1149 " timers to drive the interface\n", 1171 " timers to drive the interface\n",
1150 PTR_ERR(new_smi->thread)); 1172 PTR_ERR(new_smi->thread));
1151 new_smi->thread = NULL; 1173 new_smi->thread = NULL;
1152 } 1174 }
1153 } 1175 }
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info)
1308 DEVICE_NAME, 1330 DEVICE_NAME,
1309 info); 1331 info);
1310 if (rv) { 1332 if (rv) {
1311 printk(KERN_WARNING 1333 dev_warn(info->dev, "%s unable to claim interrupt %d,"
1312 "ipmi_si: %s unable to claim interrupt %d," 1334 " running polled\n",
1313 " running polled\n", 1335 DEVICE_NAME, info->irq);
1314 DEVICE_NAME, info->irq);
1315 info->irq = 0; 1336 info->irq = 0;
1316 } else { 1337 } else {
1317 info->irq_cleanup = std_irq_cleanup; 1338 info->irq_cleanup = std_irq_cleanup;
1318 printk(" Using irq %d\n", info->irq); 1339 dev_info(info->dev, "Using irq %d\n", info->irq);
1319 } 1340 }
1320 1341
1321 return rv; 1342 return rv;
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info)
1406 info->io.outputb = port_outl; 1427 info->io.outputb = port_outl;
1407 break; 1428 break;
1408 default: 1429 default:
1409 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1430 dev_warn(info->dev, "Invalid register size: %d\n",
1410 info->io.regsize); 1431 info->io.regsize);
1411 return -EINVAL; 1432 return -EINVAL;
1412 } 1433 }
1413 1434
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info)
1529 break; 1550 break;
1530#endif 1551#endif
1531 default: 1552 default:
1532 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1553 dev_warn(info->dev, "Invalid register size: %d\n",
1533 info->io.regsize); 1554 info->io.regsize);
1534 return -EINVAL; 1555 return -EINVAL;
1535 } 1556 }
1536 1557
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1755 goto out; 1776 goto out;
1756 } 1777 }
1757 1778
1758 info->addr_source = "hotmod"; 1779 info->addr_source = SI_HOTMOD;
1759 info->si_type = si_type; 1780 info->si_type = si_type;
1760 info->io.addr_data = addr; 1781 info->io.addr_data = addr;
1761 info->io.addr_type = addr_space; 1782 info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1777 info->irq_setup = std_irq_setup; 1798 info->irq_setup = std_irq_setup;
1778 info->slave_addr = ipmb; 1799 info->slave_addr = ipmb;
1779 1800
1780 try_smi_init(info); 1801 if (!add_smi(info))
1802 if (try_smi_init(info))
1803 cleanup_one_si(info);
1781 } else { 1804 } else {
1782 /* remove */ 1805 /* remove */
1783 struct smi_info *e, *tmp_e; 1806 struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void)
1813 if (!info) 1836 if (!info)
1814 return; 1837 return;
1815 1838
1816 info->addr_source = "hardcoded"; 1839 info->addr_source = SI_HARDCODED;
1840 printk(KERN_INFO PFX "probing via hardcoded address\n");
1817 1841
1818 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { 1842 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1819 info->si_type = SI_KCS; 1843 info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void)
1822 } else if (strcmp(si_type[i], "bt") == 0) { 1846 } else if (strcmp(si_type[i], "bt") == 0) {
1823 info->si_type = SI_BT; 1847 info->si_type = SI_BT;
1824 } else { 1848 } else {
1825 printk(KERN_WARNING 1849 printk(KERN_WARNING PFX "Interface type specified "
1826 "ipmi_si: Interface type specified "
 1827 "for interface %d, was invalid: %s\n", 1850 "for interface %d was invalid: %s\n",
1828 i, si_type[i]); 1851 i, si_type[i]);
1829 kfree(info); 1852 kfree(info);
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void)
1841 info->io.addr_data = addrs[i]; 1864 info->io.addr_data = addrs[i];
1842 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1843 } else { 1866 } else {
1844 printk(KERN_WARNING 1867 printk(KERN_WARNING PFX "Interface type specified "
1845 "ipmi_si: Interface type specified " 1868 "for interface %d, but port and address were "
1846 "for interface %d, " 1869 "not set or set to zero.\n", i);
1847 "but port and address were not set or "
1848 "set to zero.\n", i);
1849 kfree(info); 1870 kfree(info);
1850 continue; 1871 continue;
1851 } 1872 }
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void)
1863 info->irq_setup = std_irq_setup; 1884 info->irq_setup = std_irq_setup;
1864 info->slave_addr = slave_addrs[i]; 1885 info->slave_addr = slave_addrs[i];
1865 1886
1866 try_smi_init(info); 1887 if (!add_smi(info))
1888 if (try_smi_init(info))
1889 cleanup_one_si(info);
1867 } 1890 }
1868} 1891}
1869 1892
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1923 &ipmi_acpi_gpe, 1946 &ipmi_acpi_gpe,
1924 info); 1947 info);
1925 if (status != AE_OK) { 1948 if (status != AE_OK) {
1926 printk(KERN_WARNING 1949 dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
1927 "ipmi_si: %s unable to claim ACPI GPE %d," 1950 " running polled\n", DEVICE_NAME, info->irq);
1928 " running polled\n",
1929 DEVICE_NAME, info->irq);
1930 info->irq = 0; 1951 info->irq = 0;
1931 return -EINVAL; 1952 return -EINVAL;
1932 } else { 1953 } else {
1933 info->irq_cleanup = acpi_gpe_irq_cleanup; 1954 info->irq_cleanup = acpi_gpe_irq_cleanup;
1934 printk(" Using ACPI GPE %d\n", info->irq); 1955 dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
1935 return 0; 1956 return 0;
1936 } 1957 }
1937} 1958}
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
1989 u8 addr_space; 2010 u8 addr_space;
1990 2011
1991 if (spmi->IPMIlegacy != 1) { 2012 if (spmi->IPMIlegacy != 1) {
1992 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 2013 printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1993 return -ENODEV; 2014 return -ENODEV;
1994 } 2015 }
1995 2016
1996 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 2017 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2000 2021
2001 info = kzalloc(sizeof(*info), GFP_KERNEL); 2022 info = kzalloc(sizeof(*info), GFP_KERNEL);
2002 if (!info) { 2023 if (!info) {
2003 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); 2024 printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
2004 return -ENOMEM; 2025 return -ENOMEM;
2005 } 2026 }
2006 2027
2007 info->addr_source = "SPMI"; 2028 info->addr_source = SI_SPMI;
2029 printk(KERN_INFO PFX "probing via SPMI\n");
2008 2030
2009 /* Figure out the interface type. */ 2031 /* Figure out the interface type. */
2010 switch (spmi->InterfaceType) { 2032 switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2018 info->si_type = SI_BT; 2040 info->si_type = SI_BT;
2019 break; 2041 break;
2020 default: 2042 default:
2021 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", 2043 printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
2022 spmi->InterfaceType); 2044 spmi->InterfaceType);
2023 kfree(info); 2045 kfree(info);
2024 return -EIO; 2046 return -EIO;
2025 } 2047 }
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2055 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2077 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2056 } else { 2078 } else {
2057 kfree(info); 2079 kfree(info);
2058 printk(KERN_WARNING 2080 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
2059 "ipmi_si: Unknown ACPI I/O Address type\n");
2060 return -EIO; 2081 return -EIO;
2061 } 2082 }
2062 info->io.addr_data = spmi->addr.address; 2083 info->io.addr_data = spmi->addr.address;
2063 2084
2064 try_smi_init(info); 2085 add_smi(info);
2065 2086
2066 return 0; 2087 return 0;
2067} 2088}
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2093{ 2114{
2094 struct acpi_device *acpi_dev; 2115 struct acpi_device *acpi_dev;
2095 struct smi_info *info; 2116 struct smi_info *info;
2117 struct resource *res;
2096 acpi_handle handle; 2118 acpi_handle handle;
2097 acpi_status status; 2119 acpi_status status;
2098 unsigned long long tmp; 2120 unsigned long long tmp;
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2105 if (!info) 2127 if (!info)
2106 return -ENOMEM; 2128 return -ENOMEM;
2107 2129
2108 info->addr_source = "ACPI"; 2130 info->addr_source = SI_ACPI;
2131 printk(KERN_INFO PFX "probing via ACPI\n");
2109 2132
2110 handle = acpi_dev->handle; 2133 handle = acpi_dev->handle;
2111 2134
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2125 info->si_type = SI_BT; 2148 info->si_type = SI_BT;
2126 break; 2149 break;
2127 default: 2150 default:
2128 dev_info(&dev->dev, "unknown interface type %lld\n", tmp); 2151 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
2129 goto err_free; 2152 goto err_free;
2130 } 2153 }
2131 2154
2132 if (pnp_port_valid(dev, 0)) { 2155 res = pnp_get_resource(dev, IORESOURCE_IO, 0);
2156 if (res) {
2133 info->io_setup = port_setup; 2157 info->io_setup = port_setup;
2134 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2158 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2135 info->io.addr_data = pnp_port_start(dev, 0);
2136 } else if (pnp_mem_valid(dev, 0)) {
2137 info->io_setup = mem_setup;
2138 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2139 info->io.addr_data = pnp_mem_start(dev, 0);
2140 } else { 2159 } else {
2160 res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
2161 if (res) {
2162 info->io_setup = mem_setup;
2163 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2164 }
2165 }
2166 if (!res) {
2141 dev_err(&dev->dev, "no I/O or memory address\n"); 2167 dev_err(&dev->dev, "no I/O or memory address\n");
2142 goto err_free; 2168 goto err_free;
2143 } 2169 }
2170 info->io.addr_data = res->start;
2144 2171
2145 info->io.regspacing = DEFAULT_REGSPACING; 2172 info->io.regspacing = DEFAULT_REGSPACING;
2146 info->io.regsize = DEFAULT_REGSPACING; 2173 info->io.regsize = DEFAULT_REGSPACING;
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2156 info->irq_setup = std_irq_setup; 2183 info->irq_setup = std_irq_setup;
2157 } 2184 }
2158 2185
2159 info->dev = &acpi_dev->dev; 2186 info->dev = &dev->dev;
2160 pnp_set_drvdata(dev, info); 2187 pnp_set_drvdata(dev, info);
2161 2188
2162 return try_smi_init(info); 2189 dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
2190 res, info->io.regsize, info->io.regspacing,
2191 info->irq);
2192
2193 return add_smi(info);
2163 2194
2164err_free: 2195err_free:
2165 kfree(info); 2196 kfree(info);
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2264 2295
2265 info = kzalloc(sizeof(*info), GFP_KERNEL); 2296 info = kzalloc(sizeof(*info), GFP_KERNEL);
2266 if (!info) { 2297 if (!info) {
2267 printk(KERN_ERR 2298 printk(KERN_ERR PFX "Could not allocate SI data\n");
2268 "ipmi_si: Could not allocate SI data\n");
2269 return; 2299 return;
2270 } 2300 }
2271 2301
2272 info->addr_source = "SMBIOS"; 2302 info->addr_source = SI_SMBIOS;
2303 printk(KERN_INFO PFX "probing via SMBIOS\n");
2273 2304
2274 switch (ipmi_data->type) { 2305 switch (ipmi_data->type) {
2275 case 0x01: /* KCS */ 2306 case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2299 2330
2300 default: 2331 default:
2301 kfree(info); 2332 kfree(info);
2302 printk(KERN_WARNING 2333 printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
2303 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2304 ipmi_data->addr_space); 2334 ipmi_data->addr_space);
2305 return; 2335 return;
2306 } 2336 }
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2318 if (info->irq) 2348 if (info->irq)
2319 info->irq_setup = std_irq_setup; 2349 info->irq_setup = std_irq_setup;
2320 2350
2321 try_smi_init(info); 2351 add_smi(info);
2322} 2352}
2323 2353
2324static void __devinit dmi_find_bmc(void) 2354static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2368 if (!info) 2398 if (!info)
2369 return -ENOMEM; 2399 return -ENOMEM;
2370 2400
2371 info->addr_source = "PCI"; 2401 info->addr_source = SI_PCI;
2402 dev_info(&pdev->dev, "probing via PCI");
2372 2403
2373 switch (class_type) { 2404 switch (class_type) {
2374 case PCI_ERMC_CLASSCODE_TYPE_SMIC: 2405 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2385 2416
2386 default: 2417 default:
2387 kfree(info); 2418 kfree(info);
2388 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", 2419 dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
2389 pci_name(pdev), class_type);
2390 return -ENOMEM; 2420 return -ENOMEM;
2391 } 2421 }
2392 2422
2393 rv = pci_enable_device(pdev); 2423 rv = pci_enable_device(pdev);
2394 if (rv) { 2424 if (rv) {
2395 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", 2425 dev_err(&pdev->dev, "couldn't enable PCI device\n");
2396 pci_name(pdev));
2397 kfree(info); 2426 kfree(info);
2398 return rv; 2427 return rv;
2399 } 2428 }
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2421 info->dev = &pdev->dev; 2450 info->dev = &pdev->dev;
2422 pci_set_drvdata(pdev, info); 2451 pci_set_drvdata(pdev, info);
2423 2452
2424 return try_smi_init(info); 2453 dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
2454 &pdev->resource[0], info->io.regsize, info->io.regspacing,
2455 info->irq);
2456
2457 return add_smi(info);
2425} 2458}
2426 2459
2427static void __devexit ipmi_pci_remove(struct pci_dev *pdev) 2460static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2473,7 +2506,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2473 int ret; 2506 int ret;
2474 int proplen; 2507 int proplen;
2475 2508
2476 dev_info(&dev->dev, PFX "probing via device tree\n"); 2509 dev_info(&dev->dev, "probing via device tree\n");
2477 2510
2478 ret = of_address_to_resource(np, 0, &resource); 2511 ret = of_address_to_resource(np, 0, &resource);
2479 if (ret) { 2512 if (ret) {
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2503 2536
2504 if (!info) { 2537 if (!info) {
2505 dev_err(&dev->dev, 2538 dev_err(&dev->dev,
2506 PFX "could not allocate memory for OF probe\n"); 2539 "could not allocate memory for OF probe\n");
2507 return -ENOMEM; 2540 return -ENOMEM;
2508 } 2541 }
2509 2542
2510 info->si_type = (enum si_type) match->data; 2543 info->si_type = (enum si_type) match->data;
2511 info->addr_source = "device-tree"; 2544 info->addr_source = SI_DEVICETREE;
2512 info->irq_setup = std_irq_setup; 2545 info->irq_setup = std_irq_setup;
2513 2546
2514 if (resource.flags & IORESOURCE_IO) { 2547 if (resource.flags & IORESOURCE_IO) {
@@ -2528,13 +2561,13 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2528 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 2561 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
2529 info->dev = &dev->dev; 2562 info->dev = &dev->dev;
2530 2563
2531 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n", 2564 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
2532 info->io.addr_data, info->io.regsize, info->io.regspacing, 2565 info->io.addr_data, info->io.regsize, info->io.regspacing,
2533 info->irq); 2566 info->irq);
2534 2567
2535 dev_set_drvdata(&dev->dev, info); 2568 dev_set_drvdata(&dev->dev, info);
2536 2569
2537 return try_smi_init(info); 2570 return add_smi(info);
2538} 2571}
2539 2572
2540static int __devexit ipmi_of_remove(struct of_device *dev) 2573static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2643,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2643 2676
2644 rv = wait_for_msg_done(smi_info); 2677 rv = wait_for_msg_done(smi_info);
2645 if (rv) { 2678 if (rv) {
2646 printk(KERN_WARNING 2679 printk(KERN_WARNING PFX "Error getting response from get"
2647 "ipmi_si: Error getting response from get global," 2680 " global enables command, the event buffer is not"
2648 " enables command, the event buffer is not"
2649 " enabled.\n"); 2681 " enabled.\n");
2650 goto out; 2682 goto out;
2651 } 2683 }
@@ -2657,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2657 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2689 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2658 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || 2690 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
2659 resp[2] != 0) { 2691 resp[2] != 0) {
2660 printk(KERN_WARNING 2692 printk(KERN_WARNING PFX "Invalid return from get global"
2661 "ipmi_si: Invalid return from get global" 2693 " enables command, cannot enable the event buffer.\n");
2662 " enables command, cannot enable the event"
2663 " buffer.\n");
2664 rv = -EINVAL; 2694 rv = -EINVAL;
2665 goto out; 2695 goto out;
2666 } 2696 }
@@ -2676,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2676 2706
2677 rv = wait_for_msg_done(smi_info); 2707 rv = wait_for_msg_done(smi_info);
2678 if (rv) { 2708 if (rv) {
2679 printk(KERN_WARNING 2709 printk(KERN_WARNING PFX "Error getting response from set"
 2680 "ipmi_si: Error getting response from set global," 2710 " global enables command, the event buffer is not"
2681 " enables command, the event buffer is not"
2682 " enabled.\n"); 2711 " enabled.\n");
2683 goto out; 2712 goto out;
2684 } 2713 }
@@ -2689,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2689 if (resp_len < 3 || 2718 if (resp_len < 3 ||
2690 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2719 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2691 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { 2720 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
 2692 printk(KERN_WARNING 2721 printk(KERN_WARNING PFX "Invalid return from set global"
 2693 "ipmi_si: Invalid return from get global," 2722 " enables command, cannot enable the event buffer.\n");
2694 "enables command, not enable the event"
2695 " buffer.\n");
2696 rv = -EINVAL; 2723 rv = -EINVAL;
2697 goto out; 2724 goto out;
2698 } 2725 }
@@ -2951,7 +2978,7 @@ static __devinit void default_find_bmc(void)
2951 if (!info) 2978 if (!info)
2952 return; 2979 return;
2953 2980
2954 info->addr_source = NULL; 2981 info->addr_source = SI_DEFAULT;
2955 2982
2956 info->si_type = ipmi_defaults[i].type; 2983 info->si_type = ipmi_defaults[i].type;
2957 info->io_setup = port_setup; 2984 info->io_setup = port_setup;
@@ -2963,14 +2990,16 @@ static __devinit void default_find_bmc(void)
2963 info->io.regsize = DEFAULT_REGSPACING; 2990 info->io.regsize = DEFAULT_REGSPACING;
2964 info->io.regshift = 0; 2991 info->io.regshift = 0;
2965 2992
2966 if (try_smi_init(info) == 0) { 2993 if (add_smi(info) == 0) {
2967 /* Found one... */ 2994 if ((try_smi_init(info)) == 0) {
2968 printk(KERN_INFO "ipmi_si: Found default %s state" 2995 /* Found one... */
2969 " machine at %s address 0x%lx\n", 2996 printk(KERN_INFO PFX "Found default %s"
2970 si_to_str[info->si_type], 2997 " state machine at %s address 0x%lx\n",
2971 addr_space_to_str[info->io.addr_type], 2998 si_to_str[info->si_type],
2972 info->io.addr_data); 2999 addr_space_to_str[info->io.addr_type],
2973 return; 3000 info->io.addr_data);
3001 } else
3002 cleanup_one_si(info);
2974 } 3003 }
2975 } 3004 }
2976} 3005}
@@ -2989,34 +3018,48 @@ static int is_new_interface(struct smi_info *info)
2989 return 1; 3018 return 1;
2990} 3019}
2991 3020
2992static int try_smi_init(struct smi_info *new_smi) 3021static int add_smi(struct smi_info *new_smi)
2993{ 3022{
2994 int rv; 3023 int rv = 0;
2995 int i;
2996
2997 if (new_smi->addr_source) {
2998 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2999 " machine at %s address 0x%lx, slave address 0x%x,"
3000 " irq %d\n",
3001 new_smi->addr_source,
3002 si_to_str[new_smi->si_type],
3003 addr_space_to_str[new_smi->io.addr_type],
3004 new_smi->io.addr_data,
3005 new_smi->slave_addr, new_smi->irq);
3006 }
3007 3024
3025 printk(KERN_INFO PFX "Adding %s-specified %s state machine",
3026 ipmi_addr_src_to_str[new_smi->addr_source],
3027 si_to_str[new_smi->si_type]);
3008 mutex_lock(&smi_infos_lock); 3028 mutex_lock(&smi_infos_lock);
3009 if (!is_new_interface(new_smi)) { 3029 if (!is_new_interface(new_smi)) {
3010 printk(KERN_WARNING "ipmi_si: duplicate interface\n"); 3030 printk(KERN_CONT PFX "duplicate interface\n");
3011 rv = -EBUSY; 3031 rv = -EBUSY;
3012 goto out_err; 3032 goto out_err;
3013 } 3033 }
3014 3034
3035 printk(KERN_CONT "\n");
3036
3015 /* So we know not to free it unless we have allocated one. */ 3037 /* So we know not to free it unless we have allocated one. */
3016 new_smi->intf = NULL; 3038 new_smi->intf = NULL;
3017 new_smi->si_sm = NULL; 3039 new_smi->si_sm = NULL;
3018 new_smi->handlers = NULL; 3040 new_smi->handlers = NULL;
3019 3041
3042 list_add_tail(&new_smi->link, &smi_infos);
3043
3044out_err:
3045 mutex_unlock(&smi_infos_lock);
3046 return rv;
3047}
3048
3049static int try_smi_init(struct smi_info *new_smi)
3050{
3051 int rv = 0;
3052 int i;
3053
3054 printk(KERN_INFO PFX "Trying %s-specified %s state"
3055 " machine at %s address 0x%lx, slave address 0x%x,"
3056 " irq %d\n",
3057 ipmi_addr_src_to_str[new_smi->addr_source],
3058 si_to_str[new_smi->si_type],
3059 addr_space_to_str[new_smi->io.addr_type],
3060 new_smi->io.addr_data,
3061 new_smi->slave_addr, new_smi->irq);
3062
3020 switch (new_smi->si_type) { 3063 switch (new_smi->si_type) {
3021 case SI_KCS: 3064 case SI_KCS:
3022 new_smi->handlers = &kcs_smi_handlers; 3065 new_smi->handlers = &kcs_smi_handlers;
@@ -3039,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi)
3039 /* Allocate the state machine's data and initialize it. */ 3082 /* Allocate the state machine's data and initialize it. */
3040 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 3083 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
3041 if (!new_smi->si_sm) { 3084 if (!new_smi->si_sm) {
3042 printk(KERN_ERR "Could not allocate state machine memory\n"); 3085 printk(KERN_ERR PFX
3086 "Could not allocate state machine memory\n");
3043 rv = -ENOMEM; 3087 rv = -ENOMEM;
3044 goto out_err; 3088 goto out_err;
3045 } 3089 }
@@ -3049,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi)
3049 /* Now that we know the I/O size, we can set up the I/O. */ 3093 /* Now that we know the I/O size, we can set up the I/O. */
3050 rv = new_smi->io_setup(new_smi); 3094 rv = new_smi->io_setup(new_smi);
3051 if (rv) { 3095 if (rv) {
3052 printk(KERN_ERR "Could not set up I/O space\n"); 3096 printk(KERN_ERR PFX "Could not set up I/O space\n");
3053 goto out_err; 3097 goto out_err;
3054 } 3098 }
3055 3099
@@ -3059,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi)
3059 /* Do low-level detection first. */ 3103 /* Do low-level detection first. */
3060 if (new_smi->handlers->detect(new_smi->si_sm)) { 3104 if (new_smi->handlers->detect(new_smi->si_sm)) {
3061 if (new_smi->addr_source) 3105 if (new_smi->addr_source)
3062 printk(KERN_INFO "ipmi_si: Interface detection" 3106 printk(KERN_INFO PFX "Interface detection failed\n");
3063 " failed\n");
3064 rv = -ENODEV; 3107 rv = -ENODEV;
3065 goto out_err; 3108 goto out_err;
3066 } 3109 }
@@ -3072,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi)
3072 rv = try_get_dev_id(new_smi); 3115 rv = try_get_dev_id(new_smi);
3073 if (rv) { 3116 if (rv) {
3074 if (new_smi->addr_source) 3117 if (new_smi->addr_source)
3075 printk(KERN_INFO "ipmi_si: There appears to be no BMC" 3118 printk(KERN_INFO PFX "There appears to be no BMC"
3076 " at this location\n"); 3119 " at this location\n");
3077 goto out_err; 3120 goto out_err;
3078 } 3121 }
@@ -3088,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi)
3088 for (i = 0; i < SI_NUM_STATS; i++) 3131 for (i = 0; i < SI_NUM_STATS; i++)
3089 atomic_set(&new_smi->stats[i], 0); 3132 atomic_set(&new_smi->stats[i], 0);
3090 3133
3091 new_smi->interrupt_disabled = 0; 3134 new_smi->interrupt_disabled = 1;
3092 atomic_set(&new_smi->stop_operation, 0); 3135 atomic_set(&new_smi->stop_operation, 0);
3093 new_smi->intf_num = smi_num; 3136 new_smi->intf_num = smi_num;
3094 smi_num++; 3137 smi_num++;
@@ -3114,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi)
3114 new_smi->pdev = platform_device_alloc("ipmi_si", 3157 new_smi->pdev = platform_device_alloc("ipmi_si",
3115 new_smi->intf_num); 3158 new_smi->intf_num);
3116 if (!new_smi->pdev) { 3159 if (!new_smi->pdev) {
3117 printk(KERN_ERR 3160 printk(KERN_ERR PFX
3118 "ipmi_si_intf:" 3161 "Unable to allocate platform device\n");
3119 " Unable to allocate platform device\n");
3120 goto out_err; 3162 goto out_err;
3121 } 3163 }
3122 new_smi->dev = &new_smi->pdev->dev; 3164 new_smi->dev = &new_smi->pdev->dev;
@@ -3124,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi)
3124 3166
3125 rv = platform_device_add(new_smi->pdev); 3167 rv = platform_device_add(new_smi->pdev);
3126 if (rv) { 3168 if (rv) {
3127 printk(KERN_ERR 3169 printk(KERN_ERR PFX
3128 "ipmi_si_intf:" 3170 "Unable to register system interface device:"
3129 " Unable to register system interface device:"
3130 " %d\n", 3171 " %d\n",
3131 rv); 3172 rv);
3132 goto out_err; 3173 goto out_err;
@@ -3141,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi)
3141 "bmc", 3182 "bmc",
3142 new_smi->slave_addr); 3183 new_smi->slave_addr);
3143 if (rv) { 3184 if (rv) {
3144 printk(KERN_ERR 3185 dev_err(new_smi->dev, "Unable to register device: error %d\n",
3145 "ipmi_si: Unable to register device: error %d\n", 3186 rv);
3146 rv);
3147 goto out_err_stop_timer; 3187 goto out_err_stop_timer;
3148 } 3188 }
3149 3189
@@ -3151,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi)
3151 type_file_read_proc, 3191 type_file_read_proc,
3152 new_smi); 3192 new_smi);
3153 if (rv) { 3193 if (rv) {
3154 printk(KERN_ERR 3194 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3155 "ipmi_si: Unable to create proc entry: %d\n",
3156 rv);
3157 goto out_err_stop_timer; 3195 goto out_err_stop_timer;
3158 } 3196 }
3159 3197
@@ -3161,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi)
3161 stat_file_read_proc, 3199 stat_file_read_proc,
3162 new_smi); 3200 new_smi);
3163 if (rv) { 3201 if (rv) {
3164 printk(KERN_ERR 3202 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3165 "ipmi_si: Unable to create proc entry: %d\n",
3166 rv);
3167 goto out_err_stop_timer; 3203 goto out_err_stop_timer;
3168 } 3204 }
3169 3205
@@ -3171,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi)
3171 param_read_proc, 3207 param_read_proc,
3172 new_smi); 3208 new_smi);
3173 if (rv) { 3209 if (rv) {
3174 printk(KERN_ERR 3210 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3175 "ipmi_si: Unable to create proc entry: %d\n",
3176 rv);
3177 goto out_err_stop_timer; 3211 goto out_err_stop_timer;
3178 } 3212 }
3179 3213
3180 list_add_tail(&new_smi->link, &smi_infos); 3214 dev_info(new_smi->dev, "IPMI %s interface initialized\n",
3181 3215 si_to_str[new_smi->si_type]);
3182 mutex_unlock(&smi_infos_lock);
3183
3184 printk(KERN_INFO "IPMI %s interface initialized\n",
3185 si_to_str[new_smi->si_type]);
3186 3216
3187 return 0; 3217 return 0;
3188 3218
@@ -3191,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi)
3191 wait_for_timer_and_thread(new_smi); 3221 wait_for_timer_and_thread(new_smi);
3192 3222
3193 out_err: 3223 out_err:
3194 if (new_smi->intf) 3224 new_smi->interrupt_disabled = 1;
3225
3226 if (new_smi->intf) {
3195 ipmi_unregister_smi(new_smi->intf); 3227 ipmi_unregister_smi(new_smi->intf);
3228 new_smi->intf = NULL;
3229 }
3196 3230
3197 if (new_smi->irq_cleanup) 3231 if (new_smi->irq_cleanup) {
3198 new_smi->irq_cleanup(new_smi); 3232 new_smi->irq_cleanup(new_smi);
3233 new_smi->irq_cleanup = NULL;
3234 }
3199 3235
3200 /* 3236 /*
3201 * Wait until we know that we are out of any interrupt 3237 * Wait until we know that we are out of any interrupt
@@ -3208,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi)
3208 if (new_smi->handlers) 3244 if (new_smi->handlers)
3209 new_smi->handlers->cleanup(new_smi->si_sm); 3245 new_smi->handlers->cleanup(new_smi->si_sm);
3210 kfree(new_smi->si_sm); 3246 kfree(new_smi->si_sm);
3247 new_smi->si_sm = NULL;
3211 } 3248 }
3212 if (new_smi->addr_source_cleanup) 3249 if (new_smi->addr_source_cleanup) {
3213 new_smi->addr_source_cleanup(new_smi); 3250 new_smi->addr_source_cleanup(new_smi);
3214 if (new_smi->io_cleanup) 3251 new_smi->addr_source_cleanup = NULL;
3252 }
3253 if (new_smi->io_cleanup) {
3215 new_smi->io_cleanup(new_smi); 3254 new_smi->io_cleanup(new_smi);
3255 new_smi->io_cleanup = NULL;
3256 }
3216 3257
3217 if (new_smi->dev_registered) 3258 if (new_smi->dev_registered) {
3218 platform_device_unregister(new_smi->pdev); 3259 platform_device_unregister(new_smi->pdev);
3219 3260 new_smi->dev_registered = 0;
3220 kfree(new_smi); 3261 }
3221
3222 mutex_unlock(&smi_infos_lock);
3223 3262
3224 return rv; 3263 return rv;
3225} 3264}
@@ -3229,6 +3268,8 @@ static __devinit int init_ipmi_si(void)
3229 int i; 3268 int i;
3230 char *str; 3269 char *str;
3231 int rv; 3270 int rv;
3271 struct smi_info *e;
3272 enum ipmi_addr_src type = SI_INVALID;
3232 3273
3233 if (initialized) 3274 if (initialized)
3234 return 0; 3275 return 0;
@@ -3237,9 +3278,7 @@ static __devinit int init_ipmi_si(void)
3237 /* Register the device drivers. */ 3278 /* Register the device drivers. */
3238 rv = driver_register(&ipmi_driver.driver); 3279 rv = driver_register(&ipmi_driver.driver);
3239 if (rv) { 3280 if (rv) {
3240 printk(KERN_ERR 3281 printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
3241 "init_ipmi_si: Unable to register driver: %d\n",
3242 rv);
3243 return rv; 3282 return rv;
3244 } 3283 }
3245 3284
@@ -3263,38 +3302,81 @@ static __devinit int init_ipmi_si(void)
3263 3302
3264 hardcode_find_bmc(); 3303 hardcode_find_bmc();
3265 3304
3266#ifdef CONFIG_DMI 3305 /* If the user gave us a device, they presumably want us to use it */
3267 dmi_find_bmc(); 3306 mutex_lock(&smi_infos_lock);
3268#endif 3307 if (!list_empty(&smi_infos)) {
3308 mutex_unlock(&smi_infos_lock);
3309 return 0;
3310 }
3311 mutex_unlock(&smi_infos_lock);
3269 3312
3270#ifdef CONFIG_ACPI 3313#ifdef CONFIG_PCI
3271 spmi_find_bmc(); 3314 rv = pci_register_driver(&ipmi_pci_driver);
3315 if (rv)
3316 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
3272#endif 3317#endif
3318
3273#ifdef CONFIG_ACPI 3319#ifdef CONFIG_ACPI
3274 pnp_register_driver(&ipmi_pnp_driver); 3320 pnp_register_driver(&ipmi_pnp_driver);
3275#endif 3321#endif
3276 3322
3277#ifdef CONFIG_PCI 3323#ifdef CONFIG_DMI
3278 rv = pci_register_driver(&ipmi_pci_driver); 3324 dmi_find_bmc();
3279 if (rv) 3325#endif
3280 printk(KERN_ERR 3326
3281 "init_ipmi_si: Unable to register PCI driver: %d\n", 3327#ifdef CONFIG_ACPI
3282 rv); 3328 spmi_find_bmc();
3283#endif 3329#endif
3284 3330
3285#ifdef CONFIG_PPC_OF 3331#ifdef CONFIG_PPC_OF
3286 of_register_platform_driver(&ipmi_of_platform_driver); 3332 of_register_platform_driver(&ipmi_of_platform_driver);
3287#endif 3333#endif
3288 3334
3335 /* We prefer devices with interrupts, but in the case of a machine
3336 with multiple BMCs we assume that there will be several instances
 3337 of a given type, so if we succeed in registering one type, we also
3338 try to register everything else of the same type */
3339
3340 mutex_lock(&smi_infos_lock);
3341 list_for_each_entry(e, &smi_infos, link) {
3342 /* Try to register a device if it has an IRQ and we either
3343 haven't successfully registered a device yet or this
3344 device has the same type as one we successfully registered */
3345 if (e->irq && (!type || e->addr_source == type)) {
3346 if (!try_smi_init(e)) {
3347 type = e->addr_source;
3348 }
3349 }
3350 }
3351
3352 /* type will only have been set if we successfully registered an si */
3353 if (type) {
3354 mutex_unlock(&smi_infos_lock);
3355 return 0;
3356 }
3357
3358 /* Fall back to the preferred device */
3359
3360 list_for_each_entry(e, &smi_infos, link) {
3361 if (!e->irq && (!type || e->addr_source == type)) {
3362 if (!try_smi_init(e)) {
3363 type = e->addr_source;
3364 }
3365 }
3366 }
3367 mutex_unlock(&smi_infos_lock);
3368
3369 if (type)
3370 return 0;
3371
3289 if (si_trydefaults) { 3372 if (si_trydefaults) {
3290 mutex_lock(&smi_infos_lock); 3373 mutex_lock(&smi_infos_lock);
3291 if (list_empty(&smi_infos)) { 3374 if (list_empty(&smi_infos)) {
3292 /* No BMC was found, try defaults. */ 3375 /* No BMC was found, try defaults. */
3293 mutex_unlock(&smi_infos_lock); 3376 mutex_unlock(&smi_infos_lock);
3294 default_find_bmc(); 3377 default_find_bmc();
3295 } else { 3378 } else
3296 mutex_unlock(&smi_infos_lock); 3379 mutex_unlock(&smi_infos_lock);
3297 }
3298 } 3380 }
3299 3381
3300 mutex_lock(&smi_infos_lock); 3382 mutex_lock(&smi_infos_lock);
@@ -3308,8 +3390,8 @@ static __devinit int init_ipmi_si(void)
3308 of_unregister_platform_driver(&ipmi_of_platform_driver); 3390 of_unregister_platform_driver(&ipmi_of_platform_driver);
3309#endif 3391#endif
3310 driver_unregister(&ipmi_driver.driver); 3392 driver_unregister(&ipmi_driver.driver);
3311 printk(KERN_WARNING 3393 printk(KERN_WARNING PFX
3312 "ipmi_si: Unable to find any System Interface(s)\n"); 3394 "Unable to find any System Interface(s)\n");
3313 return -ENODEV; 3395 return -ENODEV;
3314 } else { 3396 } else {
3315 mutex_unlock(&smi_infos_lock); 3397 mutex_unlock(&smi_infos_lock);
@@ -3320,7 +3402,7 @@ module_init(init_ipmi_si);
3320 3402
3321static void cleanup_one_si(struct smi_info *to_clean) 3403static void cleanup_one_si(struct smi_info *to_clean)
3322{ 3404{
3323 int rv; 3405 int rv = 0;
3324 unsigned long flags; 3406 unsigned long flags;
3325 3407
3326 if (!to_clean) 3408 if (!to_clean)
@@ -3364,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
3364 schedule_timeout_uninterruptible(1); 3446 schedule_timeout_uninterruptible(1);
3365 } 3447 }
3366 3448
3367 rv = ipmi_unregister_smi(to_clean->intf); 3449 if (to_clean->intf)
3450 rv = ipmi_unregister_smi(to_clean->intf);
3451
3368 if (rv) { 3452 if (rv) {
3369 printk(KERN_ERR 3453 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3370 "ipmi_si: Unable to unregister device: errno=%d\n",
3371 rv); 3454 rv);
3372 } 3455 }
3373 3456
3374 to_clean->handlers->cleanup(to_clean->si_sm); 3457 if (to_clean->handlers)
3458 to_clean->handlers->cleanup(to_clean->si_sm);
3375 3459
3376 kfree(to_clean->si_sm); 3460 kfree(to_clean->si_sm);
3377 3461
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd37543aa79..02abfddce45a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp)
287 char *name; 287 char *name;
288 int fl; 288 int fl;
289 289
290 name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); 290 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
291 if (name == NULL) 291 if (name == NULL)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
294 sprintf (name, CHRDEV "%x", minor);
295
296 port = parport_find_number (minor); 294 port = parport_find_number (minor);
297 if (!port) { 295 if (!port) {
298 printk (KERN_WARNING "%s: no associated port!\n", name); 296 printk (KERN_WARNING "%s: no associated port!\n", name);
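The ppdev change collapses the kmalloc()/sprintf() pair into kasprintf(),
which sizes, allocates, and formats the string in one call. A minimal sketch
of the idiom (the "pp" prefix and helper name are illustrative, not ppdev's
actual CHRDEV value):

    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* Returns a freshly allocated "pp<minor-in-hex>" string, or NULL on
     * allocation failure; the caller kfree()s it, exactly as ppdev does. */
    static char *make_minor_name(int minor)
    {
            return kasprintf(GFP_KERNEL, "pp%x", minor);
    }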
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 000000000000..74f00b5ffa36
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
1/*
2 * RAM Oops/Panic logger
3 *
4 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/kmsg_dump.h>
25#include <linux/time.h>
26#include <linux/io.h>
27#include <linux/ioport.h>
28
29#define RAMOOPS_KERNMSG_HDR "===="
30#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
31
32#define RECORD_SIZE 4096
33
34static ulong mem_address;
35module_param(mem_address, ulong, 0400);
36MODULE_PARM_DESC(mem_address,
37 "start of reserved RAM used to store oops/panic logs");
38
39static ulong mem_size;
40module_param(mem_size, ulong, 0400);
41MODULE_PARM_DESC(mem_size,
42 "size of reserved RAM used to store oops/panic logs");
43
44static int dump_oops = 1;
45module_param(dump_oops, int, 0600);
46MODULE_PARM_DESC(dump_oops,
47 "set to 1 to dump oopses, 0 to only dump panics (default 1)");
48
49static struct ramoops_context {
50 struct kmsg_dumper dump;
51 void *virt_addr;
52 phys_addr_t phys_addr;
53 unsigned long size;
54 int count;
55 int max_count;
56} oops_cxt;
57
58static void ramoops_do_dump(struct kmsg_dumper *dumper,
59 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
60 const char *s2, unsigned long l2)
61{
62 struct ramoops_context *cxt = container_of(dumper,
63 struct ramoops_context, dump);
64 unsigned long s1_start, s2_start;
65 unsigned long l1_cpy, l2_cpy;
66 int res;
67 char *buf;
68 struct timeval timestamp;
69
70 /* Only dump oopses if dump_oops is set */
71 if (reason == KMSG_DUMP_OOPS && !dump_oops)
72 return;
73
74 buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
75 memset(buf, '\0', RECORD_SIZE);
76 res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
77 buf += res;
78 do_gettimeofday(&timestamp);
 79 res = sprintf(buf, "%lu.%lu\n", (unsigned long)timestamp.tv_sec, (unsigned long)timestamp.tv_usec);
80 buf += res;
81
82 l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
83 l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
84
85 s2_start = l2 - l2_cpy;
86 s1_start = l1 - l1_cpy;
87
88 memcpy(buf, s1 + s1_start, l1_cpy);
89 memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
90
91 cxt->count = (cxt->count + 1) % cxt->max_count;
92}
93
94static int __init ramoops_init(void)
95{
96 struct ramoops_context *cxt = &oops_cxt;
97 int err = -EINVAL;
98
99 if (!mem_size) {
 100 printk(KERN_ERR "ramoops: invalid size specification\n");
101 goto fail3;
102 }
103
 104 mem_size = rounddown_pow_of_two(mem_size);
105
106 if (mem_size < RECORD_SIZE) {
 107 printk(KERN_ERR "ramoops: size too small\n");
108 goto fail3;
109 }
110
111 cxt->max_count = mem_size / RECORD_SIZE;
112 cxt->count = 0;
113 cxt->size = mem_size;
114 cxt->phys_addr = mem_address;
115
116 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
 117 printk(KERN_ERR "ramoops: request mem region failed\n");
118 err = -EINVAL;
119 goto fail3;
120 }
121
122 cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
123 if (!cxt->virt_addr) {
 124 printk(KERN_ERR "ramoops: ioremap failed\n");
125 goto fail2;
126 }
127
128 cxt->dump.dump = ramoops_do_dump;
129 err = kmsg_dump_register(&cxt->dump);
130 if (err) {
 131 printk(KERN_ERR "ramoops: registering kmsg dumper failed\n");
132 goto fail1;
133 }
134
135 return 0;
136
137fail1:
138 iounmap(cxt->virt_addr);
139fail2:
140 release_mem_region(cxt->phys_addr, cxt->size);
141fail3:
142 return err;
143}
144
145static void __exit ramoops_exit(void)
146{
147 struct ramoops_context *cxt = &oops_cxt;
148
149 if (kmsg_dump_unregister(&cxt->dump) < 0)
 150 printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper\n");
151
152 iounmap(cxt->virt_addr);
153 release_mem_region(cxt->phys_addr, cxt->size);
154}
155
156
157module_init(ramoops_init);
158module_exit(ramoops_exit);
159
160MODULE_LICENSE("GPL");
161MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
162MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
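kmsg_dump() hands the dumper two log segments, s1 (older) and s2 (newer), and
ramoops_do_dump() keeps whatever newest bytes still fit after the header. A
worked example with illustrative numbers, assuming a 21-byte header (i.e.
sizeof(struct timeval) == 16):

    avail    = 4096 - 21              = 4075  /* RECORD_SIZE - header */
    l2_cpy   = min(5000, 4075)        = 4075  /* for l2 = 5000 */
    l1_cpy   = min(3000, 4075 - 4075) = 0     /* for l1 = 3000 */
    s2_start = 5000 - 4075            = 925   /* oldest 925 bytes skipped */

so the record holds only the tail of s2, and s1 is dropped entirely. At load
time the module expects a RAM window reserved away from the kernel, e.g.
(addresses made up) modprobe ramoops mem_address=0x8f000000 mem_size=0x100000.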
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d1164fec5..7cdb6ee569cd 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
3967 font.charcount = op->charcount; 3967 font.charcount = op->charcount;
3968 font.height = op->height; 3968 font.height = op->height;
3969 font.width = op->width; 3969 font.width = op->width;
3970 font.data = kmalloc(size, GFP_KERNEL); 3970 font.data = memdup_user(op->data, size);
3971 if (!font.data) 3971 if (IS_ERR(font.data))
3972 return -ENOMEM; 3972 return PTR_ERR(font.data);
3973 if (copy_from_user(font.data, op->data, size)) {
3974 kfree(font.data);
3975 return -EFAULT;
3976 }
3977 acquire_console_sem(); 3973 acquire_console_sem();
3978 if (vc->vc_sw->con_font_set) 3974 if (vc->vc_sw->con_font_set)
3979 rc = vc->vc_sw->con_font_set(vc, &font, op->flags); 3975 rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
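The vt change is a direct substitution of the open-coded kmalloc() plus
copy_from_user() sequence with memdup_user(), which returns either the filled
buffer or an ERR_PTR. A minimal sketch of the idiom (user_ptr and size stand
in for the caller's arguments):

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    void *buf = memdup_user(user_ptr, size);  /* kmalloc + copy in one step */
    if (IS_ERR(buf))
            return PTR_ERR(buf);              /* -ENOMEM or -EFAULT */
    /* ... use buf ... */
    kfree(buf);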
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2ac5f6..996c1bdb5a34 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
774static void i5000_check_error(struct mem_ctl_info *mci) 774static void i5000_check_error(struct mem_ctl_info *mci)
775{ 775{
776 struct i5000_error_info info; 776 struct i5000_error_info info;
777 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 777 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
778 i5000_get_error_info(mci, &info); 778 i5000_get_error_info(mci, &info);
779 i5000_process_error_info(mci, &info, 1); 779 i5000_process_error_info(mci, &info, 1);
780} 780}
@@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1353 int num_dimms_per_channel; 1353 int num_dimms_per_channel;
1354 int num_csrows; 1354 int num_csrows;
1355 1355
1356 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1356 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1357 __func__, 1357 __FILE__, __func__,
1358 pdev->bus->number, 1358 pdev->bus->number,
1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1360 1360
@@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1389 return -ENOMEM; 1389 return -ENOMEM;
1390 1390
1391 kobject_get(&mci->edac_mci_kobj); 1391 kobject_get(&mci->edac_mci_kobj);
1392 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1392 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1393 1393
1394 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1394 mci->dev = &pdev->dev; /* record ptr to the generic device */
1395 1395
@@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1432 1432
1433 /* add this new MC control structure to EDAC's list of MCs */ 1433 /* add this new MC control structure to EDAC's list of MCs */
1434 if (edac_mc_add_mc(mci)) { 1434 if (edac_mc_add_mc(mci)) {
1435 debugf0("MC: " __FILE__ 1435 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1436 ": %s(): failed edac_mc_add_mc()\n", __func__); 1436 __FILE__, __func__);
1437 /* FIXME: perhaps some code should go here that disables error 1437 /* FIXME: perhaps some code should go here that disables error
1438 * reporting if we just enabled it 1438 * reporting if we just enabled it
1439 */ 1439 */
@@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
1478{ 1478{
1479 int rc; 1479 int rc;
1480 1480
1481 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1481 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1482 1482
1483 /* wake up device */ 1483 /* wake up device */
1484 rc = pci_enable_device(pdev); 1484 rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1497{ 1497{
1498 struct mem_ctl_info *mci; 1498 struct mem_ctl_info *mci;
1499 1499
1500 debugf0(__FILE__ ": %s()\n", __func__); 1500 debugf0("%s: %s()\n", __FILE__, __func__);
1501 1501
1502 if (i5000_pci) 1502 if (i5000_pci)
1503 edac_pci_release_generic_ctl(i5000_pci); 1503 edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@ static int __init i5000_init(void)
1544{ 1544{
1545 int pci_rc; 1545 int pci_rc;
1546 1546
1547 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1547 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1548 1548
1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1550 opstate_init(); 1550 opstate_init();
@@ -1560,7 +1560,7 @@ static int __init i5000_init(void)
1560 */ 1560 */
1561static void __exit i5000_exit(void) 1561static void __exit i5000_exit(void)
1562{ 1562{
1563 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1563 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1564 pci_unregister_driver(&i5000_driver); 1564 pci_unregister_driver(&i5000_driver);
1565} 1565}
1566 1566
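This and the two EDAC diffs that follow make the same mechanical change:
__FILE__ is passed as a %s argument instead of being spliced into the format
by string-literal concatenation, so the format string stays one plain
constant:

    /* before: the file name is compiled into the format string */
    debugf0("MC: " __FILE__ ": %s()\n", __func__);

    /* after: identical output, fixed format string */
    debugf0("MC: %s: %s()\n", __FILE__, __func__);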
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d10655ed4..010c1d6526f5 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
694static void i5400_check_error(struct mem_ctl_info *mci) 694static void i5400_check_error(struct mem_ctl_info *mci)
695{ 695{
696 struct i5400_error_info info; 696 struct i5400_error_info info;
697 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 697 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
698 i5400_get_error_info(mci, &info); 698 i5400_get_error_info(mci, &info);
699 i5400_process_error_info(mci, &info); 699 i5400_process_error_info(mci, &info);
700} 700}
@@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1227 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1227 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1228 return -EINVAL; 1228 return -EINVAL;
1229 1229
1230 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1230 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1231 __func__, 1231 __FILE__, __func__,
1232 pdev->bus->number, 1232 pdev->bus->number,
1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1234 1234
@@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1256 if (mci == NULL) 1256 if (mci == NULL)
1257 return -ENOMEM; 1257 return -ENOMEM;
1258 1258
1259 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1259 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1260 1260
1261 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1261 mci->dev = &pdev->dev; /* record ptr to the generic device */
1262 1262
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1299 1299
1300 /* add this new MC control structure to EDAC's list of MCs */ 1300 /* add this new MC control structure to EDAC's list of MCs */
1301 if (edac_mc_add_mc(mci)) { 1301 if (edac_mc_add_mc(mci)) {
1302 debugf0("MC: " __FILE__ 1302 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1303 ": %s(): failed edac_mc_add_mc()\n", __func__); 1303 __FILE__, __func__);
1304 /* FIXME: perhaps some code should go here that disables error 1304 /* FIXME: perhaps some code should go here that disables error
1305 * reporting if we just enabled it 1305 * reporting if we just enabled it
1306 */ 1306 */
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
1344{ 1344{
1345 int rc; 1345 int rc;
1346 1346
1347 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1347 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1348 1348
1349 /* wake up device */ 1349 /* wake up device */
1350 rc = pci_enable_device(pdev); 1350 rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
1363{ 1363{
1364 struct mem_ctl_info *mci; 1364 struct mem_ctl_info *mci;
1365 1365
1366 debugf0(__FILE__ ": %s()\n", __func__); 1366 debugf0("%s: %s()\n", __FILE__, __func__);
1367 1367
1368 if (i5400_pci) 1368 if (i5400_pci)
1369 edac_pci_release_generic_ctl(i5400_pci); 1369 edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
1409{ 1409{
1410 int pci_rc; 1410 int pci_rc;
1411 1411
1412 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1412 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1413 1413
1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1415 opstate_init(); 1415 opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
1425 */ 1425 */
1426static void __exit i5400_exit(void) 1426static void __exit i5400_exit(void)
1427{ 1427{
1428 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1428 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1429 pci_unregister_driver(&i5400_driver); 1429 pci_unregister_driver(&i5400_driver);
1430} 1430}
1431 1431
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 2bf2c5051bfe..a2fa1feed724 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
178{ 178{
179 struct i82443bxgx_edacmc_error_info info; 179 struct i82443bxgx_edacmc_error_info info;
180 180
181 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
182 i82443bxgx_edacmc_get_error_info(mci, &info); 182 i82443bxgx_edacmc_get_error_info(mci, &info);
183 i82443bxgx_edacmc_process_error_info(mci, &info, 1); 183 i82443bxgx_edacmc_process_error_info(mci, &info, 1);
184} 184}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
198 for (index = 0; index < mci->nr_csrows; index++) { 198 for (index = 0; index < mci->nr_csrows; index++) {
199 csrow = &mci->csrows[index]; 199 csrow = &mci->csrows[index];
200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
201 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n", 201 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
202 mci->mc_idx, __func__, index, drbar); 202 mci->mc_idx, __FILE__, __func__, index, drbar);
203 row_high_limit = ((u32) drbar << 23); 203 row_high_limit = ((u32) drbar << 23);
204 /* find the DRAM Chip Select Base address and mask */ 204 /* find the DRAM Chip Select Base address and mask */
205 debugf1("MC%d: " __FILE__ ": %s() Row=%d, " 205 debugf1("MC%d: %s: %s() Row=%d, "
 206 "Boundry Address=%#0x, Last = %#0x \n", 206 "Boundary Address=%#0x, Last = %#0x\n",
207 mci->mc_idx, __func__, index, row_high_limit, 207 mci->mc_idx, __FILE__, __func__, index, row_high_limit,
208 row_high_limit_last); 208 row_high_limit_last);
209 209
210 /* 440GX goes to 2GB, represented with a DRB of 0. */ 210 /* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
237 enum mem_type mtype; 237 enum mem_type mtype;
238 enum edac_type edac_mode; 238 enum edac_type edac_mode;
239 239
240 debugf0("MC: " __FILE__ ": %s()\n", __func__); 240 debugf0("MC: %s: %s()\n", __FILE__, __func__);
241 241
242 /* Something is really hosed if PCI config space reads from 242 /* Something is really hosed if PCI config space reads from
243 * the MC aren't working. 243 * the MC aren't working.
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
250 if (mci == NULL) 250 if (mci == NULL)
251 return -ENOMEM; 251 return -ENOMEM;
252 252
253 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 253 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
254 mci->dev = &pdev->dev; 254 mci->dev = &pdev->dev;
255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; 255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
336 __func__); 336 __func__);
337 } 337 }
338 338
339 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 339 debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
340 return 0; 340 return 0;
341 341
342fail: 342fail:
@@ -352,7 +352,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
352{ 352{
353 int rc; 353 int rc;
354 354
355 debugf0("MC: " __FILE__ ": %s()\n", __func__); 355 debugf0("MC: %s: %s()\n", __FILE__, __func__);
356 356
357 /* don't need to call pci_enable_device() */ 357 /* don't need to call pci_enable_device() */
358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); 358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
367{ 367{
368 struct mem_ctl_info *mci; 368 struct mem_ctl_info *mci;
369 369
370 debugf0(__FILE__ ": %s()\n", __func__); 370 debugf0("%s: %s()\n", __FILE__, __func__);
371 371
372 if (i82443bxgx_pci) 372 if (i82443bxgx_pci)
373 edac_pci_release_generic_ctl(i82443bxgx_pci); 373 edac_pci_release_generic_ctl(i82443bxgx_pci);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a19..4fd0f276df5a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
139 Board setup code must specify the model to use, and the start 139 Board setup code must specify the model to use, and the start
140 number for these GPIOs. 140 number for these GPIOs.
141 141
142config GPIO_MAX732X_IRQ
143 bool "Interrupt controller support for MAX732x"
144 depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
145 help
146 Say yes here to enable the max732x to be used as an interrupt
147 controller. It requires the driver to be built in the kernel.
148
142config GPIO_PCA953X 149config GPIO_PCA953X
143 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports" 150 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
144 depends on I2C 151 depends on I2C
@@ -264,10 +271,10 @@ config GPIO_BT8XX
264 If unsure, say N. 271 If unsure, say N.
265 272
266config GPIO_LANGWELL 273config GPIO_LANGWELL
267 bool "Intel Moorestown Platform Langwell GPIO support" 274 bool "Intel Langwell/Penwell GPIO support"
268 depends on PCI 275 depends on PCI
269 help 276 help
270 Say Y here to support Intel Moorestown platform GPIO. 277 Say Y here to support Intel Langwell/Penwell GPIO.
271 278
272config GPIO_TIMBERDALE 279config GPIO_TIMBERDALE
273 bool "Support for timberdale GPIO IP" 280 bool "Support for timberdale GPIO IP"
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498f2260..f73a1555e49d 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
197 return 0; 197 return 0;
198} 198}
199 199
200static char *cs5535_gpio_names[] = { 200static const char * const cs5535_gpio_names[] = {
201 "GPIO0", "GPIO1", "GPIO2", "GPIO3", 201 "GPIO0", "GPIO1", "GPIO2", "GPIO3",
202 "GPIO4", "GPIO5", "GPIO6", "GPIO7", 202 "GPIO4", "GPIO5", "GPIO6", "GPIO7",
203 "GPIO8", "GPIO9", "GPIO10", "GPIO11", 203 "GPIO8", "GPIO9", "GPIO10", "GPIO11",
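The second const is the point of the cs5535 change: const char * const makes
the pointer array itself read-only as well as the strings it points at, so
the whole table can be placed in rodata:

    static const char * const names[] = { "GPIO0", "GPIO1" };
    /* names[0] = "other";  -- would now fail: assignment of read-only
     * location, instead of silently allowing the table to be rewritten. */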
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cae1b8c5b08c..3ca36542e338 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
722 unsigned long flags; 722 unsigned long flags;
723 struct gpio_desc *desc; 723 struct gpio_desc *desc;
724 int status = -EINVAL; 724 int status = -EINVAL;
725 char *ioname = NULL; 725 const char *ioname = NULL;
726 726
727 /* can't export until sysfs is available ... */ 727 /* can't export until sysfs is available ... */
728 if (!gpio_class.p) { 728 if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
753 struct device *dev; 753 struct device *dev;
754 754
755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
756 desc, ioname ? ioname : "gpio%d", gpio); 756 desc, ioname ? ioname : "gpio%u", gpio);
757 if (!IS_ERR(dev)) { 757 if (!IS_ERR(dev)) {
758 status = sysfs_create_group(&dev->kobj, 758 status = sysfs_create_group(&dev->kobj,
759 &gpio_attr_group); 759 &gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
1106fail: 1106fail:
1107 /* failures here can mean systems won't boot... */ 1107 /* failures here can mean systems won't boot... */
1108 if (status) 1108 if (status)
1109 pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", 1109 pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
1110 chip->base, chip->base + chip->ngpio - 1, 1110 chip->base, chip->base + chip->ngpio - 1,
1111 chip->label ? : "generic"); 1111 chip->label ? : "generic");
1112 return status; 1112 return status;
@@ -1447,6 +1447,49 @@ fail:
1447} 1447}
1448EXPORT_SYMBOL_GPL(gpio_direction_output); 1448EXPORT_SYMBOL_GPL(gpio_direction_output);
1449 1449
1450/**
1451 * gpio_set_debounce - sets @debounce time for a @gpio
1452 * @gpio: the gpio to set debounce time
 1453 * @debounce: debounce time in microseconds
1454 */
1455int gpio_set_debounce(unsigned gpio, unsigned debounce)
1456{
1457 unsigned long flags;
1458 struct gpio_chip *chip;
1459 struct gpio_desc *desc = &gpio_desc[gpio];
1460 int status = -EINVAL;
1461
1462 spin_lock_irqsave(&gpio_lock, flags);
1463
1464 if (!gpio_is_valid(gpio))
1465 goto fail;
1466 chip = desc->chip;
1467 if (!chip || !chip->set || !chip->set_debounce)
1468 goto fail;
1469 gpio -= chip->base;
1470 if (gpio >= chip->ngpio)
1471 goto fail;
1472 status = gpio_ensure_requested(desc, gpio);
1473 if (status < 0)
1474 goto fail;
1475
1476 /* now we know the gpio is valid and chip won't vanish */
1477
1478 spin_unlock_irqrestore(&gpio_lock, flags);
1479
1480 might_sleep_if(extra_checks && chip->can_sleep);
1481
1482 return chip->set_debounce(chip, gpio, debounce);
1483
1484fail:
1485 spin_unlock_irqrestore(&gpio_lock, flags);
1486 if (status)
1487 pr_debug("%s: gpio-%d status %d\n",
1488 __func__, gpio, status);
1489
1490 return status;
1491}
1492EXPORT_SYMBOL_GPL(gpio_set_debounce);
1450 1493
1451/* I/O calls are only valid after configuration completed; the relevant 1494/* I/O calls are only valid after configuration completed; the relevant
1452 * "is this a valid GPIO" error checks should already have been done. 1495 * "is this a valid GPIO" error checks should already have been done.
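gpio_set_debounce() only succeeds when the underlying gpio_chip implements
the set_debounce() hook, so callers should treat it as best-effort. A
hypothetical consumer (the GPIO number and label are invented):

    int ret = gpio_request(42, "wake-button");
    if (ret)
            return ret;
    gpio_direction_input(42);
    /* Best effort: fails with -EINVAL when the chip lacks the hook. */
    if (gpio_set_debounce(42, 5000))          /* debounce time in us */
            pr_info("no hardware debounce; reading raw input\n");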
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 41a9388f2fde..48fc43c4bdd1 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -217,7 +217,10 @@ gpiochip_add_err:
217static void __exit it8761e_gpio_exit(void) 217static void __exit it8761e_gpio_exit(void)
218{ 218{
219 if (gpio_ba) { 219 if (gpio_ba) {
220 gpiochip_remove(&it8761e_gpio_chip); 220 int ret = gpiochip_remove(&it8761e_gpio_chip);
221
222 WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
223 __func__, ret);
221 224
222 release_region(gpio_ba, GPIO_IOSIZE); 225 release_region(gpio_ba, GPIO_IOSIZE);
223 gpio_ba = 0; 226 gpio_ba = 0;
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14127af..8383a8d7f994 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
17 17
18/* Supports: 18/* Supports:
19 * Moorestown platform Langwell chip. 19 * Moorestown platform Langwell chip.
20 * Medfield platform Penwell chip.
20 */ 21 */
21 22
22#include <linux/module.h> 23#include <linux/module.h>
@@ -31,44 +32,65 @@
31#include <linux/gpio.h> 32#include <linux/gpio.h>
32#include <linux/slab.h> 33#include <linux/slab.h>
33 34
34struct lnw_gpio_register { 35/*
35 u32 GPLR[2]; 36 * Langwell chip has 64 pins and thus there are 2 32bit registers to control
 36 u32 GPDR[2]; 37 * each feature, while Penwell chip has 96 pins for each block, and needs 3 32bit
37 u32 GPSR[2]; 38 * registers to control them, so we only define the order here instead of a
38 u32 GPCR[2]; 39 * structure, to get a bit offset for a pin (use GPDR as an example):
39 u32 GRER[2]; 40 *
40 u32 GFER[2]; 41 * nreg = ngpio / 32;
41 u32 GEDR[2]; 42 * reg = offset / 32;
43 * bit = offset % 32;
44 * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
45 *
 46 * so that bit of the register at reg_addr controls pin offset's GPDR feature
47*/
48
49enum GPIO_REG {
50 GPLR = 0, /* pin level read-only */
51 GPDR, /* pin direction */
52 GPSR, /* pin set */
53 GPCR, /* pin clear */
54 GRER, /* rising edge detect */
55 GFER, /* falling edge detect */
56 GEDR, /* edge detect result */
42}; 57};
43 58
44struct lnw_gpio { 59struct lnw_gpio {
45 struct gpio_chip chip; 60 struct gpio_chip chip;
46 struct lnw_gpio_register *reg_base; 61 void *reg_base;
47 spinlock_t lock; 62 spinlock_t lock;
48 unsigned irq_base; 63 unsigned irq_base;
49}; 64};
50 65
51static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) 66static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
67 enum GPIO_REG reg_type)
52{ 68{
53 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 69 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
70 unsigned nreg = chip->ngpio / 32;
54 u8 reg = offset / 32; 71 u8 reg = offset / 32;
55 void __iomem *gplr; 72 void __iomem *ptr;
73
74 ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
75 return ptr;
76}
77
78static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
79{
80 void __iomem *gplr = gpio_reg(chip, offset, GPLR);
56 81
57 gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
58 return readl(gplr) & BIT(offset % 32); 82 return readl(gplr) & BIT(offset % 32);
59} 83}
60 84
61static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 85static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
62{ 86{
63 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
64 u8 reg = offset / 32;
65 void __iomem *gpsr, *gpcr; 87 void __iomem *gpsr, *gpcr;
66 88
67 if (value) { 89 if (value) {
68 gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]); 90 gpsr = gpio_reg(chip, offset, GPSR);
69 writel(BIT(offset % 32), gpsr); 91 writel(BIT(offset % 32), gpsr);
70 } else { 92 } else {
71 gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]); 93 gpcr = gpio_reg(chip, offset, GPCR);
72 writel(BIT(offset % 32), gpcr); 94 writel(BIT(offset % 32), gpcr);
73 } 95 }
74} 96}
@@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
76static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 98static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
77{ 99{
78 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 100 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
79 u8 reg = offset / 32; 101 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
80 u32 value; 102 u32 value;
81 unsigned long flags; 103 unsigned long flags;
82 void __iomem *gpdr;
83 104
84 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
85 spin_lock_irqsave(&lnw->lock, flags); 105 spin_lock_irqsave(&lnw->lock, flags);
86 value = readl(gpdr); 106 value = readl(gpdr);
87 value &= ~BIT(offset % 32); 107 value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
94 unsigned offset, int value) 114 unsigned offset, int value)
95{ 115{
96 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 116 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
97 u8 reg = offset / 32; 117 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
98 unsigned long flags; 118 unsigned long flags;
99 void __iomem *gpdr;
100 119
101 lnw_gpio_set(chip, offset, value); 120 lnw_gpio_set(chip, offset, value);
102 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
103 spin_lock_irqsave(&lnw->lock, flags); 121 spin_lock_irqsave(&lnw->lock, flags);
104 value = readl(gpdr); 122 value = readl(gpdr);
 105 value |= BIT(offset % 32); 123
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type)
118{ 136{
119 struct lnw_gpio *lnw = get_irq_chip_data(irq); 137 struct lnw_gpio *lnw = get_irq_chip_data(irq);
120 u32 gpio = irq - lnw->irq_base; 138 u32 gpio = irq - lnw->irq_base;
121 u8 reg = gpio / 32;
122 unsigned long flags; 139 unsigned long flags;
123 u32 value; 140 u32 value;
124 void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]); 141 void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
125 void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]); 142 void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
126 143
127 if (gpio >= lnw->chip.ngpio) 144 if (gpio >= lnw->chip.ngpio)
128 return -EINVAL; 145 return -EINVAL;
@@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = {
158 .set_type = lnw_irq_type, 175 .set_type = lnw_irq_type,
159}; 176};
160 177
161static struct pci_device_id lnw_gpio_ids[] = { 178static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) }, 179 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
180 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
163 { 0, } 182 { 0, }
164}; 183};
165MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); 184MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
167static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) 186static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
168{ 187{
169 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq); 188 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
170 u32 reg, gpio; 189 u32 base, gpio;
171 void __iomem *gedr; 190 void __iomem *gedr;
172 u32 gedr_v; 191 u32 gedr_v;
173 192
174 /* check GPIO controller to check which pin triggered the interrupt */ 193 /* check GPIO controller to check which pin triggered the interrupt */
175 for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) { 194 for (base = 0; base < lnw->chip.ngpio; base += 32) {
176 gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]); 195 gedr = gpio_reg(&lnw->chip, base, GEDR);
177 gedr_v = readl(gedr); 196 gedr_v = readl(gedr);
178 if (!gedr_v) 197 if (!gedr_v)
179 continue; 198 continue;
180 for (gpio = reg*32; gpio < reg*32+32; gpio++) 199 for (gpio = base; gpio < base + 32; gpio++)
181 if (gedr_v & BIT(gpio % 32)) { 200 if (gedr_v & BIT(gpio % 32)) {
182 pr_debug("pin %d triggered\n", gpio); 201 pr_debug("pin %d triggered\n", gpio);
183 generic_handle_irq(lnw->irq_base + gpio); 202 generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
245 lnw->chip.set = lnw_gpio_set; 264 lnw->chip.set = lnw_gpio_set;
246 lnw->chip.to_irq = lnw_gpio_to_irq; 265 lnw->chip.to_irq = lnw_gpio_to_irq;
247 lnw->chip.base = gpio_base; 266 lnw->chip.base = gpio_base;
248 lnw->chip.ngpio = 64; 267 lnw->chip.ngpio = id->driver_data;
249 lnw->chip.can_sleep = 0; 268 lnw->chip.can_sleep = 0;
250 pci_set_drvdata(pdev, lnw); 269 pci_set_drvdata(pdev, lnw);
251 retval = gpiochip_add(&lnw->chip); 270 retval = gpiochip_add(&lnw->chip);
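
The comment at the top of the file fully determines the register layout, so the arithmetic in gpio_reg() can be checked in isolation; a standalone sketch of the same math (the pin numbers are picked arbitrarily for illustration):

    #include <stdio.h>

    /* mirrors enum GPIO_REG from the patch */
    enum { GPLR, GPDR, GPSR, GPCR, GRER, GFER, GEDR };

    /* byte offset of the 32-bit register controlling 'pin' for 'feature' */
    static unsigned reg_offset(unsigned ngpio, unsigned feature, unsigned pin)
    {
            unsigned nreg = ngpio / 32;     /* registers per feature block */
            unsigned reg = pin / 32;        /* register index inside the block */

            return feature * nreg * 4 + reg * 4;
    }

    int main(void)
    {
            /* Langwell, 64 pins: GPDR block at 0x08, pin 40 -> 0x0c, bit 8 */
            printf("Langwell GPDR pin 40: 0x%02x bit %d\n",
                   reg_offset(64, GPDR, 40), 40 % 32);
            /* Penwell, 96 pins: GPDR block at 0x0c, pin 70 -> 0x14, bit 6 */
            printf("Penwell  GPDR pin 70: 0x%02x bit %d\n",
                   reg_offset(96, GPDR, 70), 70 % 32);
            return 0;
    }
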
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f7868243af89..9cad60f9e962 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20 20#include <linux/interrupt.h>
21#include <linux/irq.h>
21#include <linux/i2c.h> 22#include <linux/i2c.h>
22#include <linux/i2c/max732x.h> 23#include <linux/i2c/max732x.h>
23 24
@@ -31,7 +32,8 @@
31 * - Open Drain I/O 32 * - Open Drain I/O
32 * 33 *
33 * designated by 'O', 'I' and 'P' individually according to MAXIM's 34 * designated by 'O', 'I' and 'P' individually according to MAXIM's
 34 * datasheets. 35 * datasheets. 'I' and 'P' ports are interrupt capable, some with
36 * a dedicated interrupt mask.
35 * 37 *
36 * There are two groups of I/O ports, each group usually includes 38 * There are two groups of I/O ports, each group usually includes
37 * up to 8 I/O ports, and is accessed by a specific I2C address: 39 * up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
44 * 46 *
45 * Within each group of ports, there are five known combinations of 47 * Within each group of ports, there are five known combinations of
46 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for 48 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
 47 * the detailed organization of these ports. 49 * the detailed organization of these ports. Only Group A is interrupt
50 * capable.
48 * 51 *
49 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16', 52 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
50 * and GPIOs from GROUP_A are numbered before those from GROUP_B 53 * and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
68#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */ 71#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
69#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */ 72#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
70 73
74#define INT_NONE 0x0 /* No interrupt capability */
75#define INT_NO_MASK 0x1 /* Has interrupts, no mask */
76#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */
77#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */
78
79#define INT_CAPS(x) (((uint64_t)(x)) << 32)
80
81enum {
82 MAX7319,
83 MAX7320,
84 MAX7321,
85 MAX7322,
86 MAX7323,
87 MAX7324,
88 MAX7325,
89 MAX7326,
90 MAX7327,
91};
92
93static uint64_t max732x_features[] = {
94 [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
95 [MAX7320] = GROUP_B(IO_8O),
96 [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
97 [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
98 [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
99 [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
100 [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
101 [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
102 [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
103};
104
71static const struct i2c_device_id max732x_id[] = { 105static const struct i2c_device_id max732x_id[] = {
72 { "max7319", GROUP_A(IO_8I) }, 106 { "max7319", MAX7319 },
73 { "max7320", GROUP_B(IO_8O) }, 107 { "max7320", MAX7320 },
74 { "max7321", GROUP_A(IO_8P) }, 108 { "max7321", MAX7321 },
75 { "max7322", GROUP_A(IO_4I4O) }, 109 { "max7322", MAX7322 },
76 { "max7323", GROUP_A(IO_4P4O) }, 110 { "max7323", MAX7323 },
77 { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) }, 111 { "max7324", MAX7324 },
78 { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) }, 112 { "max7325", MAX7325 },
79 { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) }, 113 { "max7326", MAX7326 },
80 { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) }, 114 { "max7327", MAX7327 },
81 { }, 115 { },
82}; 116};
83MODULE_DEVICE_TABLE(i2c, max732x_id); 117MODULE_DEVICE_TABLE(i2c, max732x_id);
@@ -96,9 +130,19 @@ struct max732x_chip {
96 130
97 struct mutex lock; 131 struct mutex lock;
98 uint8_t reg_out[2]; 132 uint8_t reg_out[2];
133
134#ifdef CONFIG_GPIO_MAX732X_IRQ
135 struct mutex irq_lock;
136 int irq_base;
137 uint8_t irq_mask;
138 uint8_t irq_mask_cur;
139 uint8_t irq_trig_raise;
140 uint8_t irq_trig_fall;
141 uint8_t irq_features;
142#endif
99}; 143};
100 144
101static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) 145static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
102{ 146{
103 struct i2c_client *client; 147 struct i2c_client *client;
104 int ret; 148 int ret;
@@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
113 return 0; 157 return 0;
114} 158}
115 159
116static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val) 160static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
117{ 161{
118 struct i2c_client *client; 162 struct i2c_client *client;
119 int ret; 163 int ret;
@@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
142 186
143 chip = container_of(gc, struct max732x_chip, gpio_chip); 187 chip = container_of(gc, struct max732x_chip, gpio_chip);
144 188
145 ret = max732x_read(chip, is_group_a(chip, off), &reg_val); 189 ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
146 if (ret < 0) 190 if (ret < 0)
147 return 0; 191 return 0;
148 192
@@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
162 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0]; 206 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
163 reg_out = (val) ? reg_out | mask : reg_out & ~mask; 207 reg_out = (val) ? reg_out | mask : reg_out & ~mask;
164 208
165 ret = max732x_write(chip, is_group_a(chip, off), reg_out); 209 ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
166 if (ret < 0) 210 if (ret < 0)
167 goto out; 211 goto out;
168 212
@@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
188 return -EACCES; 232 return -EACCES;
189 } 233 }
190 234
235 /*
236 * Open-drain pins must be set to high impedance (which is
237 * equivalent to output-high) to be turned into an input.
238 */
 239 if (mask & chip->dir_output)
240 max732x_gpio_set_value(gc, off, 1);
241
191 return 0; 242 return 0;
192} 243}
193 244
@@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc,
209 return 0; 260 return 0;
210} 261}
211 262
263#ifdef CONFIG_GPIO_MAX732X_IRQ
264static int max732x_writew(struct max732x_chip *chip, uint16_t val)
265{
266 int ret;
267
268 val = cpu_to_le16(val);
269
270 ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
271 if (ret < 0) {
272 dev_err(&chip->client_group_a->dev, "failed writing\n");
273 return ret;
274 }
275
276 return 0;
277}
278
279static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
280{
281 int ret;
282
283 ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
284 if (ret < 0) {
285 dev_err(&chip->client_group_a->dev, "failed reading\n");
286 return ret;
287 }
288
289 *val = le16_to_cpu(*val);
290 return 0;
291}
292
293static void max732x_irq_update_mask(struct max732x_chip *chip)
294{
295 uint16_t msg;
296
297 if (chip->irq_mask == chip->irq_mask_cur)
298 return;
299
300 chip->irq_mask = chip->irq_mask_cur;
301
302 if (chip->irq_features == INT_NO_MASK)
303 return;
304
305 mutex_lock(&chip->lock);
306
307 switch (chip->irq_features) {
308 case INT_INDEP_MASK:
309 msg = (chip->irq_mask << 8) | chip->reg_out[0];
310 max732x_writew(chip, msg);
311 break;
312
313 case INT_MERGED_MASK:
314 msg = chip->irq_mask | chip->reg_out[0];
315 max732x_writeb(chip, 1, (uint8_t)msg);
316 break;
317 }
318
319 mutex_unlock(&chip->lock);
320}
321
322static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
323{
324 struct max732x_chip *chip;
325
326 chip = container_of(gc, struct max732x_chip, gpio_chip);
327 return chip->irq_base + off;
328}
329
330static void max732x_irq_mask(unsigned int irq)
331{
332 struct max732x_chip *chip = get_irq_chip_data(irq);
333
334 chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
335}
336
337static void max732x_irq_unmask(unsigned int irq)
338{
339 struct max732x_chip *chip = get_irq_chip_data(irq);
340
341 chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
342}
343
344static void max732x_irq_bus_lock(unsigned int irq)
345{
346 struct max732x_chip *chip = get_irq_chip_data(irq);
347
348 mutex_lock(&chip->irq_lock);
349 chip->irq_mask_cur = chip->irq_mask;
350}
351
352static void max732x_irq_bus_sync_unlock(unsigned int irq)
353{
354 struct max732x_chip *chip = get_irq_chip_data(irq);
355
356 max732x_irq_update_mask(chip);
357 mutex_unlock(&chip->irq_lock);
358}
359
360static int max732x_irq_set_type(unsigned int irq, unsigned int type)
361{
362 struct max732x_chip *chip = get_irq_chip_data(irq);
363 uint16_t off = irq - chip->irq_base;
364 uint16_t mask = 1 << off;
365
366 if (!(mask & chip->dir_input)) {
367 dev_dbg(&chip->client->dev, "%s port %d is output only\n",
368 chip->client->name, off);
369 return -EACCES;
370 }
371
372 if (!(type & IRQ_TYPE_EDGE_BOTH)) {
373 dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
374 irq, type);
375 return -EINVAL;
376 }
377
378 if (type & IRQ_TYPE_EDGE_FALLING)
379 chip->irq_trig_fall |= mask;
380 else
381 chip->irq_trig_fall &= ~mask;
382
383 if (type & IRQ_TYPE_EDGE_RISING)
384 chip->irq_trig_raise |= mask;
385 else
386 chip->irq_trig_raise &= ~mask;
387
388 return max732x_gpio_direction_input(&chip->gpio_chip, off);
389}
390
391static struct irq_chip max732x_irq_chip = {
392 .name = "max732x",
393 .mask = max732x_irq_mask,
394 .unmask = max732x_irq_unmask,
395 .bus_lock = max732x_irq_bus_lock,
396 .bus_sync_unlock = max732x_irq_bus_sync_unlock,
397 .set_type = max732x_irq_set_type,
398};
399
400static uint8_t max732x_irq_pending(struct max732x_chip *chip)
401{
402 uint8_t cur_stat;
403 uint8_t old_stat;
404 uint8_t trigger;
405 uint8_t pending;
406 uint16_t status;
407 int ret;
408
409 ret = max732x_readw(chip, &status);
410 if (ret)
411 return 0;
412
413 trigger = status >> 8;
414 trigger &= chip->irq_mask;
415
416 if (!trigger)
417 return 0;
418
419 cur_stat = status & 0xFF;
420 cur_stat &= chip->irq_mask;
421
422 old_stat = cur_stat ^ trigger;
423
424 pending = (old_stat & chip->irq_trig_fall) |
425 (cur_stat & chip->irq_trig_raise);
426 pending &= trigger;
427
428 return pending;
429}
430
431static irqreturn_t max732x_irq_handler(int irq, void *devid)
432{
433 struct max732x_chip *chip = devid;
434 uint8_t pending;
435 uint8_t level;
436
437 pending = max732x_irq_pending(chip);
438
439 if (!pending)
440 return IRQ_HANDLED;
441
442 do {
443 level = __ffs(pending);
444 handle_nested_irq(level + chip->irq_base);
445
446 pending &= ~(1 << level);
447 } while (pending);
448
449 return IRQ_HANDLED;
450}
451
452static int max732x_irq_setup(struct max732x_chip *chip,
453 const struct i2c_device_id *id)
454{
455 struct i2c_client *client = chip->client;
456 struct max732x_platform_data *pdata = client->dev.platform_data;
457 int has_irq = max732x_features[id->driver_data] >> 32;
458 int ret;
459
460 if (pdata->irq_base && has_irq != INT_NONE) {
461 int lvl;
462
463 chip->irq_base = pdata->irq_base;
464 chip->irq_features = has_irq;
465 mutex_init(&chip->irq_lock);
466
467 for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
468 int irq = lvl + chip->irq_base;
469
470 if (!(chip->dir_input & (1 << lvl)))
471 continue;
472
473 set_irq_chip_data(irq, chip);
474 set_irq_chip_and_handler(irq, &max732x_irq_chip,
475 handle_edge_irq);
476 set_irq_nested_thread(irq, 1);
477#ifdef CONFIG_ARM
478 set_irq_flags(irq, IRQF_VALID);
479#else
480 set_irq_noprobe(irq);
481#endif
482 }
483
484 ret = request_threaded_irq(client->irq,
485 NULL,
486 max732x_irq_handler,
487 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
488 dev_name(&client->dev), chip);
489 if (ret) {
490 dev_err(&client->dev, "failed to request irq %d\n",
491 client->irq);
492 goto out_failed;
493 }
494
495 chip->gpio_chip.to_irq = max732x_gpio_to_irq;
496 }
497
498 return 0;
499
500out_failed:
501 chip->irq_base = 0;
502 return ret;
503}
504
505static void max732x_irq_teardown(struct max732x_chip *chip)
506{
507 if (chip->irq_base)
508 free_irq(chip->client->irq, chip);
509}
510#else /* CONFIG_GPIO_MAX732X_IRQ */
511static int max732x_irq_setup(struct max732x_chip *chip,
512 const struct i2c_device_id *id)
513{
514 struct i2c_client *client = chip->client;
515 struct max732x_platform_data *pdata = client->dev.platform_data;
516 int has_irq = max732x_features[id->driver_data] >> 32;
517
518 if (pdata->irq_base && has_irq != INT_NONE)
519 dev_warn(&client->dev, "interrupt support not compiled in\n");
520
521 return 0;
522}
523
524static void max732x_irq_teardown(struct max732x_chip *chip)
525{
526}
527#endif
528
212static int __devinit max732x_setup_gpio(struct max732x_chip *chip, 529static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
213 const struct i2c_device_id *id, 530 const struct i2c_device_id *id,
214 unsigned gpio_start) 531 unsigned gpio_start)
215{ 532{
216 struct gpio_chip *gc = &chip->gpio_chip; 533 struct gpio_chip *gc = &chip->gpio_chip;
217 uint32_t id_data = id->driver_data; 534 uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
218 int i, port = 0; 535 int i, port = 0;
219 536
220 for (i = 0; i < 16; i++, id_data >>= 2) { 537 for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client,
285 switch (client->addr & 0x70) { 602 switch (client->addr & 0x70) {
286 case 0x60: 603 case 0x60:
287 chip->client_group_a = client; 604 chip->client_group_a = client;
288 if (nr_port > 7) { 605 if (nr_port > 8) {
289 c = i2c_new_dummy(client->adapter, addr_b); 606 c = i2c_new_dummy(client->adapter, addr_b);
290 chip->client_group_b = chip->client_dummy = c; 607 chip->client_group_b = chip->client_dummy = c;
291 } 608 }
292 break; 609 break;
293 case 0x50: 610 case 0x50:
294 chip->client_group_b = client; 611 chip->client_group_b = client;
295 if (nr_port > 7) { 612 if (nr_port > 8) {
296 c = i2c_new_dummy(client->adapter, addr_a); 613 c = i2c_new_dummy(client->adapter, addr_a);
297 chip->client_group_a = chip->client_dummy = c; 614 chip->client_group_a = chip->client_dummy = c;
298 } 615 }
@@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client,
306 623
307 mutex_init(&chip->lock); 624 mutex_init(&chip->lock);
308 625
309 max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]); 626 max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
310 if (nr_port > 7) 627 if (nr_port > 8)
311 max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]); 628 max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
629
630 ret = max732x_irq_setup(chip, id);
631 if (ret)
632 goto out_failed;
312 633
313 ret = gpiochip_add(&chip->gpio_chip); 634 ret = gpiochip_add(&chip->gpio_chip);
314 if (ret) 635 if (ret)
@@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client,
325 return 0; 646 return 0;
326 647
327out_failed: 648out_failed:
649 max732x_irq_teardown(chip);
328 kfree(chip); 650 kfree(chip);
329 return ret; 651 return ret;
330} 652}
@@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client)
352 return ret; 674 return ret;
353 } 675 }
354 676
677 max732x_irq_teardown(chip);
678
355 /* unregister any dummy i2c_client */ 679 /* unregister any dummy i2c_client */
356 if (chip->client_dummy) 680 if (chip->client_dummy)
357 i2c_unregister_device(chip->client_dummy); 681 i2c_unregister_device(chip->client_dummy);
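
With this rework, driver_data is an index into max732x_features[], whose 64-bit entries carry the port layout in the low 32 bits (the format the old driver_data held directly) and the interrupt capability in the high 32 bits via INT_CAPS(). A sketch of the decode that max732x_setup_gpio() and max732x_irq_setup() perform, with a made-up layout value in the low word (the real ones come from the driver's GROUP_A()/GROUP_B() and IO_* macros):

    #include <stdint.h>
    #include <stdio.h>

    #define INT_NO_MASK     0x1                     /* as defined in the patch */
    #define INT_CAPS(x)     (((uint64_t)(x)) << 32)

    int main(void)
    {
            /* placeholder layout word: 2 bits per port, 16 ports */
            uint64_t features = 0x00005555 | INT_CAPS(INT_NO_MASK);

            uint32_t id_data = (uint32_t)features;  /* fed to max732x_setup_gpio() */
            int has_irq = features >> 32;           /* INT_NONE..INT_MERGED_MASK */

            printf("ports=0x%08x irq_caps=%d\n", (unsigned)id_data, has_irq);
            return 0;
    }
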
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index f156ab3bb6ed..a2b12aa1f2b9 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@ struct pca953x_chip {
73 struct i2c_client *client; 73 struct i2c_client *client;
74 struct pca953x_platform_data *dyn_pdata; 74 struct pca953x_platform_data *dyn_pdata;
75 struct gpio_chip gpio_chip; 75 struct gpio_chip gpio_chip;
76 char **names; 76 const char *const *names;
77}; 77};
78 78
79static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) 79static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a1f05b..ee568c8fcbd0 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
164 unsigned long flags; 164 unsigned long flags;
165 u8 gpiois, gpioibe, gpioiev; 165 u8 gpiois, gpioibe, gpioiev;
166 166
167 if (offset < 0 || offset > PL061_GPIO_NR) 167 if (offset < 0 || offset >= PL061_GPIO_NR)
168 return -EINVAL; 168 return -EINVAL;
169 169
170 spin_lock_irqsave(&chip->irq_lock, flags); 170 spin_lock_irqsave(&chip->irq_lock, flags);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f569ae88ab38..c1981861bbbd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -147,7 +147,10 @@ drm_edid_block_valid(u8 *raw_edid)
147 csum += raw_edid[i]; 147 csum += raw_edid[i];
148 if (csum) { 148 if (csum) {
149 DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); 149 DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
150 goto bad; 150
151 /* allow CEA to slide through, switches mangle this */
152 if (raw_edid[0] != 0x02)
153 goto bad;
151 } 154 }
152 155
153 /* per-block-type checks */ 156 /* per-block-type checks */
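
For reference, an EDID block is 128 bytes whose byte sum must be 0 modulo 256; the hunk above relaxes that rule for CEA extension blocks (first byte is the tag 0x02) because KVM switches are known to rewrite them without fixing the checksum. A sketch of just the resulting acceptance rule, leaving out the function's other per-block checks:

    #include <stdint.h>

    #define EDID_LENGTH 128

    /* nonzero means reject; CEA extension blocks (tag 0x02) slide
     * through even with a bad checksum */
    static int edid_block_bad(const uint8_t *blk)
    {
            uint8_t csum = 0;
            int i;

            for (i = 0; i < EDID_LENGTH; i++)
                    csum += blk[i];

            return csum != 0 && blk[0] != 0x02;
    }
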
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e663a79829f..266b0ff441af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector)
241 if (nv_encoder && nv_connector->native_mode) { 241 if (nv_encoder && nv_connector->native_mode) {
242 unsigned status = connector_status_connected; 242 unsigned status = connector_status_connected;
243 243
244#ifdef CONFIG_ACPI 244#if defined(CONFIG_ACPI_BUTTON) || \
245 (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
245 if (!nouveau_ignorelid && !acpi_lid_open()) 246 if (!nouveau_ignorelid && !acpi_lid_open())
246 status = connector_status_unknown; 247 status = connector_status_unknown;
247#endif 248#endif
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96e4b67..704a25d04ac9 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
253 253
254 if (!dev_priv->engine.graph.ctxprog) { 254 if (!dev_priv->engine.graph.ctxprog) {
255 struct nouveau_grctx ctx = {}; 255 struct nouveau_grctx ctx = {};
256 uint32_t cp[256]; 256 uint32_t *cp;
257
258 cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
259 if (!cp)
260 return -ENOMEM;
257 261
258 ctx.dev = dev; 262 ctx.dev = dev;
259 ctx.mode = NOUVEAU_GRCTX_PROG; 263 ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
265 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 269 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
266 for (i = 0; i < ctx.ctxprog_len; i++) 270 for (i = 0; i < ctx.ctxprog_len; i++)
267 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); 271 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
272
273 kfree(cp);
268 } 274 }
269 275
270 /* No context present currently */ 276 /* No context present currently */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 03dd6c41dc19..f3f2827017ef 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -707,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
707 break; 707 break;
708 case ATOM_DCPLL: 708 case ATOM_DCPLL:
709 case ATOM_PPLL_INVALID: 709 case ATOM_PPLL_INVALID:
710 default:
710 pll = &rdev->clock.dcpll; 711 pll = &rdev->clock.dcpll;
711 break; 712 break;
712 } 713 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 66a37fb75839..669feb689bfc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -576,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
576 */ 576 */
577int radeon_agp_init(struct radeon_device *rdev); 577int radeon_agp_init(struct radeon_device *rdev);
578void radeon_agp_resume(struct radeon_device *rdev); 578void radeon_agp_resume(struct radeon_device *rdev);
579void radeon_agp_suspend(struct radeon_device *rdev);
579void radeon_agp_fini(struct radeon_device *rdev); 580void radeon_agp_fini(struct radeon_device *rdev);
580 581
581 582
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f1f56f..f40dfb77f9b1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
270 } 270 }
271#endif 271#endif
272} 272}
273
274void radeon_agp_suspend(struct radeon_device *rdev)
275{
276 radeon_agp_fini(rdev);
277}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6e733fdc3349..24ea683f7cf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -680,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
680 uint8_t dac; 680 uint8_t dac;
681 union atom_supported_devices *supported_devices; 681 union atom_supported_devices *supported_devices;
682 int i, j, max_device; 682 int i, j, max_device;
683 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; 683 struct bios_connector *bios_connectors;
684 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
684 685
685 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) 686 bios_connectors = kzalloc(bc_size, GFP_KERNEL);
687 if (!bios_connectors)
688 return false;
689
690 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
691 &data_offset)) {
692 kfree(bios_connectors);
686 return false; 693 return false;
694 }
687 695
688 supported_devices = 696 supported_devices =
689 (union atom_supported_devices *)(ctx->bios + data_offset); 697 (union atom_supported_devices *)(ctx->bios + data_offset);
@@ -851,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
851 859
852 radeon_link_encoder_connector(dev); 860 radeon_link_encoder_connector(dev);
853 861
862 kfree(bios_connectors);
854 return true; 863 return true;
855} 864}
856 865
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a20b612ffe75..fdc3fdf78acb 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -754,6 +754,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
754 /* evict remaining vram memory */ 754 /* evict remaining vram memory */
755 radeon_bo_evict_vram(rdev); 755 radeon_bo_evict_vram(rdev);
756 756
757 radeon_agp_suspend(rdev);
758
757 pci_save_state(dev->pdev); 759 pci_save_state(dev->pdev);
758 if (state.event == PM_EVENT_SUSPEND) { 760 if (state.event == PM_EVENT_SUSPEND) {
759 /* Shut down the device */ 761 /* Shut down the device */
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 76ba59b9fea1..132278fa6240 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -347,6 +347,14 @@ config HID_QUANTA
347 ---help--- 347 ---help---
348 Support for Quanta Optical Touch dual-touch panels. 348 Support for Quanta Optical Touch dual-touch panels.
349 349
350config HID_ROCCAT
351 tristate "Roccat special event support"
352 depends on USB_HID
353 ---help---
354 Support for Roccat special events.
355 Say Y here if you have a Roccat mouse or keyboard and want OSD or
356 macro execution support.
357
350config HID_ROCCAT_KONE 358config HID_ROCCAT_KONE
351 tristate "Roccat Kone Mouse support" 359 tristate "Roccat Kone Mouse support"
352 depends on USB_HID 360 depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22e47eaeea32..987fa0627367 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
48obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o 48obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
49obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o 49obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
50obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o 50obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
51obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
51obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o 52obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
52obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o 53obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
53obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o 54obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e10e314d38cc..aa0f7dcabcd7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,6 +1301,7 @@ static const struct hid_device_id hid_blacklist[] = {
1301 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, 1301 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
1302 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 1302 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
1303 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, 1303 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
1304 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
1304 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1305 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1305 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, 1306 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1306 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1307 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314fbd4f9..c94026768570 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = {
811 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", 811 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
812}; 812};
813 813
814static const char *absolutes[ABS_MAX + 1] = { 814static const char *absolutes[ABS_CNT] = {
815 [ABS_X] = "X", [ABS_Y] = "Y", 815 [ABS_X] = "X", [ABS_Y] = "Y",
816 [ABS_Z] = "Z", [ABS_RX] = "Rx", 816 [ABS_Z] = "Z", [ABS_RX] = "Rx",
817 [ABS_RY] = "Ry", [ABS_RZ] = "Rz", 817 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 62416e6baeca..3975e039c3dd 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
73static const struct hid_device_id gyration_devices[] = { 73static const struct hid_device_id gyration_devices[] = {
74 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 74 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
75 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, 75 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
76 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
76 { } 77 { }
77}; 78};
78MODULE_DEVICE_TABLE(hid, gyration_devices); 79MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9776896cc4fc..6af77ed0b555 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -282,6 +282,7 @@
282#define USB_VENDOR_ID_GYRATION 0x0c16 282#define USB_VENDOR_ID_GYRATION 0x0c16
283#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002 283#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
284#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003 284#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
285#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
285 286
286#define USB_VENDOR_ID_HAPP 0x078b 287#define USB_VENDOR_ID_HAPP 0x078b
287#define USB_DEVICE_ID_UGCI_DRIVING 0x0010 288#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 66e694054ba2..17f2dc04f883 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,7 @@
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include "hid-ids.h" 39#include "hid-ids.h"
40#include "hid-roccat.h"
40#include "hid-roccat-kone.h" 41#include "hid-roccat-kone.h"
41 42
42static void kone_set_settings_checksum(struct kone_settings *settings) 43static void kone_set_settings_checksum(struct kone_settings *settings)
@@ -263,7 +264,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
263 return 0; 264 return 0;
264} 265}
265 266
266static ssize_t kone_sysfs_read_settings(struct kobject *kobj, 267static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
267 struct bin_attribute *attr, char *buf, 268 struct bin_attribute *attr, char *buf,
268 loff_t off, size_t count) { 269 loff_t off, size_t count) {
269 struct device *dev = container_of(kobj, struct device, kobj); 270 struct device *dev = container_of(kobj, struct device, kobj);
@@ -287,7 +288,7 @@ static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
287 * This function keeps values in kone_device up to date and assumes that in 288 * This function keeps values in kone_device up to date and assumes that in
288 * case of error the old data is still valid 289 * case of error the old data is still valid
289 */ 290 */
290static ssize_t kone_sysfs_write_settings(struct kobject *kobj, 291static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
291 struct bin_attribute *attr, char *buf, 292 struct bin_attribute *attr, char *buf,
292 loff_t off, size_t count) { 293 loff_t off, size_t count) {
293 struct device *dev = container_of(kobj, struct device, kobj); 294 struct device *dev = container_of(kobj, struct device, kobj);
@@ -342,31 +343,31 @@ static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
342 return count; 343 return count;
343} 344}
344 345
345static ssize_t kone_sysfs_read_profile1(struct kobject *kobj, 346static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
346 struct bin_attribute *attr, char *buf, 347 struct bin_attribute *attr, char *buf,
347 loff_t off, size_t count) { 348 loff_t off, size_t count) {
348 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1); 349 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
349} 350}
350 351
351static ssize_t kone_sysfs_read_profile2(struct kobject *kobj, 352static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
352 struct bin_attribute *attr, char *buf, 353 struct bin_attribute *attr, char *buf,
353 loff_t off, size_t count) { 354 loff_t off, size_t count) {
354 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2); 355 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
355} 356}
356 357
357static ssize_t kone_sysfs_read_profile3(struct kobject *kobj, 358static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
358 struct bin_attribute *attr, char *buf, 359 struct bin_attribute *attr, char *buf,
359 loff_t off, size_t count) { 360 loff_t off, size_t count) {
360 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3); 361 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
361} 362}
362 363
363static ssize_t kone_sysfs_read_profile4(struct kobject *kobj, 364static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
364 struct bin_attribute *attr, char *buf, 365 struct bin_attribute *attr, char *buf,
365 loff_t off, size_t count) { 366 loff_t off, size_t count) {
366 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4); 367 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
367} 368}
368 369
369static ssize_t kone_sysfs_read_profile5(struct kobject *kobj, 370static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
370 struct bin_attribute *attr, char *buf, 371 struct bin_attribute *attr, char *buf,
371 loff_t off, size_t count) { 372 loff_t off, size_t count) {
372 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5); 373 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
@@ -404,31 +405,31 @@ static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
404 return sizeof(struct kone_profile); 405 return sizeof(struct kone_profile);
405} 406}
406 407
407static ssize_t kone_sysfs_write_profile1(struct kobject *kobj, 408static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
408 struct bin_attribute *attr, char *buf, 409 struct bin_attribute *attr, char *buf,
409 loff_t off, size_t count) { 410 loff_t off, size_t count) {
410 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1); 411 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
411} 412}
412 413
413static ssize_t kone_sysfs_write_profile2(struct kobject *kobj, 414static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
414 struct bin_attribute *attr, char *buf, 415 struct bin_attribute *attr, char *buf,
415 loff_t off, size_t count) { 416 loff_t off, size_t count) {
416 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2); 417 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
417} 418}
418 419
419static ssize_t kone_sysfs_write_profile3(struct kobject *kobj, 420static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
420 struct bin_attribute *attr, char *buf, 421 struct bin_attribute *attr, char *buf,
421 loff_t off, size_t count) { 422 loff_t off, size_t count) {
422 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3); 423 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
423} 424}
424 425
425static ssize_t kone_sysfs_write_profile4(struct kobject *kobj, 426static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
426 struct bin_attribute *attr, char *buf, 427 struct bin_attribute *attr, char *buf,
427 loff_t off, size_t count) { 428 loff_t off, size_t count) {
428 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4); 429 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
429} 430}
430 431
431static ssize_t kone_sysfs_write_profile5(struct kobject *kobj, 432static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
432 struct bin_attribute *attr, char *buf, 433 struct bin_attribute *attr, char *buf,
433 loff_t off, size_t count) { 434 loff_t off, size_t count) {
434 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5); 435 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
@@ -849,6 +850,16 @@ static int kone_init_specials(struct hid_device *hdev)
849 "couldn't init struct kone_device\n"); 850 "couldn't init struct kone_device\n");
850 goto exit_free; 851 goto exit_free;
851 } 852 }
853
854 retval = roccat_connect(hdev);
855 if (retval < 0) {
856 dev_err(&hdev->dev, "couldn't init char dev\n");
857 /* be tolerant about not getting chrdev */
858 } else {
859 kone->roccat_claimed = 1;
860 kone->chrdev_minor = retval;
861 }
862
852 retval = kone_create_sysfs_attributes(intf); 863 retval = kone_create_sysfs_attributes(intf);
853 if (retval) { 864 if (retval) {
854 dev_err(&hdev->dev, "cannot create sysfs files\n"); 865 dev_err(&hdev->dev, "cannot create sysfs files\n");
@@ -868,10 +879,14 @@ exit_free:
868static void kone_remove_specials(struct hid_device *hdev) 879static void kone_remove_specials(struct hid_device *hdev)
869{ 880{
870 struct usb_interface *intf = to_usb_interface(hdev->dev.parent); 881 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
882 struct kone_device *kone;
871 883
872 if (intf->cur_altsetting->desc.bInterfaceProtocol 884 if (intf->cur_altsetting->desc.bInterfaceProtocol
873 == USB_INTERFACE_PROTOCOL_MOUSE) { 885 == USB_INTERFACE_PROTOCOL_MOUSE) {
874 kone_remove_sysfs_attributes(intf); 886 kone_remove_sysfs_attributes(intf);
887 kone = hid_get_drvdata(hdev);
888 if (kone->roccat_claimed)
889 roccat_disconnect(kone->chrdev_minor);
875 kfree(hid_get_drvdata(hdev)); 890 kfree(hid_get_drvdata(hdev));
876 } 891 }
877} 892}
@@ -930,6 +945,37 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,
930 } 945 }
931} 946}
932 947
948static void kone_report_to_chrdev(struct kone_device const *kone,
949 struct kone_mouse_event const *event)
950{
951 struct kone_roccat_report roccat_report;
952
953 switch (event->event) {
954 case kone_mouse_event_switch_profile:
955 case kone_mouse_event_switch_dpi:
956 case kone_mouse_event_osd_profile:
957 case kone_mouse_event_osd_dpi:
958 roccat_report.event = event->event;
959 roccat_report.value = event->value;
960 roccat_report.key = 0;
961 roccat_report_event(kone->chrdev_minor,
962 (uint8_t *)&roccat_report,
963 sizeof(struct kone_roccat_report));
964 break;
965 case kone_mouse_event_call_overlong_macro:
966 if (event->value == kone_keystroke_action_press) {
967 roccat_report.event = kone_mouse_event_call_overlong_macro;
968 roccat_report.value = kone->actual_profile;
969 roccat_report.key = event->macro_key;
970 roccat_report_event(kone->chrdev_minor,
971 (uint8_t *)&roccat_report,
972 sizeof(struct kone_roccat_report));
973 }
974 break;
975 }
976
977}
978
933/* 979/*
 934 * Is called for the keyboard and the mouse part. 980 * Is called for the keyboard and the mouse part.
 935 * Only the mouse part gets information about special events in its extended event 981 * Only the mouse part gets information about special events in its extended event
@@ -958,6 +1004,9 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
958 1004
959 kone_keep_values_up_to_date(kone, event); 1005 kone_keep_values_up_to_date(kone, event);
960 1006
1007 if (kone->roccat_claimed)
1008 kone_report_to_chrdev(kone, event);
1009
961 return 0; /* always do further processing */ 1010 return 0; /* always do further processing */
962} 1011}
963 1012
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index b413b10a7f8a..003e6f81c195 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -189,6 +189,12 @@ enum kone_commands {
189 kone_command_firmware = 0xe5a 189 kone_command_firmware = 0xe5a
190}; 190};
191 191
192struct kone_roccat_report {
193 uint8_t event;
194 uint8_t value; /* holds dpi or profile value */
195 uint8_t key; /* macro key on overlong macro execution */
196};
197
192#pragma pack(pop) 198#pragma pack(pop)
193 199
194struct kone_device { 200struct kone_device {
@@ -219,6 +225,9 @@ struct kone_device {
219 * so it's read only once 225 * so it's read only once
220 */ 226 */
221 int firmware_version; 227 int firmware_version;
228
229 int roccat_claimed;
230 int chrdev_minor;
222}; 231};
223 232
224#endif 233#endif
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
new file mode 100644
index 000000000000..e05d48edb66f
--- /dev/null
+++ b/drivers/hid/hid-roccat.c
@@ -0,0 +1,428 @@
1/*
2 * Roccat driver for Linux
3 *
4 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 */
13
14/*
15 * Module roccat is a char device used to report special events of roccat
16 * hardware to userland. These events include requests for on-screen-display of
17 * profile or dpi settings or requests for execution of macro sequences that are
 18 * not stored in the device. The information in these events depends on the hid
 19 * device implementation and contains data that is not available in a single hid
 20 * event, or else hidraw could have been used.
21 * It is inspired by hidraw, but uses only one circular buffer for all readers.
22 */
23
24#include <linux/cdev.h>
25#include <linux/poll.h>
26#include <linux/sched.h>
27
28#include "hid-roccat.h"
29
30#define ROCCAT_FIRST_MINOR 0
31#define ROCCAT_MAX_DEVICES 8
32
 33/* should be a power of 2 for performance reasons */
34#define ROCCAT_CBUF_SIZE 16
35
36struct roccat_report {
37 uint8_t *value;
38 int len;
39};
40
41struct roccat_device {
42 unsigned int minor;
43 int open;
44 int exist;
45 wait_queue_head_t wait;
46 struct device *dev;
47 struct hid_device *hid;
48 struct list_head readers;
49 /* protects modifications of readers list */
50 struct mutex readers_lock;
51
52 /*
53 * circular_buffer has one writer and multiple readers with their own
54 * read pointers
55 */
56 struct roccat_report cbuf[ROCCAT_CBUF_SIZE];
57 int cbuf_end;
58 struct mutex cbuf_lock;
59};
60
61struct roccat_reader {
62 struct list_head node;
63 struct roccat_device *device;
64 int cbuf_start;
65};
66
67static int roccat_major;
68static struct class *roccat_class;
69static struct cdev roccat_cdev;
70
71static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
72/* protects modifications of devices array */
73static DEFINE_MUTEX(devices_lock);
74
75static ssize_t roccat_read(struct file *file, char __user *buffer,
76 size_t count, loff_t *ppos)
77{
78 struct roccat_reader *reader = file->private_data;
79 struct roccat_device *device = reader->device;
80 struct roccat_report *report;
81 ssize_t retval = 0, len;
82 DECLARE_WAITQUEUE(wait, current);
83
84 mutex_lock(&device->cbuf_lock);
85
86 /* no data? */
87 if (reader->cbuf_start == device->cbuf_end) {
88 add_wait_queue(&device->wait, &wait);
89 set_current_state(TASK_INTERRUPTIBLE);
90
91 /* wait for data */
92 while (reader->cbuf_start == device->cbuf_end) {
93 if (file->f_flags & O_NONBLOCK) {
94 retval = -EAGAIN;
95 break;
96 }
97 if (signal_pending(current)) {
98 retval = -ERESTARTSYS;
99 break;
100 }
101 if (!device->exist) {
102 retval = -EIO;
103 break;
104 }
105
106 mutex_unlock(&device->cbuf_lock);
107 schedule();
108 mutex_lock(&device->cbuf_lock);
109 set_current_state(TASK_INTERRUPTIBLE);
110 }
111
112 set_current_state(TASK_RUNNING);
113 remove_wait_queue(&device->wait, &wait);
114 }
115
116 /* here we either have data or a reason to return if retval is set */
117 if (retval)
118 goto exit_unlock;
119
120 report = &device->cbuf[reader->cbuf_start];
121 /*
122 * If report is larger than requested amount of data, rest of report
123 * is lost!
124 */
125 len = report->len > count ? count : report->len;
126
127 if (copy_to_user(buffer, report->value, len)) {
128 retval = -EFAULT;
129 goto exit_unlock;
130 }
131 retval += len;
132 reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
133
134exit_unlock:
135 mutex_unlock(&device->cbuf_lock);
136 return retval;
137}
138
139static unsigned int roccat_poll(struct file *file, poll_table *wait)
140{
141 struct roccat_reader *reader = file->private_data;
142 poll_wait(file, &reader->device->wait, wait);
143 if (reader->cbuf_start != reader->device->cbuf_end)
144 return POLLIN | POLLRDNORM;
145 if (!reader->device->exist)
146 return POLLERR | POLLHUP;
147 return 0;
148}
149
150static int roccat_open(struct inode *inode, struct file *file)
151{
152 unsigned int minor = iminor(inode);
153 struct roccat_reader *reader;
154 struct roccat_device *device;
155 int error = 0;
156
157 reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL);
158 if (!reader)
159 return -ENOMEM;
160
161 mutex_lock(&devices_lock);
162
 163 device = devices[minor];
 164 if (!device) {
 165 printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
 166 minor);
 167 mutex_unlock(&devices_lock);
 168 kfree(reader);
 169 return -ENODEV;
 170 }
 171
 172 mutex_lock(&device->readers_lock);
173
174 if (!device->open++) {
175 /* power on device on adding first reader */
176 if (device->hid->ll_driver->power) {
177 error = device->hid->ll_driver->power(device->hid,
178 PM_HINT_FULLON);
179 if (error < 0) {
180 --device->open;
181 goto exit_unlock;
182 }
183 }
184 error = device->hid->ll_driver->open(device->hid);
185 if (error < 0) {
186 if (device->hid->ll_driver->power)
187 device->hid->ll_driver->power(device->hid,
188 PM_HINT_NORMAL);
189 --device->open;
190 goto exit_unlock;
191 }
192 }
193
194 reader->device = device;
195 /* new reader doesn't get old events */
196 reader->cbuf_start = device->cbuf_end;
197
198 list_add_tail(&reader->node, &device->readers);
199 file->private_data = reader;
200
201exit_unlock:
202 mutex_unlock(&device->readers_lock);
203 mutex_unlock(&devices_lock);
204 return error;
205}
206
207static int roccat_release(struct inode *inode, struct file *file)
208{
209 unsigned int minor = iminor(inode);
210 struct roccat_reader *reader = file->private_data;
211 struct roccat_device *device;
212
213 mutex_lock(&devices_lock);
214
215 device = devices[minor];
216 if (!device) {
217 mutex_unlock(&devices_lock);
218 printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
219 minor);
220 return -ENODEV;
221 }
222
223 mutex_lock(&device->readers_lock);
224 list_del(&reader->node);
225 mutex_unlock(&device->readers_lock);
226 kfree(reader);
227
228 if (!--device->open) {
229 /* removing last reader */
230 if (device->exist) {
231 if (device->hid->ll_driver->power)
232 device->hid->ll_driver->power(device->hid,
233 PM_HINT_NORMAL);
234 device->hid->ll_driver->close(device->hid);
235 } else {
236 kfree(device);
237 }
238 }
239
240 mutex_unlock(&devices_lock);
241
242 return 0;
243}
244
245/*
246 * roccat_report_event() - output data to readers
247 * @minor: minor device number returned by roccat_connect()
248 * @data: pointer to data
249 * @len: size of data
250 *
251 * Return value is zero on success, a negative error code on failure.
252 *
253 * This is called from interrupt handler.
254 */
255int roccat_report_event(int minor, u8 const *data, int len)
256{
257 struct roccat_device *device;
258 struct roccat_reader *reader;
259 struct roccat_report *report;
260 uint8_t *new_value;
261
262 new_value = kmemdup(data, len, GFP_ATOMIC);
263 if (!new_value)
264 return -ENOMEM;
265
266 device = devices[minor];
267
268 report = &device->cbuf[device->cbuf_end];
269
270 /* passing NULL is safe */
271 kfree(report->value);
272
273 report->value = new_value;
274 report->len = len;
275 device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
276
277 list_for_each_entry(reader, &device->readers, node) {
278 /*
279 * As we already inserted one element, the buffer can't be
280 * empty. If start and end are equal, buffer is full and we
281 * increase start, so that slow reader misses one event, but
282 * gets the newer ones in the right order.
283 */
284 if (reader->cbuf_start == device->cbuf_end)
285 reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
286 }
287
288 wake_up_interruptible(&device->wait);
289 return 0;
290}
291EXPORT_SYMBOL_GPL(roccat_report_event);
292
293/*
294 * roccat_connect() - create a char device for special event output
295 * @hid: the hid device the char device should be connected to.
296 *
 297 * Return value is the minor device number in the range [0, ROCCAT_MAX_DEVICES) on
298 * success, a negative error code on failure.
299 */
300int roccat_connect(struct hid_device *hid)
301{
302 unsigned int minor;
303 struct roccat_device *device;
304 int temp;
305
306 device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL);
307 if (!device)
308 return -ENOMEM;
309
310 mutex_lock(&devices_lock);
311
312 for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) {
313 if (devices[minor])
314 continue;
315 break;
316 }
317
318 if (minor < ROCCAT_MAX_DEVICES) {
319 devices[minor] = device;
320 } else {
321 mutex_unlock(&devices_lock);
322 kfree(device);
323 return -EINVAL;
324 }
325
326 device->dev = device_create(roccat_class, &hid->dev,
327 MKDEV(roccat_major, minor), NULL,
328 "%s%s%d", "roccat", hid->driver->name, minor);
329
330 if (IS_ERR(device->dev)) {
331 devices[minor] = NULL;
332 mutex_unlock(&devices_lock);
333 temp = PTR_ERR(device->dev);
334 kfree(device);
335 return temp;
336 }
337
338 mutex_unlock(&devices_lock);
339
340 init_waitqueue_head(&device->wait);
341 INIT_LIST_HEAD(&device->readers);
342 mutex_init(&device->readers_lock);
343 mutex_init(&device->cbuf_lock);
344 device->minor = minor;
345 device->hid = hid;
346 device->exist = 1;
347 device->cbuf_end = 0;
348
349 return minor;
350}
351EXPORT_SYMBOL_GPL(roccat_connect);
352
353/* roccat_disconnect() - remove char device from hid device
354 * @minor: the minor device number returned by roccat_connect()
355 */
356void roccat_disconnect(int minor)
357{
358 struct roccat_device *device;
359
360 mutex_lock(&devices_lock);
361 device = devices[minor];
362 devices[minor] = NULL;
363 mutex_unlock(&devices_lock);
364
365 device->exist = 0; /* TODO exist maybe not needed */
366
367 device_destroy(roccat_class, MKDEV(roccat_major, minor));
368
369 if (device->open) {
370 device->hid->ll_driver->close(device->hid);
371 wake_up_interruptible(&device->wait);
372 } else {
373 kfree(device);
374 }
375}
376EXPORT_SYMBOL_GPL(roccat_disconnect);
377
378static const struct file_operations roccat_ops = {
379 .owner = THIS_MODULE,
380 .read = roccat_read,
381 .poll = roccat_poll,
382 .open = roccat_open,
383 .release = roccat_release,
384};
385
386static int __init roccat_init(void)
387{
388 int retval;
389 dev_t dev_id;
390
391 retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
392 ROCCAT_MAX_DEVICES, "roccat");
393
394 roccat_major = MAJOR(dev_id);
395
396 if (retval < 0) {
397 printk(KERN_WARNING "roccat: can't get major number\n");
398 return retval;
399 }
400
401 roccat_class = class_create(THIS_MODULE, "roccat");
402 if (IS_ERR(roccat_class)) {
403 retval = PTR_ERR(roccat_class);
404 unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
405 return retval;
406 }
407
408 cdev_init(&roccat_cdev, &roccat_ops);
409 cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
410
411 return 0;
412}
413
414static void __exit roccat_exit(void)
415{
416 dev_t dev_id = MKDEV(roccat_major, 0);
417
418 cdev_del(&roccat_cdev);
419 class_destroy(roccat_class);
420 unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
421}
422
423module_init(roccat_init);
424module_exit(roccat_exit);
425
426MODULE_AUTHOR("Stefan Achatz");
427MODULE_DESCRIPTION("USB Roccat char device");
428MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
new file mode 100644
index 000000000000..d8aae0c1fa7e
--- /dev/null
+++ b/drivers/hid/hid-roccat.h
@@ -0,0 +1,31 @@
1#ifndef __HID_ROCCAT_H
2#define __HID_ROCCAT_H
3
4/*
5 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
6 */
7
8/*
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 */
14
15#include <linux/hid.h>
16#include <linux/types.h>
17
18#if defined(CONFIG_HID_ROCCAT) || defined (CONFIG_HID_ROCCAT_MODULE)
19int roccat_connect(struct hid_device *hid);
20void roccat_disconnect(int minor);
21int roccat_report_event(int minor, u8 const *data, int len);
22#else
23static inline int roccat_connect(struct hid_device *hid) { return -1; }
24static inline void roccat_disconnect(int minor) {}
25static inline int roccat_report_event(int minor, u8 const *data, int len)
26{
27 return 0;
28}
29#endif
30
31#endif
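The header follows the standard kernel idiom for optional facilities: when the feature is built in (CONFIG_HID_ROCCAT) or as a module (CONFIG_HID_ROCCAT_MODULE), callers see the real prototypes; otherwise they get static inline stubs that compile away, so calling code needs no #ifdef of its own. The same shape, reduced to a hypothetical FOO facility:

    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
    int foo_connect(struct hid_device *hid);
    void foo_disconnect(int minor);
    #else
    /* Stubs: callers may invoke these unconditionally; the compiler
     * removes them entirely when the feature is configured out. */
    static inline int foo_connect(struct hid_device *hid) { return -1; }
    static inline void foo_disconnect(int minor) {}
    #endif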
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae552cafb..e571e60ecb88 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
847 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); 847 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
848 if (!create_comp_task(pool, cpu)) { 848 if (!create_comp_task(pool, cpu)) {
849 ehca_gen_err("Can't create comp_task for cpu: %x", cpu); 849 ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
850 return NOTIFY_BAD; 850 return notifier_from_errno(-ENOMEM);
851 } 851 }
852 break; 852 break;
853 case CPU_UP_CANCELED: 853 case CPU_UP_CANCELED:
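This is one of several NOTIFY_BAD -> notifier_from_errno() conversions in this series: instead of an opaque failure, the CPU-hotplug notifier now encodes the real errno into its return value, which the caller can recover with notifier_to_errno(). Roughly how the helpers in include/linux/notifier.h of this era pack and unpack the value (abridged sketch, not the full header):

    static inline int notifier_from_errno(int err)
    {
            if (err)
                    return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
            return NOTIFY_OK;
    }

    /* Restore the (negative) errno from a notifier return value. */
    static inline int notifier_to_errno(int ret)
    {
            ret &= ~NOTIFY_STOP_MASK;
            return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
    }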
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6031ab..34157bb97ed6 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@ struct joydev {
47 struct mutex mutex; 47 struct mutex mutex;
48 struct device dev; 48 struct device dev;
49 49
50 struct js_corr corr[ABS_MAX + 1]; 50 struct js_corr corr[ABS_CNT];
51 struct JS_DATA_SAVE_TYPE glue; 51 struct JS_DATA_SAVE_TYPE glue;
52 int nabs; 52 int nabs;
53 int nkey; 53 int nkey;
54 __u16 keymap[KEY_MAX - BTN_MISC + 1]; 54 __u16 keymap[KEY_MAX - BTN_MISC + 1];
55 __u16 keypam[KEY_MAX - BTN_MISC + 1]; 55 __u16 keypam[KEY_MAX - BTN_MISC + 1];
56 __u8 absmap[ABS_MAX + 1]; 56 __u8 absmap[ABS_CNT];
57 __u8 abspam[ABS_MAX + 1]; 57 __u8 abspam[ABS_CNT];
58 __s16 abs[ABS_MAX + 1]; 58 __s16 abs[ABS_CNT];
59}; 59};
60 60
61struct joydev_client { 61struct joydev_client {
@@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
826 joydev->handle.handler = handler; 826 joydev->handle.handler = handler;
827 joydev->handle.private = joydev; 827 joydev->handle.private = joydev;
828 828
829 for (i = 0; i < ABS_MAX + 1; i++) 829 for (i = 0; i < ABS_CNT; i++)
830 if (test_bit(i, dev->absbit)) { 830 if (test_bit(i, dev->absbit)) {
831 joydev->absmap[i] = joydev->nabs; 831 joydev->absmap[i] = joydev->nabs;
832 joydev->abspam[joydev->nabs] = i; 832 joydev->abspam[joydev->nabs] = i;
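ABS_CNT is the conventional _CNT companion of a _MAX event-code constant: _MAX is the largest valid code, so arrays indexed by code need one more slot, and spelling that as _CNT removes the error-prone "+ 1" from every declaration site, which is exactly what this hunk does. In input.h terms (values as of this kernel, paraphrased):

    #define ABS_MAX         0x3f            /* highest valid axis code */
    #define ABS_CNT         (ABS_MAX + 1)   /* array size covering all codes */

    __s16 abs[ABS_CNT];                     /* indexable by any code in [0, ABS_MAX] */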
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 48cdabec372a..c44b9eafc556 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -80,6 +80,16 @@ config INPUT_M68K_BEEP
80 tristate "M68k Beeper support" 80 tristate "M68k Beeper support"
81 depends on M68K 81 depends on M68K
82 82
83config INPUT_MAX8925_ONKEY
84 tristate "MAX8925 ONKEY support"
85 depends on MFD_MAX8925
86 help
87 Support the ONKEY of MAX8925 PMICs as an input device
88 reporting power button status.
89
90 To compile this driver as a module, choose M here: the module
91 will be called max8925_onkey.
92
83config INPUT_APANEL 93config INPUT_APANEL
84 tristate "Fujitsu Lifebook Application Panel buttons" 94 tristate "Fujitsu Lifebook Application Panel buttons"
85 depends on X86 && I2C && LEDS_CLASS 95 depends on X86 && I2C && LEDS_CLASS
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f9f577031e06..71fe57d8023f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
20obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o 20obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
21obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o 21obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
22obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o 22obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
23obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
23obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o 24obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
24obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o 25obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
25obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o 26obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 000000000000..80af44608018
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
1/**
2 * max8925_onkey.c - MAX8925 ONKEY driver
3 *
4 * Copyright (C) 2009 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file "COPYING" in the main directory of this
9 * archive for more details.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/i2c.h>
25#include <linux/input.h>
26#include <linux/interrupt.h>
27#include <linux/mfd/max8925.h>
28#include <linux/slab.h>
29
30#define HARDRESET_EN (1 << 7)
31#define PWREN_EN (1 << 7)
32
33struct max8925_onkey_info {
34 struct input_dev *idev;
35 struct i2c_client *i2c;
36 int irq;
37};
38
39/*
40 * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
 41 * max8925_set_bits() operates on the I2C bus and may sleep, so
 42 * implement it in a threaded IRQ handler.
43 */
44static irqreturn_t max8925_onkey_handler(int irq, void *data)
45{
46 struct max8925_onkey_info *info = data;
47
48 input_report_key(info->idev, KEY_POWER, 1);
49 input_sync(info->idev);
50
 51 /* Enable hardreset to halt if the system isn't shut down in time */
52 max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
53 HARDRESET_EN, HARDRESET_EN);
54
55 return IRQ_HANDLED;
56}
57
58static int __devinit max8925_onkey_probe(struct platform_device *pdev)
59{
60 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
61 struct max8925_onkey_info *info;
62 int error;
63
64 info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
65 if (!info)
66 return -ENOMEM;
67
68 info->i2c = chip->i2c;
69 info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
70
71 info->idev = input_allocate_device();
72 if (!info->idev) {
73 dev_err(chip->dev, "Failed to allocate input dev\n");
74 error = -ENOMEM;
75 goto out_input;
76 }
77
78 info->idev->name = "max8925_on";
79 info->idev->phys = "max8925_on/input0";
80 info->idev->id.bustype = BUS_I2C;
81 info->idev->dev.parent = &pdev->dev;
82 info->idev->evbit[0] = BIT_MASK(EV_KEY);
83 info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
84
85 error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
86 IRQF_ONESHOT, "onkey", info);
87 if (error < 0) {
88 dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
89 info->irq, error);
90 goto out_irq;
91 }
92
93 error = input_register_device(info->idev);
94 if (error) {
95 dev_err(chip->dev, "Can't register input device: %d\n", error);
96 goto out;
97 }
98
99 platform_set_drvdata(pdev, info);
100
101 return 0;
102
103out:
104 free_irq(info->irq, info);
105out_irq:
106 input_free_device(info->idev);
107out_input:
108 kfree(info);
109 return error;
110}
111
112static int __devexit max8925_onkey_remove(struct platform_device *pdev)
113{
114 struct max8925_onkey_info *info = platform_get_drvdata(pdev);
115
116 free_irq(info->irq, info);
117 input_unregister_device(info->idev);
118 kfree(info);
119
120 platform_set_drvdata(pdev, NULL);
121
122 return 0;
123}
124
125static struct platform_driver max8925_onkey_driver = {
126 .driver = {
127 .name = "max8925-onkey",
128 .owner = THIS_MODULE,
129 },
130 .probe = max8925_onkey_probe,
131 .remove = __devexit_p(max8925_onkey_remove),
132};
133
134static int __init max8925_onkey_init(void)
135{
136 return platform_driver_register(&max8925_onkey_driver);
137}
138module_init(max8925_onkey_init);
139
140static void __exit max8925_onkey_exit(void)
141{
142 platform_driver_unregister(&max8925_onkey_driver);
143}
144module_exit(max8925_onkey_exit);
145
146MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
147MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
148MODULE_LICENSE("GPL");
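The probe above passes a NULL hard handler to request_threaded_irq(), so all the work runs in a kernel thread where sleeping on the I2C bus is legal; IRQF_ONESHOT keeps the interrupt line masked until the thread handler returns, preventing a level-triggered source from re-firing in the meantime. A condensed sketch of the pattern, with hypothetical chip names:

    static irqreturn_t foo_irq_thread(int irq, void *data)
    {
            struct foo_chip *chip = data;

            /* Runs in process context: sleeping bus I/O is fine here. */
            foo_i2c_clear_irq(chip);
            return IRQ_HANDLED;
    }

    /* NULL hard handler + IRQF_ONESHOT: the line stays masked from the
     * hard IRQ until foo_irq_thread() completes. */
    ret = request_threaded_irq(chip->irq, NULL, foo_irq_thread,
                               IRQF_ONESHOT, "foo", chip);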
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac8e04a..4f9b2afc24e8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL); 91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
92 92
93 twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
94 twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL); 93 twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
94 twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
95 95
96 info->enabled = false; 96 info->enabled = false;
97} 97}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466076ad..b71eb55f2dbc 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
300 unsigned int cnt; 300 unsigned int cnt;
301 int retval = 0; 301 int retval = 0;
302 302
303 for (cnt = 0; cnt < ABS_MAX + 1; cnt++) { 303 for (cnt = 0; cnt < ABS_CNT; cnt++) {
304 if (!test_bit(cnt, dev->absbit)) 304 if (!test_bit(cnt, dev->absbit))
305 continue; 305 continue;
306 306
@@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
387 dev->id.product = user_dev->id.product; 387 dev->id.product = user_dev->id.product;
388 dev->id.version = user_dev->id.version; 388 dev->id.version = user_dev->id.version;
389 389
390 size = sizeof(int) * (ABS_MAX + 1); 390 size = sizeof(int) * ABS_CNT;
391 memcpy(dev->absmax, user_dev->absmax, size); 391 memcpy(dev->absmax, user_dev->absmax, size);
392 memcpy(dev->absmin, user_dev->absmin, size); 392 memcpy(dev->absmin, user_dev->absmin, size);
393 memcpy(dev->absfuzz, user_dev->absfuzz, size); 393 memcpy(dev->absfuzz, user_dev->absfuzz, size);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279cda0e4..634f6f6b9b13 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
1163 1163
1164 ts->reg = regulator_get(&spi->dev, "vcc"); 1164 ts->reg = regulator_get(&spi->dev, "vcc");
1165 if (IS_ERR(ts->reg)) { 1165 if (IS_ERR(ts->reg)) {
1166 dev_err(&spi->dev, "unable to get regulator: %ld\n", 1166 err = PTR_ERR(ts->reg);
1167 PTR_ERR(ts->reg)); 1167 dev_err(&spi->dev, "unable to get regulator: %ld\n", err);
1168 goto err_free_gpio; 1168 goto err_free_gpio;
1169 } 1169 }
1170 1170
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index e0b7c834111d..ac5d0f9b0cb1 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -413,6 +413,8 @@ static struct dev_pm_ops s3c_ts_pmops = {
413#endif 413#endif
414 414
415static struct platform_device_id s3cts_driver_ids[] = { 415static struct platform_device_id s3cts_driver_ids[] = {
416 { "s3c2410-ts", 0 },
417 { "s3c2440-ts", 0 },
416 { "s3c64xx-ts", FEAT_PEN_IRQ }, 418 { "s3c64xx-ts", FEAT_PEN_IRQ },
417 { } 419 { }
418}; 420};
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 29a8bbf3f086..567d57215c28 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -857,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
857 if ((pkt[0] & 0xe0) != 0xe0) 857 if ((pkt[0] & 0xe0) != 0xe0)
858 return 0; 858 return 0;
859 859
860 if (be16_to_cpu(packet->data_len) > 0xff)
861 packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
862 if (be16_to_cpu(packet->x_len) > 0xff)
863 packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
864
860 /* send ACK */ 865 /* send ACK */
861 ret = usb_submit_urb(priv->ack, GFP_ATOMIC); 866 ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
862 867
@@ -1112,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
1112 1117
1113#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO 1118#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
1114 [DEVTYPE_NEXIO] = { 1119 [DEVTYPE_NEXIO] = {
1115 .rept_size = 128, 1120 .rept_size = 1024,
1116 .irq_always = true, 1121 .irq_always = true,
1117 .read_data = nexio_read_data, 1122 .read_data = nexio_read_data,
1118 .init = nexio_init, 1123 .init = nexio_init,
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c3243c913ec0..81048b8ed8ad 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -98,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
98 if (*debug & DEBUG_TIMER) 98 if (*debug & DEBUG_TIMER)
99 printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__, 99 printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
100 filep, buf, (int)count, off); 100 filep, buf, (int)count, off);
101 if (*off != filep->f_pos)
102 return -ESPIPE;
103 101
104 if (list_empty(&dev->expired) && (dev->work == 0)) { 102 if (list_empty(&dev->expired) && (dev->work == 0)) {
105 if (filep->f_flags & O_NONBLOCK) 103 if (filep->f_flags & O_NONBLOCK)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ea17d6c799b..d2c0f94fa37d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4645,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4645 kfree(percpu->scribble); 4645 kfree(percpu->scribble);
4646 pr_err("%s: failed memory allocation for cpu%ld\n", 4646 pr_err("%s: failed memory allocation for cpu%ld\n",
4647 __func__, cpu); 4647 __func__, cpu);
4648 return NOTIFY_BAD; 4648 return notifier_from_errno(-ENOMEM);
4649 } 4649 }
4650 break; 4650 break;
4651 case CPU_DEAD: 4651 case CPU_DEAD:
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index d33693c13368..c4b117f5fb70 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
186 if (!dev) 186 if (!dev)
187 return -ENXIO; 187 return -ENXIO;
188 188
189 ops = kmalloc(kcmd.oplen, GFP_KERNEL); 189 ops = memdup_user(kcmd.opbuf, kcmd.oplen);
190 if (!ops) 190 if (IS_ERR(ops))
191 return -ENOMEM; 191 return PTR_ERR(ops);
192
193 if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
194 kfree(ops);
195 return -EFAULT;
196 }
197 192
198 /* 193 /*
199 * It's possible to have a _very_ large table 194 * It's possible to have a _very_ large table
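memdup_user() folds the kmalloc()/copy_from_user()/kfree-on-error triple into a single call that returns either a kernel copy of the user buffer or an ERR_PTR-encoded errno. The conversion pattern used in this hunk, shown in isolation:

    /* Before: two failure paths to get wrong */
    ops = kmalloc(len, GFP_KERNEL);
    if (!ops)
            return -ENOMEM;
    if (copy_from_user(ops, ubuf, len)) {
            kfree(ops);
            return -EFAULT;
    }

    /* After: one call; note the IS_ERR/PTR_ERR convention, not NULL */
    ops = memdup_user(ubuf, len);
    if (IS_ERR(ops))
            return PTR_ERR(ops);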
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a991161f0a..5bfb2a2041b8 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -75,6 +75,9 @@ enum ctype {
75 UNALIGNED_LOAD_STORE_WRITE, 75 UNALIGNED_LOAD_STORE_WRITE,
76 OVERWRITE_ALLOCATION, 76 OVERWRITE_ALLOCATION,
77 WRITE_AFTER_FREE, 77 WRITE_AFTER_FREE,
78 SOFTLOCKUP,
79 HARDLOCKUP,
80 HUNG_TASK,
78}; 81};
79 82
80static char* cp_name[] = { 83static char* cp_name[] = {
@@ -99,6 +102,9 @@ static char* cp_type[] = {
99 "UNALIGNED_LOAD_STORE_WRITE", 102 "UNALIGNED_LOAD_STORE_WRITE",
100 "OVERWRITE_ALLOCATION", 103 "OVERWRITE_ALLOCATION",
101 "WRITE_AFTER_FREE", 104 "WRITE_AFTER_FREE",
105 "SOFTLOCKUP",
106 "HARDLOCKUP",
107 "HUNG_TASK",
102}; 108};
103 109
104static struct jprobe lkdtm; 110static struct jprobe lkdtm;
@@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which)
320 memset(data, 0x78, len); 326 memset(data, 0x78, len);
321 break; 327 break;
322 } 328 }
329 case SOFTLOCKUP:
330 preempt_disable();
331 for (;;)
332 cpu_relax();
333 break;
334 case HARDLOCKUP:
335 local_irq_disable();
336 for (;;)
337 cpu_relax();
338 break;
339 case HUNG_TASK:
340 set_current_state(TASK_UNINTERRUPTIBLE);
341 schedule();
342 break;
323 case NONE: 343 case NONE:
324 default: 344 default:
325 break; 345 break;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd616b2..569e94da844c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep);
1252/** 1252/**
1253 * mmc_suspend_host - suspend a host 1253 * mmc_suspend_host - suspend a host
1254 * @host: mmc host 1254 * @host: mmc host
1255 * @state: suspend mode (PM_SUSPEND_xxx)
1256 */ 1255 */
1257int mmc_suspend_host(struct mmc_host *host, pm_message_t state) 1256int mmc_suspend_host(struct mmc_host *host)
1258{ 1257{
1259 int err = 0; 1258 int err = 0;
1260 1259
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080d44b0..63772e7e7608 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
79 * we cannot use the retries field in mmc_command. 79 * we cannot use the retries field in mmc_command.
80 */ 80 */
81 for (i = 0;i <= retries;i++) { 81 for (i = 0;i <= retries;i++) {
82 memset(&mrq, 0, sizeof(struct mmc_request));
83
84 err = mmc_app_cmd(host, card); 82 err = mmc_app_cmd(host, card);
85 if (err) { 83 if (err) {
86 /* no point in retrying; no APP commands allowed */ 84 /* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c71355..0f687cdeb064 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
406EXPORT_SYMBOL_GPL(sdio_writeb); 406EXPORT_SYMBOL_GPL(sdio_writeb);
407 407
408/** 408/**
409 * sdio_writeb_readb - write and read a byte from SDIO function
410 * @func: SDIO function to access
411 * @write_byte: byte to write
412 * @addr: address to write to
413 * @err_ret: optional status value from transfer
414 *
 415 * Performs a RAW (Read after Write) operation as defined by the SDIO spec:
 416 * a single byte is written to the address space of a given SDIO function and
 417 * the response is read back from the same address, both using a single request.
418 * If there is a problem with the operation, 0xff is returned and
419 * @err_ret will contain the error code.
420 */
421u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
422 unsigned int addr, int *err_ret)
423{
424 int ret;
425 u8 val;
426
427 ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
428 write_byte, &val);
429 if (err_ret)
430 *err_ret = ret;
431 if (ret)
432 val = 0xff;
433
434 return val;
435}
436EXPORT_SYMBOL_GPL(sdio_writeb_readb);
437
438/**
409 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function 439 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function
410 * @func: SDIO function to access 440 * @func: SDIO function to access
411 * @dst: buffer to store the data 441 * @dst: buffer to store the data
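A RAW (read-after-write) transfer is handy when a function register must be written and its post-write state sampled in one bus transaction, e.g. write-to-clear interrupt registers. A hedged usage sketch from a hypothetical SDIO function driver (ACK_BIT and REG_INT_STATUS are made-up names):

    int err;
    u8 val;

    sdio_claim_host(func);
    /* One CMD52 with the RAW flag set: write the ack bit and read the
     * register back atomically. */
    val = sdio_writeb_readb(func, ACK_BIT, REG_INT_STATUS, &err);
    sdio_release_host(func);
    if (err)
            dev_err(&func->dev, "RAW transfer failed: %d\n", err);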
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94769fd..e171e77f6129 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@ config MMC_SDHCI_S3C
136 136
137 If unsure, say N. 137 If unsure, say N.
138 138
139config MMC_SDHCI_SPEAR
140 tristate "SDHCI support on ST SPEAr platform"
141 depends on MMC_SDHCI && PLAT_SPEAR
142 help
143 This selects the Secure Digital Host Controller Interface (SDHCI)
 144 often referred to as the HSMMC block in some of the ST SPEAr range
 145 of SoCs.
146
147 If you have a controller with this interface, say Y or M here.
148
149 If unsure, say N.
150
139config MMC_SDHCI_S3C_DMA 151config MMC_SDHCI_S3C_DMA
140 bool "DMA support on S3C SDHCI" 152 bool "DMA support on S3C SDHCI"
141 depends on MMC_SDHCI_S3C && EXPERIMENTAL 153 depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
412 depends on SDH_BFIN 424 depends on SDH_BFIN
413 help 425 help
414 If you say yes here SD-Cards may work on the EZkit. 426 If you say yes here SD-Cards may work on the EZkit.
427
428config MMC_SH_MMCIF
429 tristate "SuperH Internal MMCIF support"
430 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
431 help
 432 This selects the MMC Host Interface controller (MMCIF).
433
434 This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4803977dfce..e30c2ee48894 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
17obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
17obj-$(CONFIG_MMC_WBSD) += wbsd.o 18obj-$(CONFIG_MMC_WBSD) += wbsd.o
18obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 19obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
19obj-$(CONFIG_MMC_OMAP) += omap.o 20obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
34obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 35obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
35obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 36obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
36obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 37obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
38obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
37 39
38obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 40obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
39sdhci-of-y := sdhci-of-core.o 41sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 336d9f553f3e..5f3a599ead07 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1157 enable_irq_wake(host->board->det_pin); 1157 enable_irq_wake(host->board->det_pin);
1158 1158
1159 if (mmc) 1159 if (mmc)
1160 ret = mmc_suspend_host(mmc, state); 1160 ret = mmc_suspend_host(mmc);
1161 1161
1162 return ret; 1162 return ret;
1163} 1163}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index df0e8a88d85f..95ef864ad8f9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@ struct atmel_mci {
173 * @mmc: The mmc_host representing this slot. 173 * @mmc: The mmc_host representing this slot.
174 * @host: The MMC controller this slot is using. 174 * @host: The MMC controller this slot is using.
175 * @sdc_reg: Value of SDCR to be written before using this slot. 175 * @sdc_reg: Value of SDCR to be written before using this slot.
176 * @sdio_irq: SDIO irq mask for this slot.
176 * @mrq: mmc_request currently being processed or waiting to be 177 * @mrq: mmc_request currently being processed or waiting to be
177 * processed, or NULL when the slot is idle. 178 * processed, or NULL when the slot is idle.
178 * @queue_node: List node for placing this node in the @queue list of 179 * @queue_node: List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@ struct atmel_mci_slot {
191 struct atmel_mci *host; 192 struct atmel_mci *host;
192 193
193 u32 sdc_reg; 194 u32 sdc_reg;
195 u32 sdio_irq;
194 196
195 struct mmc_request *mrq; 197 struct mmc_request *mrq;
196 struct list_head queue_node; 198 struct list_head queue_node;
@@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host,
792 mci_writel(host, SDCR, slot->sdc_reg); 794 mci_writel(host, SDCR, slot->sdc_reg);
793 795
794 iflags = mci_readl(host, IMR); 796 iflags = mci_readl(host, IMR);
795 if (iflags) 797 if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
796 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", 798 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
797 iflags); 799 iflags);
798 800
@@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
952 if (mci_has_rwproof()) 954 if (mci_has_rwproof())
953 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); 955 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
954 956
955 if (list_empty(&host->queue)) 957 if (atmci_is_mci2()) {
 958 /* set up High Speed mode according to the card capability */
959 if (ios->timing == MMC_TIMING_SD_HS)
960 host->cfg_reg |= MCI_CFG_HSMODE;
961 else
962 host->cfg_reg &= ~MCI_CFG_HSMODE;
963 }
964
965 if (list_empty(&host->queue)) {
956 mci_writel(host, MR, host->mode_reg); 966 mci_writel(host, MR, host->mode_reg);
957 else 967 if (atmci_is_mci2())
968 mci_writel(host, CFG, host->cfg_reg);
969 } else {
958 host->need_clock_update = true; 970 host->need_clock_update = true;
971 }
959 972
960 spin_unlock_bh(&host->lock); 973 spin_unlock_bh(&host->lock);
961 } else { 974 } else {
@@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc)
1030 return present; 1043 return present;
1031} 1044}
1032 1045
1046static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1047{
1048 struct atmel_mci_slot *slot = mmc_priv(mmc);
1049 struct atmel_mci *host = slot->host;
1050
1051 if (enable)
1052 mci_writel(host, IER, slot->sdio_irq);
1053 else
1054 mci_writel(host, IDR, slot->sdio_irq);
1055}
1056
1033static const struct mmc_host_ops atmci_ops = { 1057static const struct mmc_host_ops atmci_ops = {
1034 .request = atmci_request, 1058 .request = atmci_request,
1035 .set_ios = atmci_set_ios, 1059 .set_ios = atmci_set_ios,
1036 .get_ro = atmci_get_ro, 1060 .get_ro = atmci_get_ro,
1037 .get_cd = atmci_get_cd, 1061 .get_cd = atmci_get_cd,
1062 .enable_sdio_irq = atmci_enable_sdio_irq,
1038}; 1063};
1039 1064
1040/* Called with host->lock held */ 1065/* Called with host->lock held */
@@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1052 * necessary if set_ios() is called when a different slot is 1077 * necessary if set_ios() is called when a different slot is
 1053 * busy transferring data. 1078 * busy transferring data.
1054 */ 1079 */
1055 if (host->need_clock_update) 1080 if (host->need_clock_update) {
1056 mci_writel(host, MR, host->mode_reg); 1081 mci_writel(host, MR, host->mode_reg);
1082 if (atmci_is_mci2())
1083 mci_writel(host, CFG, host->cfg_reg);
1084 }
1057 1085
1058 host->cur_slot->mrq = NULL; 1086 host->cur_slot->mrq = NULL;
1059 host->mrq = NULL; 1087 host->mrq = NULL;
@@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1483 tasklet_schedule(&host->tasklet); 1511 tasklet_schedule(&host->tasklet);
1484} 1512}
1485 1513
1514static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1515{
1516 int i;
1517
1518 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1519 struct atmel_mci_slot *slot = host->slot[i];
1520 if (slot && (status & slot->sdio_irq)) {
1521 mmc_signal_sdio_irq(slot->mmc);
1522 }
1523 }
1524}
1525
1526
1486static irqreturn_t atmci_interrupt(int irq, void *dev_id) 1527static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1487{ 1528{
1488 struct atmel_mci *host = dev_id; 1529 struct atmel_mci *host = dev_id;
@@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1522 1563
1523 if (pending & MCI_CMDRDY) 1564 if (pending & MCI_CMDRDY)
1524 atmci_cmd_interrupt(host, status); 1565 atmci_cmd_interrupt(host, status);
1566
1567 if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
1568 atmci_sdio_interrupt(host, status);
1569
1525 } while (pass_count++ < 5); 1570 } while (pass_count++ < 5);
1526 1571
1527 return pass_count ? IRQ_HANDLED : IRQ_NONE; 1572 return pass_count ? IRQ_HANDLED : IRQ_NONE;
@@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1544 1589
1545static int __init atmci_init_slot(struct atmel_mci *host, 1590static int __init atmci_init_slot(struct atmel_mci *host,
1546 struct mci_slot_pdata *slot_data, unsigned int id, 1591 struct mci_slot_pdata *slot_data, unsigned int id,
1547 u32 sdc_reg) 1592 u32 sdc_reg, u32 sdio_irq)
1548{ 1593{
1549 struct mmc_host *mmc; 1594 struct mmc_host *mmc;
1550 struct atmel_mci_slot *slot; 1595 struct atmel_mci_slot *slot;
@@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1560 slot->wp_pin = slot_data->wp_pin; 1605 slot->wp_pin = slot_data->wp_pin;
1561 slot->detect_is_active_high = slot_data->detect_is_active_high; 1606 slot->detect_is_active_high = slot_data->detect_is_active_high;
1562 slot->sdc_reg = sdc_reg; 1607 slot->sdc_reg = sdc_reg;
1608 slot->sdio_irq = sdio_irq;
1563 1609
1564 mmc->ops = &atmci_ops; 1610 mmc->ops = &atmci_ops;
1565 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); 1611 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1566 mmc->f_max = host->bus_hz / 2; 1612 mmc->f_max = host->bus_hz / 2;
1567 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1613 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1614 if (sdio_irq)
1615 mmc->caps |= MMC_CAP_SDIO_IRQ;
1616 if (atmci_is_mci2())
1617 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1568 if (slot_data->bus_width >= 4) 1618 if (slot_data->bus_width >= 4)
1569 mmc->caps |= MMC_CAP_4_BIT_DATA; 1619 mmc->caps |= MMC_CAP_4_BIT_DATA;
1570 1620
@@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1753 ret = -ENODEV; 1803 ret = -ENODEV;
1754 if (pdata->slot[0].bus_width) { 1804 if (pdata->slot[0].bus_width) {
1755 ret = atmci_init_slot(host, &pdata->slot[0], 1805 ret = atmci_init_slot(host, &pdata->slot[0],
1756 0, MCI_SDCSEL_SLOT_A); 1806 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
1757 if (!ret) 1807 if (!ret)
1758 nr_slots++; 1808 nr_slots++;
1759 } 1809 }
1760 if (pdata->slot[1].bus_width) { 1810 if (pdata->slot[1].bus_width) {
1761 ret = atmci_init_slot(host, &pdata->slot[1], 1811 ret = atmci_init_slot(host, &pdata->slot[1],
1762 1, MCI_SDCSEL_SLOT_B); 1812 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
1763 if (!ret) 1813 if (!ret)
1764 nr_slots++; 1814 nr_slots++;
1765 } 1815 }
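The SDIO IRQ additions lean on the Atmel MCI register convention of paired IER/IDR (interrupt enable/disable) registers: 1-bits written to IER unmask those sources, the same bits written to IDR mask them, and IMR reflects the resulting mask, so toggling a single source never needs a read-modify-write cycle or a lock around it. The enable path from the hunk, reduced to its skeleton (wrapper name hypothetical):

    static void mci_sdio_irq_set(struct atmel_mci *host, u32 mask, bool on)
    {
            /* IER/IDR are write-1-to-set / write-1-to-clear companions:
             * no RMW cycle, so this is safe against concurrent maskers. */
            if (on)
                    mci_writel(host, IER, mask);
            else
                    mci_writel(host, IDR, mask);
    }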
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5834449400e..c8da5d30a861 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1142 struct au1xmmc_host *host = platform_get_drvdata(pdev); 1142 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1143 int ret; 1143 int ret;
1144 1144
1145 ret = mmc_suspend_host(host->mmc, state); 1145 ret = mmc_suspend_host(host->mmc);
1146 if (ret) 1146 if (ret)
1147 return ret; 1147 return ret;
1148 1148
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e844072c..4b0e677d7295 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
576 int ret = 0; 576 int ret = 0;
577 577
578 if (mmc) 578 if (mmc)
579 ret = mmc_suspend_host(mmc, state); 579 ret = mmc_suspend_host(mmc);
580 580
581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); 581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
582 peripheral_free_list(drv_data->pin_req); 582 peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7417c..ca3bdc831900 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
675 struct mmc_host *mmc = cb710_slot_to_mmc(slot); 675 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
676 int err; 676 int err;
677 677
678 err = mmc_suspend_host(mmc, state); 678 err = mmc_suspend_host(mmc);
679 if (err) 679 if (err)
680 return err; 680 return err;
681 681
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..33d9f1b00862 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
137 137
138/* 138/*
139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
140 * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 140 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB) 141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
142 * than the page or two that's otherwise typical. NR_SG == 16 gives at 142 * than the page or two that's otherwise typical. nr_sg (passed from
143 * least the same throughput boost, using EDMA transfer linkage instead 143 * platform data) == 16 gives at least the same throughput boost, using
144 * of spending CPU time copying pages. 144 * EDMA transfer linkage instead of spending CPU time copying pages.
145 */ 145 */
146#define MAX_CCNT ((1 << 16) - 1) 146#define MAX_CCNT ((1 << 16) - 1)
147 147
148#define NR_SG 16 148#define MAX_NR_SG 16
149 149
150static unsigned rw_threshold = 32; 150static unsigned rw_threshold = 32;
151module_param(rw_threshold, uint, S_IRUGO); 151module_param(rw_threshold, uint, S_IRUGO);
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
171#define DAVINCI_MMC_DATADIR_READ 1 171#define DAVINCI_MMC_DATADIR_READ 1
172#define DAVINCI_MMC_DATADIR_WRITE 2 172#define DAVINCI_MMC_DATADIR_WRITE 2
173 unsigned char data_dir; 173 unsigned char data_dir;
174 unsigned char suspended;
174 175
175 /* buffer is used during PIO of one scatterlist segment, and 176 /* buffer is used during PIO of one scatterlist segment, and
176 * is updated along with buffer_bytes_left. bytes_left applies 177 * is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
192 struct edmacc_param tx_template; 193 struct edmacc_param tx_template;
193 struct edmacc_param rx_template; 194 struct edmacc_param rx_template;
194 unsigned n_link; 195 unsigned n_link;
195 u32 links[NR_SG - 1]; 196 u32 links[MAX_NR_SG - 1];
196 197
197 /* For PIO we walk scatterlists one segment at a time. */ 198 /* For PIO we walk scatterlists one segment at a time. */
198 unsigned int sg_len; 199 unsigned int sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
202 u8 version; 203 u8 version;
203 /* for ns in one cycle calculation */ 204 /* for ns in one cycle calculation */
204 unsigned ns_in_one_cycle; 205 unsigned ns_in_one_cycle;
206 /* Number of sg segments */
207 u8 nr_sg;
205#ifdef CONFIG_CPU_FREQ 208#ifdef CONFIG_CPU_FREQ
206 struct notifier_block freq_transition; 209 struct notifier_block freq_transition;
207#endif 210#endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
568 571
569static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 572static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
570{ 573{
574 u32 link_size;
571 int r, i; 575 int r, i;
572 576
573 /* Acquire master DMA write channel */ 577 /* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
593 /* Allocate parameter RAM slots, which will later be bound to a 597 /* Allocate parameter RAM slots, which will later be bound to a
594 * channel as needed to handle a scatterlist. 598 * channel as needed to handle a scatterlist.
595 */ 599 */
596 for (i = 0; i < ARRAY_SIZE(host->links); i++) { 600 link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
601 for (i = 0; i < link_size; i++) {
597 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); 602 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
598 if (r < 0) { 603 if (r < 0) {
599 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", 604 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
905 } 910 }
906} 911}
907 912
908static void 913static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
909davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 914 int val)
910{ 915{
911 u32 temp; 916 u32 temp;
912 917
913 /* reset command and data state machines */
914 temp = readl(host->base + DAVINCI_MMCCTL); 918 temp = readl(host->base + DAVINCI_MMCCTL);
915 writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, 919 if (val) /* reset */
916 host->base + DAVINCI_MMCCTL); 920 temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
921 else /* enable */
922 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
917 923
918 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
919 udelay(10);
920 writel(temp, host->base + DAVINCI_MMCCTL); 924 writel(temp, host->base + DAVINCI_MMCCTL);
925 udelay(10);
926}
927
928static void
929davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
930{
931 mmc_davinci_reset_ctrl(host, 1);
932 mmc_davinci_reset_ctrl(host, 0);
921} 933}
922 934
923static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1121#endif 1133#endif
1122static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1134static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1123{ 1135{
 1124 /* DAT line portion is disabled and in reset state */
1125 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
1126 host->base + DAVINCI_MMCCTL);
1127
 1128 /* CMD line portion is disabled and in reset state */
1129 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
1130 host->base + DAVINCI_MMCCTL);
1131 1136
1132 udelay(10); 1137 mmc_davinci_reset_ctrl(host, 1);
1133 1138
1134 writel(0, host->base + DAVINCI_MMCCLK); 1139 writel(0, host->base + DAVINCI_MMCCLK);
1135 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1140 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1137 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1142 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
1138 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1143 writel(0xFFFF, host->base + DAVINCI_MMCTOD);
1139 1144
1140 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, 1145 mmc_davinci_reset_ctrl(host, 0);
1141 host->base + DAVINCI_MMCCTL);
1142 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
1143 host->base + DAVINCI_MMCCTL);
1144
1145 udelay(10);
1146} 1146}
1147 1147
1148static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1148static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1202 1202
1203 init_mmcsd_host(host); 1203 init_mmcsd_host(host);
1204 1204
1205 if (pdata->nr_sg)
1206 host->nr_sg = pdata->nr_sg - 1;
1207
1208 if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
1209 host->nr_sg = MAX_NR_SG;
1210
1205 host->use_dma = use_dma; 1211 host->use_dma = use_dma;
1206 host->irq = irq; 1212 host->irq = irq;
1207 1213
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1327} 1333}
1328 1334
1329#ifdef CONFIG_PM 1335#ifdef CONFIG_PM
1330static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) 1336static int davinci_mmcsd_suspend(struct device *dev)
1331{ 1337{
1338 struct platform_device *pdev = to_platform_device(dev);
1332 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1340 int ret;
1333 1341
1334 return mmc_suspend_host(host->mmc, msg); 1342 mmc_host_enable(host->mmc);
1343 ret = mmc_suspend_host(host->mmc);
1344 if (!ret) {
1345 writel(0, host->base + DAVINCI_MMCIM);
1346 mmc_davinci_reset_ctrl(host, 1);
1347 mmc_host_disable(host->mmc);
1348 clk_disable(host->clk);
1349 host->suspended = 1;
1350 } else {
1351 host->suspended = 0;
1352 mmc_host_disable(host->mmc);
1353 }
1354
1355 return ret;
1335} 1356}
1336 1357
1337static int davinci_mmcsd_resume(struct platform_device *pdev) 1358static int davinci_mmcsd_resume(struct device *dev)
1338{ 1359{
1360 struct platform_device *pdev = to_platform_device(dev);
1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1361 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1362 int ret;
1363
1364 if (!host->suspended)
1365 return 0;
1340 1366
1341 return mmc_resume_host(host->mmc); 1367 clk_enable(host->clk);
1368 mmc_host_enable(host->mmc);
1369
1370 mmc_davinci_reset_ctrl(host, 0);
1371 ret = mmc_resume_host(host->mmc);
1372 if (!ret)
1373 host->suspended = 0;
1374
1375 return ret;
1342} 1376}
1377
1378static const struct dev_pm_ops davinci_mmcsd_pm = {
1379 .suspend = davinci_mmcsd_suspend,
1380 .resume = davinci_mmcsd_resume,
1381};
1382
1383#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
1343#else 1384#else
1344#define davinci_mmcsd_suspend NULL 1385#define davinci_mmcsd_pm_ops NULL
1345#define davinci_mmcsd_resume NULL
1346#endif 1386#endif
1347 1387
1348static struct platform_driver davinci_mmcsd_driver = { 1388static struct platform_driver davinci_mmcsd_driver = {
1349 .driver = { 1389 .driver = {
1350 .name = "davinci_mmc", 1390 .name = "davinci_mmc",
1351 .owner = THIS_MODULE, 1391 .owner = THIS_MODULE,
1392 .pm = davinci_mmcsd_pm_ops,
1352 }, 1393 },
1353 .remove = __exit_p(davinci_mmcsd_remove), 1394 .remove = __exit_p(davinci_mmcsd_remove),
1354 .suspend = davinci_mmcsd_suspend,
1355 .resume = davinci_mmcsd_resume,
1356}; 1395};
1357 1396
1358static int __init davinci_mmcsd_init(void) 1397static int __init davinci_mmcsd_init(void)
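Besides the suspend rework, this diff converts davinci_mmc from the legacy platform_driver .suspend/.resume callbacks (which take a platform_device and a pm_message_t) to a dev_pm_ops table referenced from driver.pm. The minimal shape of such a conversion, assuming a hypothetical driver named foo:

    #ifdef CONFIG_PM
    static int foo_suspend(struct device *dev)
    {
            struct platform_device *pdev = to_platform_device(dev);
            /* ... quiesce hardware, save context ... */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* ... restore context, re-enable hardware ... */
            return 0;
    }

    static const struct dev_pm_ops foo_pm = {
            .suspend = foo_suspend,
            .resume  = foo_resume,
    };
    #define FOO_PM_OPS (&foo_pm)
    #else
    #define FOO_PM_OPS NULL
    #endif

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .owner = THIS_MODULE,
                    .pm = FOO_PM_OPS,       /* replaces .suspend/.resume */
            },
    };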
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7cc928a..9a68ff4353a2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1115 int ret = 0; 1115 int ret = 0;
1116 1116
1117 if (mmc) 1117 if (mmc)
1118 ret = mmc_suspend_host(mmc, state); 1118 ret = mmc_suspend_host(mmc);
1119 1119
1120 return ret; 1120 return ret;
1121} 1121}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ff115d920888..4917af96bae1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -824,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
824 if (mmc) { 824 if (mmc) {
825 struct mmci_host *host = mmc_priv(mmc); 825 struct mmci_host *host = mmc_priv(mmc);
826 826
827 ret = mmc_suspend_host(mmc, state); 827 ret = mmc_suspend_host(mmc);
828 if (ret == 0) 828 if (ret == 0)
829 writel(0, host->base + MMCIMASK0); 829 writel(0, host->base + MMCIMASK0);
830 } 830 }
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 61f1d27fed3f..24e09454e522 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1327,7 +1327,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1327 disable_irq(host->stat_irq); 1327 disable_irq(host->stat_irq);
1328 1328
1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) 1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1330 rc = mmc_suspend_host(mmc, state); 1330 rc = mmc_suspend_host(mmc);
1331 if (!rc) 1331 if (!rc)
1332 msmsdcc_writel(host, 0, MMCIMASK0); 1332 msmsdcc_writel(host, 0, MMCIMASK0);
1333 if (host->clks_on) 1333 if (host->clks_on)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e23489811a..366eefa77c5a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
865 int ret = 0; 865 int ret = 0;
866 866
867 if (mmc) 867 if (mmc)
868 ret = mmc_suspend_host(mmc, state); 868 ret = mmc_suspend_host(mmc);
869 869
870 return ret; 870 return ret;
871} 871}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index ec18e3b60342..d9d4a72e0ec7 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -932,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state)
932 int ret = 0; 932 int ret = 0;
933 933
934 if (mmc) 934 if (mmc)
935 ret = mmc_suspend_host(mmc, state); 935 ret = mmc_suspend_host(mmc);
936 936
937 return ret; 937 return ret;
938} 938}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d280406341..2b281680e320 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
39#include <plat/fpga.h> 39#include <plat/fpga.h>
40 40
41#define OMAP_MMC_REG_CMD 0x00 41#define OMAP_MMC_REG_CMD 0x00
42#define OMAP_MMC_REG_ARGL 0x04 42#define OMAP_MMC_REG_ARGL 0x01
43#define OMAP_MMC_REG_ARGH 0x08 43#define OMAP_MMC_REG_ARGH 0x02
44#define OMAP_MMC_REG_CON 0x0c 44#define OMAP_MMC_REG_CON 0x03
45#define OMAP_MMC_REG_STAT 0x10 45#define OMAP_MMC_REG_STAT 0x04
46#define OMAP_MMC_REG_IE 0x14 46#define OMAP_MMC_REG_IE 0x05
47#define OMAP_MMC_REG_CTO 0x18 47#define OMAP_MMC_REG_CTO 0x06
48#define OMAP_MMC_REG_DTO 0x1c 48#define OMAP_MMC_REG_DTO 0x07
49#define OMAP_MMC_REG_DATA 0x20 49#define OMAP_MMC_REG_DATA 0x08
50#define OMAP_MMC_REG_BLEN 0x24 50#define OMAP_MMC_REG_BLEN 0x09
51#define OMAP_MMC_REG_NBLK 0x28 51#define OMAP_MMC_REG_NBLK 0x0a
52#define OMAP_MMC_REG_BUF 0x2c 52#define OMAP_MMC_REG_BUF 0x0b
53#define OMAP_MMC_REG_SDIO 0x34 53#define OMAP_MMC_REG_SDIO 0x0d
54#define OMAP_MMC_REG_REV 0x3c 54#define OMAP_MMC_REG_REV 0x0f
55#define OMAP_MMC_REG_RSP0 0x40 55#define OMAP_MMC_REG_RSP0 0x10
56#define OMAP_MMC_REG_RSP1 0x44 56#define OMAP_MMC_REG_RSP1 0x11
57#define OMAP_MMC_REG_RSP2 0x48 57#define OMAP_MMC_REG_RSP2 0x12
58#define OMAP_MMC_REG_RSP3 0x4c 58#define OMAP_MMC_REG_RSP3 0x13
59#define OMAP_MMC_REG_RSP4 0x50 59#define OMAP_MMC_REG_RSP4 0x14
60#define OMAP_MMC_REG_RSP5 0x54 60#define OMAP_MMC_REG_RSP5 0x15
61#define OMAP_MMC_REG_RSP6 0x58 61#define OMAP_MMC_REG_RSP6 0x16
62#define OMAP_MMC_REG_RSP7 0x5c 62#define OMAP_MMC_REG_RSP7 0x17
63#define OMAP_MMC_REG_IOSR 0x60 63#define OMAP_MMC_REG_IOSR 0x18
64#define OMAP_MMC_REG_SYSC 0x64 64#define OMAP_MMC_REG_SYSC 0x19
65#define OMAP_MMC_REG_SYSS 0x68 65#define OMAP_MMC_REG_SYSS 0x1a
66 66
67#define OMAP_MMC_STAT_CARD_ERR (1 << 14) 67#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13) 68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
@@ -78,8 +78,9 @@
78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2) 78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0) 79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
80 80
81#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg) 81#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
82#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg) 82#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
83#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
83 84
84/* 85/*
85 * Command types 86 * Command types
@@ -133,6 +134,7 @@ struct mmc_omap_host {
133 int irq; 134 int irq;
134 unsigned char bus_mode; 135 unsigned char bus_mode;
135 unsigned char hw_bus_mode; 136 unsigned char hw_bus_mode;
137 unsigned int reg_shift;
136 138
137 struct work_struct cmd_abort_work; 139 struct work_struct cmd_abort_work;
138 unsigned abort:1; 140 unsigned abort:1;
@@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
680 host->data->bytes_xfered += n; 682 host->data->bytes_xfered += n;
681 683
682 if (write) { 684 if (write) {
683 __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 685 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
684 } else { 686 } else {
685 __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 687 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
686 } 688 }
687} 689}
688 690
@@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
900 int dst_port = 0; 902 int dst_port = 0;
901 int sync_dev = 0; 903 int sync_dev = 0;
902 904
903 data_addr = host->phys_base + OMAP_MMC_REG_DATA; 905 data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
904 frame = data->blksz; 906 frame = data->blksz;
905 count = sg_dma_len(sg); 907 count = sg_dma_len(sg);
906 908
@@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1493 } 1495 }
1494 } 1496 }
1495 1497
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499
1496 return 0; 1500 return 0;
1497 1501
1498err_plat_cleanup: 1502err_plat_cleanup:
@@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1557 struct mmc_omap_slot *slot; 1561 struct mmc_omap_slot *slot;
1558 1562
1559 slot = host->slots[i]; 1563 slot = host->slots[i];
1560 ret = mmc_suspend_host(slot->mmc, mesg); 1564 ret = mmc_suspend_host(slot->mmc);
1561 if (ret < 0) { 1565 if (ret < 0) {
1562 while (--i >= 0) { 1566 while (--i >= 0) {
1563 slot = host->slots[i]; 1567 slot = host->slots[i];
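The omap.c hunk replaces hard-coded byte offsets with register indices scaled by a per-host shift: OMAP7xx parts space their MMC registers 2 bytes apart (shift 1) while later OMAPs use 4-byte spacing (shift 2), so one driver binary serves both layouts. The technique in miniature, with hypothetical names:

    struct foo_host {
            void __iomem *virt_base;
            unsigned int reg_shift;         /* 1: 16-bit spacing, 2: 32-bit */
    };

    #define FOO_REG_CMD     0x00            /* register index, not byte offset */
    #define FOO_REG_ARGL    0x01

    /* Byte offset = index << per-variant shift. */
    #define FOO_REG(host, idx)      ((idx) << (host)->reg_shift)

    static u16 foo_read(struct foo_host *host, unsigned int idx)
    {
            return __raw_readw(host->virt_base + FOO_REG(host, idx));
    }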
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59e..b032828c6126 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
157 */ 157 */
158 struct regulator *vcc; 158 struct regulator *vcc;
159 struct regulator *vcc_aux; 159 struct regulator *vcc_aux;
160 struct semaphore sem;
161 struct work_struct mmc_carddetect_work; 160 struct work_struct mmc_carddetect_work;
162 void __iomem *base; 161 void __iomem *base;
163 resource_size_t mapbase; 162 resource_size_t mapbase;
164 spinlock_t irq_lock; /* Prevent races with irq handler */ 163 spinlock_t irq_lock; /* Prevent races with irq handler */
165 unsigned long flags;
166 unsigned int id; 164 unsigned int id;
167 unsigned int dma_len; 165 unsigned int dma_len;
168 unsigned int dma_sg_idx; 166 unsigned int dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
183 int protect_card; 181 int protect_card;
184 int reqs_blocked; 182 int reqs_blocked;
185 int use_reg; 183 int use_reg;
184 int req_in_progress;
186 185
187 struct omap_mmc_platform_data *pdata; 186 struct omap_mmc_platform_data *pdata;
188}; 187};
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
 524 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n"); 523 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");
525} 524}
526 525
526static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
527{
528 unsigned int irq_mask;
529
530 if (host->use_dma)
531 irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
532 else
533 irq_mask = INT_EN_MASK;
534
535 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
536 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
537 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
538}
539
540static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
541{
542 OMAP_HSMMC_WRITE(host->base, ISE, 0);
543 OMAP_HSMMC_WRITE(host->base, IE, 0);
544 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
545}
546
527#ifdef CONFIG_PM 547#ifdef CONFIG_PM
528 548
529/* 549/*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
592 && time_before(jiffies, timeout)) 612 && time_before(jiffies, timeout))
593 ; 613 ;
594 614
595 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); 615 omap_hsmmc_disable_irq(host);
596 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
597 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
598 616
599 /* Do not initialize card-specific things if the power is off */ 617 /* Do not initialize card-specific things if the power is off */
600 if (host->power_mode == MMC_POWER_OFF) 618 if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
697 return; 715 return;
698 716
699 disable_irq(host->irq); 717 disable_irq(host->irq);
718
719 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
700 OMAP_HSMMC_WRITE(host->base, CON, 720 OMAP_HSMMC_WRITE(host->base, CON,
701 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); 721 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
702 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); 722 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
762 mmc_hostname(host->mmc), cmd->opcode, cmd->arg); 782 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
763 host->cmd = cmd; 783 host->cmd = cmd;
764 784
765 /* 785 omap_hsmmc_enable_irq(host);
766 * Clear status bits and enable interrupts
767 */
768 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
769 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
770
771 if (host->use_dma)
772 OMAP_HSMMC_WRITE(host->base, IE,
773 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
774 else
775 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
776 786
777 host->response_busy = 0; 787 host->response_busy = 0;
778 if (cmd->flags & MMC_RSP_PRESENT) { 788 if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
806 if (host->use_dma) 816 if (host->use_dma)
807 cmdreg |= DMA_EN; 817 cmdreg |= DMA_EN;
808 818
809 /* 819 host->req_in_progress = 1;
810 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
811 * by the interrupt handler, otherwise (i.e. for a new request) it is
812 * unlocked here.
813 */
814 if (!in_interrupt())
815 spin_unlock_irqrestore(&host->irq_lock, host->flags);
816 820
817 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); 821 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
818 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); 822 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
827 return DMA_FROM_DEVICE; 831 return DMA_FROM_DEVICE;
828} 832}
829 833
834static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
835{
836 int dma_ch;
837
838 spin_lock(&host->irq_lock);
839 host->req_in_progress = 0;
840 dma_ch = host->dma_ch;
841 spin_unlock(&host->irq_lock);
842
843 omap_hsmmc_disable_irq(host);
844 /* Do not complete the request if DMA is still in progress */
845 if (mrq->data && host->use_dma && dma_ch != -1)
846 return;
847 host->mrq = NULL;
848 mmc_request_done(host->mmc, mrq);
849}
850
830/* 851/*
831 * Notify the transfer complete to MMC core 852 * Notify the transfer complete to MMC core
832 */ 853 */
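omap_hsmmc_request_done() is the pivot of this rework: the transfer-complete interrupt and the DMA callback may now finish in either order, and whichever side finishes last completes the request. A toy pthread model of that handoff (types and names hypothetical, not the driver's):

    #include <pthread.h>
    #include <stdbool.h>

    struct xfer {
            pthread_mutex_t lock;
            bool irq_done;    /* transfer-complete interrupt handled */
            bool dma_done;    /* DMA callback ran, channel freed     */
    };

    /* Returns true for exactly one caller: whichever finishes last.
     * That caller then does the equivalent of mmc_request_done(). */
    static bool finish_side(struct xfer *x, bool from_irq)
    {
            bool last;

            pthread_mutex_lock(&x->lock);
            if (from_irq)
                    x->irq_done = true;
            else
                    x->dma_done = true;
            last = x->irq_done && x->dma_done;
            pthread_mutex_unlock(&x->lock);
            return last;
    }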
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
843 return; 864 return;
844 } 865 }
845 866
846 host->mrq = NULL; 867 omap_hsmmc_request_done(host, mrq);
847 mmc_request_done(host->mmc, mrq);
848 return; 868 return;
849 } 869 }
850 870
851 host->data = NULL; 871 host->data = NULL;
852 872
853 if (host->use_dma && host->dma_ch != -1)
854 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
855 omap_hsmmc_get_dma_dir(host, data));
856
857 if (!data->error) 873 if (!data->error)
858 data->bytes_xfered += data->blocks * (data->blksz); 874 data->bytes_xfered += data->blocks * (data->blksz);
859 else 875 else
860 data->bytes_xfered = 0; 876 data->bytes_xfered = 0;
861 877
862 if (!data->stop) { 878 if (!data->stop) {
863 host->mrq = NULL; 879 omap_hsmmc_request_done(host, data->mrq);
864 mmc_request_done(host->mmc, data->mrq);
865 return; 880 return;
866 } 881 }
867 omap_hsmmc_start_command(host, data->stop, NULL); 882 omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
887 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); 902 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
888 } 903 }
889 } 904 }
890 if ((host->data == NULL && !host->response_busy) || cmd->error) { 905 if ((host->data == NULL && !host->response_busy) || cmd->error)
891 host->mrq = NULL; 906 omap_hsmmc_request_done(host, cmd->mrq);
892 mmc_request_done(host->mmc, cmd->mrq);
893 }
894} 907}
895 908
896/* 909/*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
898 */ 911 */
899static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) 912static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
900{ 913{
914 int dma_ch;
915
901 host->data->error = errno; 916 host->data->error = errno;
902 917
903 if (host->use_dma && host->dma_ch != -1) { 918 spin_lock(&host->irq_lock);
919 dma_ch = host->dma_ch;
920 host->dma_ch = -1;
921 spin_unlock(&host->irq_lock);
922
923 if (host->use_dma && dma_ch != -1) {
904 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 924 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
905 omap_hsmmc_get_dma_dir(host, host->data)); 925 omap_hsmmc_get_dma_dir(host, host->data));
906 omap_free_dma(host->dma_ch); 926 omap_free_dma(dma_ch);
907 host->dma_ch = -1;
908 up(&host->sem);
909 } 927 }
910 host->data = NULL; 928 host->data = NULL;
911} 929}
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
967 __func__); 985 __func__);
968} 986}
969 987
970/* 988static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
971 * MMC controller IRQ handler
972 */
973static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
974{ 989{
975 struct omap_hsmmc_host *host = dev_id;
976 struct mmc_data *data; 990 struct mmc_data *data;
977 int end_cmd = 0, end_trans = 0, status; 991 int end_cmd = 0, end_trans = 0;
978 992
979 spin_lock(&host->irq_lock); 993 if (!host->req_in_progress) {
980 994 do {
981 if (host->mrq == NULL) { 995 OMAP_HSMMC_WRITE(host->base, STAT, status);
982 OMAP_HSMMC_WRITE(host->base, STAT, 996 /* Flush posted write */
983 OMAP_HSMMC_READ(host->base, STAT)); 997 status = OMAP_HSMMC_READ(host->base, STAT);
984 /* Flush posted write */ 998 } while (status & INT_EN_MASK);
985 OMAP_HSMMC_READ(host->base, STAT); 999 return;
986 spin_unlock(&host->irq_lock);
987 return IRQ_HANDLED;
988 } 1000 }
989 1001
990 data = host->data; 1002 data = host->data;
991 status = OMAP_HSMMC_READ(host->base, STAT);
992 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); 1003 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
993 1004
994 if (status & ERR) { 1005 if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1041 } 1052 }
1042 1053
1043 OMAP_HSMMC_WRITE(host->base, STAT, status); 1054 OMAP_HSMMC_WRITE(host->base, STAT, status);
1044 /* Flush posted write */
1045 OMAP_HSMMC_READ(host->base, STAT);
1046 1055
1047 if (end_cmd || ((status & CC) && host->cmd)) 1056 if (end_cmd || ((status & CC) && host->cmd))
1048 omap_hsmmc_cmd_done(host, host->cmd); 1057 omap_hsmmc_cmd_done(host, host->cmd);
1049 if ((end_trans || (status & TC)) && host->mrq) 1058 if ((end_trans || (status & TC)) && host->mrq)
1050 omap_hsmmc_xfer_done(host, data); 1059 omap_hsmmc_xfer_done(host, data);
1060}
1051 1061
1052 spin_unlock(&host->irq_lock); 1062/*
1063 * MMC controller IRQ handler
1064 */
1065static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1066{
1067 struct omap_hsmmc_host *host = dev_id;
1068 int status;
1069
1070 status = OMAP_HSMMC_READ(host->base, STAT);
1071 do {
1072 omap_hsmmc_do_irq(host, status);
1073 /* Flush posted write */
1074 status = OMAP_HSMMC_READ(host->base, STAT);
1075 } while (status & INT_EN_MASK);
1053 1076
1054 return IRQ_HANDLED; 1077 return IRQ_HANDLED;
1055} 1078}
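The reshaped handler is worth pausing on: reading STAT back after each pass doubles as the posted-write flush that the deleted lines performed explicitly, and the do/while keeps draining until no enabled status bit remains set. A stubbed sketch of the loop (helper names hypothetical):

    #include <stdint.h>

    #define INT_EN_MASK 0x307f0033u          /* assumed, as above */

    extern uint32_t read_stat(void);         /* hypothetical MMIO read     */
    extern void handle_events(uint32_t st);  /* hypothetical per-pass work */

    static void irq_drain(void)
    {
            uint32_t status = read_stat();

            do {
                    handle_events(status);
                    /* the re-read flushes the posted STAT write and picks
                     * up bits that latched while events were handled */
                    status = read_stat();
            } while (status & INT_EN_MASK);
    }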
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
1244/* 1267/*
1245 * DMA call back function 1268 * DMA call back function
1246 */ 1269 */
1247static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) 1270static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1248{ 1271{
1249 struct omap_hsmmc_host *host = data; 1272 struct omap_hsmmc_host *host = cb_data;
1273 struct mmc_data *data = host->mrq->data;
1274 int dma_ch, req_in_progress;
1250 1275
1251 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) 1276 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
1252 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); 1277 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
1253 1278
1254 if (host->dma_ch < 0) 1279 spin_lock(&host->irq_lock);
1280 if (host->dma_ch < 0) {
1281 spin_unlock(&host->irq_lock);
1255 return; 1282 return;
1283 }
1256 1284
1257 host->dma_sg_idx++; 1285 host->dma_sg_idx++;
1258 if (host->dma_sg_idx < host->dma_len) { 1286 if (host->dma_sg_idx < host->dma_len) {
1259 /* Fire up the next transfer. */ 1287 /* Fire up the next transfer. */
1260 omap_hsmmc_config_dma_params(host, host->data, 1288 omap_hsmmc_config_dma_params(host, data,
1261 host->data->sg + host->dma_sg_idx); 1289 data->sg + host->dma_sg_idx);
1290 spin_unlock(&host->irq_lock);
1262 return; 1291 return;
1263 } 1292 }
1264 1293
1265 omap_free_dma(host->dma_ch); 1294 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
1295 omap_hsmmc_get_dma_dir(host, data));
1296
1297 req_in_progress = host->req_in_progress;
1298 dma_ch = host->dma_ch;
1266 host->dma_ch = -1; 1299 host->dma_ch = -1;
1267 /* 1300 spin_unlock(&host->irq_lock);
1268 * DMA Callback: run in interrupt context. 1301
1269 * mutex_unlock will throw a kernel warning if used. 1302 omap_free_dma(dma_ch);
1270 */ 1303
1271 up(&host->sem); 1304 /* If DMA has finished after TC, complete the request */
1305 if (!req_in_progress) {
1306 struct mmc_request *mrq = host->mrq;
1307
1308 host->mrq = NULL;
1309 mmc_request_done(host->mmc, mrq);
1310 }
1272} 1311}
1273 1312
1274/* 1313/*
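The reworked callback follows a lock discipline worth copying: snapshot the shared fields under the spinlock, clear ownership (dma_ch = -1) so no other path frees the channel, drop the lock, and only then do the slow work. A plain-C model with toy types and hypothetical helpers:

    #include <pthread.h>

    struct chan {
            pthread_mutex_t lock;
            int dma_ch;            /* -1 = no channel owned     */
            int req_in_progress;   /* IRQ side not finished yet */
    };

    extern void free_channel(int ch);       /* hypothetical */
    extern void complete_request(void);     /* hypothetical */

    static void dma_done(struct chan *c)
    {
            int ch, irq_busy;

            pthread_mutex_lock(&c->lock);
            ch = c->dma_ch;
            c->dma_ch = -1;        /* no other path may free it now */
            irq_busy = c->req_in_progress;
            pthread_mutex_unlock(&c->lock);

            if (ch == -1)
                    return;        /* lost a race with cleanup */
            free_channel(ch);      /* slow work, done without the lock */
            if (!irq_busy)
                    complete_request();  /* TC already fired; we are last */
    }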
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
1277static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, 1316static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1278 struct mmc_request *req) 1317 struct mmc_request *req)
1279{ 1318{
1280 int dma_ch = 0, ret = 0, err = 1, i; 1319 int dma_ch = 0, ret = 0, i;
1281 struct mmc_data *data = req->data; 1320 struct mmc_data *data = req->data;
1282 1321
1283 /* Sanity check: all the SG entries must be aligned by block size. */ 1322 /* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1294 */ 1333 */
1295 return -EINVAL; 1334 return -EINVAL;
1296 1335
1297 /* 1336 BUG_ON(host->dma_ch != -1);
1298 * If for some reason the DMA transfer is still active,
1299 * we wait for timeout period and free the dma
1300 */
1301 if (host->dma_ch != -1) {
1302 set_current_state(TASK_UNINTERRUPTIBLE);
1303 schedule_timeout(100);
1304 if (down_trylock(&host->sem)) {
1305 omap_free_dma(host->dma_ch);
1306 host->dma_ch = -1;
1307 up(&host->sem);
1308 return err;
1309 }
1310 } else {
1311 if (down_trylock(&host->sem))
1312 return err;
1313 }
1314 1337
1315 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), 1338 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
1316 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); 1339 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1410 struct omap_hsmmc_host *host = mmc_priv(mmc); 1433 struct omap_hsmmc_host *host = mmc_priv(mmc);
1411 int err; 1434 int err;
1412 1435
1413 /* 1436 BUG_ON(host->req_in_progress);
1414 * Prevent races with the interrupt handler because of unexpected 1437 BUG_ON(host->dma_ch != -1);
1415 * interrupts, but not if we are already in interrupt context i.e. 1438 if (host->protect_card) {
1416 * retries. 1439 if (host->reqs_blocked < 3) {
1417 */ 1440 /*
1418 if (!in_interrupt()) { 1441 * Ensure the controller is left in a consistent
1419 spin_lock_irqsave(&host->irq_lock, host->flags); 1442 * state by resetting the command and data state
1420 /* 1443 * machines.
1421 * Protect the card from I/O if there is a possibility 1444 */
1422 * it can be removed. 1445 omap_hsmmc_reset_controller_fsm(host, SRD);
1423 */ 1446 omap_hsmmc_reset_controller_fsm(host, SRC);
1424 if (host->protect_card) { 1447 host->reqs_blocked += 1;
1425 if (host->reqs_blocked < 3) { 1448 }
1426 /* 1449 req->cmd->error = -EBADF;
1427 * Ensure the controller is left in a consistent 1450 if (req->data)
1428 * state by resetting the command and data state 1451 req->data->error = -EBADF;
1429 * machines. 1452 req->cmd->retries = 0;
1430 */ 1453 mmc_request_done(mmc, req);
1431 omap_hsmmc_reset_controller_fsm(host, SRD); 1454 return;
1432 omap_hsmmc_reset_controller_fsm(host, SRC); 1455 } else if (host->reqs_blocked)
1433 host->reqs_blocked += 1; 1456 host->reqs_blocked = 0;
1434 }
1435 req->cmd->error = -EBADF;
1436 if (req->data)
1437 req->data->error = -EBADF;
1438 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1439 mmc_request_done(mmc, req);
1440 return;
1441 } else if (host->reqs_blocked)
1442 host->reqs_blocked = 0;
1443 }
1444 WARN_ON(host->mrq != NULL); 1457 WARN_ON(host->mrq != NULL);
1445 host->mrq = req; 1458 host->mrq = req;
1446 err = omap_hsmmc_prepare_data(host, req); 1459 err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1449 if (req->data) 1462 if (req->data)
1450 req->data->error = err; 1463 req->data->error = err;
1451 host->mrq = NULL; 1464 host->mrq = NULL;
1452 if (!in_interrupt())
1453 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1454 mmc_request_done(mmc, req); 1465 mmc_request_done(mmc, req);
1455 return; 1466 return;
1456 } 1467 }
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2019 mmc->f_min = 400000; 2030 mmc->f_min = 400000;
2020 mmc->f_max = 52000000; 2031 mmc->f_max = 52000000;
2021 2032
2022 sema_init(&host->sem, 1);
2023 spin_lock_init(&host->irq_lock); 2033 spin_lock_init(&host->irq_lock);
2024 2034
2025 host->iclk = clk_get(&pdev->dev, "ick"); 2035 host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2162 } 2172 }
2163 } 2173 }
2164 2174
2165 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 2175 omap_hsmmc_disable_irq(host);
2166 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
2167 2176
2168 mmc_host_lazy_disable(host->mmc); 2177 mmc_host_lazy_disable(host->mmc);
2169 2178
@@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2258} 2267}
2259 2268
2260#ifdef CONFIG_PM 2269#ifdef CONFIG_PM
2261static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) 2270static int omap_hsmmc_suspend(struct device *dev)
2262{ 2271{
2263 int ret = 0; 2272 int ret = 0;
2273 struct platform_device *pdev = to_platform_device(dev);
2264 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2274 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2275 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2265 2276
2266 if (host && host->suspended) 2277 if (host && host->suspended)
2267 return 0; 2278 return 0;
@@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2281 } 2292 }
2282 cancel_work_sync(&host->mmc_carddetect_work); 2293 cancel_work_sync(&host->mmc_carddetect_work);
2283 mmc_host_enable(host->mmc); 2294 mmc_host_enable(host->mmc);
2284 ret = mmc_suspend_host(host->mmc, state); 2295 ret = mmc_suspend_host(host->mmc);
2285 if (ret == 0) { 2296 if (ret == 0) {
2286 OMAP_HSMMC_WRITE(host->base, ISE, 0); 2297 omap_hsmmc_disable_irq(host);
2287 OMAP_HSMMC_WRITE(host->base, IE, 0);
2288
2289
2290 OMAP_HSMMC_WRITE(host->base, HCTL, 2298 OMAP_HSMMC_WRITE(host->base, HCTL,
2291 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2299 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2292 mmc_host_disable(host->mmc); 2300 mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2310} 2318}
2311 2319
2312/* Routine to resume the MMC device */ 2320/* Routine to resume the MMC device */
2313static int omap_hsmmc_resume(struct platform_device *pdev) 2321static int omap_hsmmc_resume(struct device *dev)
2314{ 2322{
2315 int ret = 0; 2323 int ret = 0;
2324 struct platform_device *pdev = to_platform_device(dev);
2316 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2325 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2317 2326
2318 if (host && !host->suspended) 2327 if (host && !host->suspended)
@@ -2363,13 +2372,17 @@ clk_en_err:
2363#define omap_hsmmc_resume NULL 2372#define omap_hsmmc_resume NULL
2364#endif 2373#endif
2365 2374
2366static struct platform_driver omap_hsmmc_driver = { 2375static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2367 .remove = omap_hsmmc_remove,
2368 .suspend = omap_hsmmc_suspend, 2376 .suspend = omap_hsmmc_suspend,
2369 .resume = omap_hsmmc_resume, 2377 .resume = omap_hsmmc_resume,
2378};
2379
2380static struct platform_driver omap_hsmmc_driver = {
2381 .remove = omap_hsmmc_remove,
2370 .driver = { 2382 .driver = {
2371 .name = DRIVER_NAME, 2383 .name = DRIVER_NAME,
2372 .owner = THIS_MODULE, 2384 .owner = THIS_MODULE,
2385 .pm = &omap_hsmmc_dev_pm_ops,
2373 }, 2386 },
2374}; 2387};
2375 2388
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e4f00e70a749..0a4e43f37140 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev)
813 int ret = 0; 813 int ret = 0;
814 814
815 if (mmc) 815 if (mmc)
816 ret = mmc_suspend_host(mmc, PMSG_SUSPEND); 816 ret = mmc_suspend_host(mmc);
817 817
818 return ret; 818 return ret;
819} 819}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf7689ae6c..2e16e0a90a5e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1881static int s3cmci_suspend(struct device *dev) 1881static int s3cmci_suspend(struct device *dev)
1882{ 1882{
1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); 1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1884 struct pm_message event = { PM_EVENT_SUSPEND };
1885 1884
1886 return mmc_suspend_host(mmc, event); 1885 return mmc_suspend_host(mmc);
1887} 1886}
1888 1887
1889static int s3cmci_resume(struct device *dev) 1888static int s3cmci_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 7802a543d8fc..a2e9820cd42f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
89{ 89{
90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); 90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
91 91
92 return mmc_suspend_host(host->mmc, state); 92 return mmc_suspend_host(host->mmc);
93} 93}
94 94
95static int sdhci_of_resume(struct of_device *ofdev) 95static int sdhci_of_resume(struct of_device *ofdev)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a17e648..c8623de13af3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = {
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | 129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET, 130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = { 131 .ops = {
132 .readl = sdhci_be32bs_readl, 132 .read_l = sdhci_be32bs_readl,
133 .readw = esdhc_readw, 133 .read_w = esdhc_readw,
134 .readb = sdhci_be32bs_readb, 134 .read_b = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel, 135 .write_l = sdhci_be32bs_writel,
136 .writew = esdhc_writew, 136 .write_w = esdhc_writew,
137 .writeb = esdhc_writeb, 137 .write_b = esdhc_writeb,
138 .set_clock = esdhc_set_clock, 138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma, 139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock, 140 .get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3ed757..68ddb7546ae2 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE, 56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = { 57 .ops = {
58 .readl = sdhci_be32bs_readl, 58 .read_l = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw, 59 .read_w = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb, 60 .read_b = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel, 61 .write_l = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew, 62 .write_w = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb, 63 .write_b = sdhci_hlwd_writeb,
64 }, 64 },
65}; 65};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af629c30..65483fdea45b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); 628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
629 if (IS_ERR(host)) { 629 if (IS_ERR(host)) {
630 dev_err(&pdev->dev, "cannot allocate host\n"); 630 dev_err(&pdev->dev, "cannot allocate host\n");
631 return ERR_PTR(PTR_ERR(host)); 631 return ERR_CAST(host);
632 } 632 }
633 633
634 slot = sdhci_priv(host); 634 slot = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40ae6ad5..b6ee0d719698 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
29#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30 30
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/sdhci-pltfm.h>
32 33
33#include "sdhci.h" 34#include "sdhci.h"
34 35
@@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = {
49 50
50static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) 51static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
51{ 52{
53 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
52 struct sdhci_host *host; 54 struct sdhci_host *host;
53 struct resource *iomem; 55 struct resource *iomem;
54 int ret; 56 int ret;
55 57
56 BUG_ON(pdev == NULL);
57
58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 if (!iomem) { 59 if (!iomem) {
60 ret = -ENOMEM; 60 ret = -ENOMEM;
61 goto err; 61 goto err;
62 } 62 }
63 63
64 if (resource_size(iomem) != 0x100) 64 if (resource_size(iomem) < 0x100)
65 dev_err(&pdev->dev, "Invalid iomem size. You may " 65 dev_err(&pdev->dev, "Invalid iomem size. You may "
66 "experience problems.\n"); 66 "experience problems.\n");
67 67
@@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
76 } 76 }
77 77
78 host->hw_name = "platform"; 78 host->hw_name = "platform";
79 host->ops = &sdhci_pltfm_ops; 79 if (pdata && pdata->ops)
80 host->ops = pdata->ops;
81 else
82 host->ops = &sdhci_pltfm_ops;
83 if (pdata)
84 host->quirks = pdata->quirks;
80 host->irq = platform_get_irq(pdev, 0); 85 host->irq = platform_get_irq(pdev, 0);
81 86
82 if (!request_mem_region(iomem->start, resource_size(iomem), 87 if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
93 goto err_remap; 98 goto err_remap;
94 } 99 }
95 100
101 if (pdata && pdata->init) {
102 ret = pdata->init(host);
103 if (ret)
104 goto err_plat_init;
105 }
106
96 ret = sdhci_add_host(host); 107 ret = sdhci_add_host(host);
97 if (ret) 108 if (ret)
98 goto err_add_host; 109 goto err_add_host;
@@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
102 return 0; 113 return 0;
103 114
104err_add_host: 115err_add_host:
116 if (pdata && pdata->exit)
117 pdata->exit(host);
118err_plat_init:
105 iounmap(host->ioaddr); 119 iounmap(host->ioaddr);
106err_remap: 120err_remap:
107 release_mem_region(iomem->start, resource_size(iomem)); 121 release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@ err:
114 128
115static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) 129static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
116{ 130{
131 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
117 struct sdhci_host *host = platform_get_drvdata(pdev); 132 struct sdhci_host *host = platform_get_drvdata(pdev);
118 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 133 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
119 int dead; 134 int dead;
@@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
125 dead = 1; 140 dead = 1;
126 141
127 sdhci_remove_host(host, dead); 142 sdhci_remove_host(host, dead);
143 if (pdata && pdata->exit)
144 pdata->exit(host);
128 iounmap(host->ioaddr); 145 iounmap(host->ioaddr);
129 release_mem_region(iomem->start, resource_size(iomem)); 146 release_mem_region(iomem->start, resource_size(iomem));
130 sdhci_free_host(host); 147 sdhci_free_host(host);
@@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
165MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 182MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
166MODULE_LICENSE("GPL v2"); 183MODULE_LICENSE("GPL v2");
167MODULE_ALIAS("platform:sdhci"); 184MODULE_ALIAS("platform:sdhci");
168
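sdhci-pltfm now consults optional platform data for ops, quirks, and init/exit hooks, guarding every use with a NULL check and unwinding init on the error path. A compilable sketch of the guarded-hook pattern (all names hypothetical):

    struct host { int dummy; };

    struct pltfm_data {
            int  (*init)(struct host *h);   /* optional */
            void (*exit)(struct host *h);   /* optional */
    };

    static int add_host(struct host *h) { (void)h; return 0; }  /* stub */

    static int probe(struct host *h, const struct pltfm_data *pdata)
    {
            int ret;

            if (pdata && pdata->init) {
                    ret = pdata->init(h);
                    if (ret)
                            return ret;     /* nothing to unwind yet */
            }

            ret = add_host(h);
            if (ret && pdata && pdata->exit)
                    pdata->exit(h);         /* unwind init on failure */
            return ret;
    }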
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794c0cfa..af217924a76e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
317 host->irq = irq; 317 host->irq = irq;
318 318
319 /* Setup quirks for the controller */ 319 /* Setup quirks for the controller */
320 320 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
321 /* Currently with ADMA enabled we are getting some length
322 * interrupts that are not being dealt with, do disable
323 * ADMA until this is sorted out. */
324 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
325 host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
326 321
327#ifndef CONFIG_MMC_SDHCI_S3C_DMA 322#ifndef CONFIG_MMC_SDHCI_S3C_DMA
328 323
@@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
330 * support as well. */ 325 * support as well. */
331 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; 326 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
332 327
333 /* PIO currently has problems with multi-block IO */
334 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
335
336#endif /* CONFIG_MMC_SDHCI_S3C_DMA */ 328#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
337 329
 338 /* It seems we do not get a DATA transfer complete on non-busy 330 /* It seems we do not get a DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 000000000000..d70c54c7b70a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
1/*
2 * drivers/mmc/host/sdhci-spear.c
3 *
 4 * Support for SDHCI platform devices of the SPEAr SoC family
5 *
6 * Copyright (C) 2010 ST Microelectronics
 7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * Inspired by sdhci-pltfm.c
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/gpio.h>
19#include <linux/highmem.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/sdhci-spear.h>
26#include <linux/io.h>
27#include "sdhci.h"
28
29struct spear_sdhci {
30 struct clk *clk;
31 struct sdhci_plat_data *data;
32};
33
34/* sdhci ops */
35static struct sdhci_ops sdhci_pltfm_ops = {
36 /* Nothing to do for now. */
37};
38
39/* gpio card detection interrupt handler */
40static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
41{
42 struct platform_device *pdev = dev_id;
43 struct sdhci_host *host = platform_get_drvdata(pdev);
44 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
45 unsigned long gpio_irq_type;
46 int val;
47
48 val = gpio_get_value(sdhci->data->card_int_gpio);
49
50 /* val == 1 -> card removed, val == 0 -> card inserted */
51 /* if card removed - set irq for low level, else vice versa */
52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
53 set_irq_type(irq, gpio_irq_type);
54
55 if (sdhci->data->card_power_gpio >= 0) {
56 if (!sdhci->data->power_always_enb) {
57 /* if card inserted, give power, otherwise remove it */
 58 val = sdhci->data->power_active_high ? !val : val;
59 gpio_set_value(sdhci->data->card_power_gpio, val);
60 }
61 }
62
63 /* inform sdhci driver about card insertion/removal */
64 tasklet_schedule(&host->card_tasklet);
65
66 return IRQ_HANDLED;
67}
68
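The handler above services a level-triggered card-detect GPIO by re-arming for the opposite level on every interrupt; that is how a level IRQ can track both insertion and removal. A tiny model of the polarity flip, using the level convention stated in the handler's comment:

    #include <stdio.h>

    enum trigger { TRIG_LOW, TRIG_HIGH };

    /* level 1 = card removed, level 0 = card inserted */
    static enum trigger next_trigger(int level)
    {
            return level ? TRIG_LOW : TRIG_HIGH;
    }

    int main(void)
    {
            printf("removed  -> wait for %s\n",
                   next_trigger(1) == TRIG_LOW ? "low (insertion)" : "?");
            printf("inserted -> wait for %s\n",
                   next_trigger(0) == TRIG_HIGH ? "high (removal)" : "?");
            return 0;
    }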
69static int __devinit sdhci_probe(struct platform_device *pdev)
70{
71 struct sdhci_host *host;
72 struct resource *iomem;
73 struct spear_sdhci *sdhci;
74 int ret;
75
76 BUG_ON(pdev == NULL);
77
78 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!iomem) {
80 ret = -ENOMEM;
81 dev_dbg(&pdev->dev, "memory resource not defined\n");
82 goto err;
83 }
84
85 if (!request_mem_region(iomem->start, resource_size(iomem),
86 "spear-sdhci")) {
87 ret = -EBUSY;
88 dev_dbg(&pdev->dev, "cannot request region\n");
89 goto err;
90 }
91
92 sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
93 if (!sdhci) {
94 ret = -ENOMEM;
95 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
96 goto err_kzalloc;
97 }
98
99 /* clk enable */
100 sdhci->clk = clk_get(&pdev->dev, NULL);
101 if (IS_ERR(sdhci->clk)) {
102 ret = PTR_ERR(sdhci->clk);
103 dev_dbg(&pdev->dev, "Error getting clock\n");
104 goto err_clk_get;
105 }
106
107 ret = clk_enable(sdhci->clk);
108 if (ret) {
109 dev_dbg(&pdev->dev, "Error enabling clock\n");
110 goto err_clk_enb;
111 }
112
113 /* overwrite platform_data */
114 sdhci->data = dev_get_platdata(&pdev->dev);
115 pdev->dev.platform_data = sdhci;
116
117 if (pdev->dev.parent)
118 host = sdhci_alloc_host(pdev->dev.parent, 0);
119 else
120 host = sdhci_alloc_host(&pdev->dev, 0);
121
122 if (IS_ERR(host)) {
123 ret = PTR_ERR(host);
124 dev_dbg(&pdev->dev, "error allocating host\n");
125 goto err_alloc_host;
126 }
127
128 host->hw_name = "sdhci";
129 host->ops = &sdhci_pltfm_ops;
130 host->irq = platform_get_irq(pdev, 0);
131 host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
132
133 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
134 if (!host->ioaddr) {
135 ret = -ENOMEM;
136 dev_dbg(&pdev->dev, "failed to remap registers\n");
137 goto err_ioremap;
138 }
139
140 ret = sdhci_add_host(host);
141 if (ret) {
142 dev_dbg(&pdev->dev, "error adding host\n");
143 goto err_add_host;
144 }
145
146 platform_set_drvdata(pdev, host);
147
148 /*
 149 * Using GPIOs for sdhci power control and card interrupt detection
 150 * is optional. If sdhci->data is NULL, the controller's native lines
 151 * are used; otherwise the GPIO lines are.
 152 * If a GPIO is selected for power control, power should be disabled
 153 * after card removal and enabled again when the card insertion
 154 * interrupt occurs.
155 */
156 if (!sdhci->data)
157 return 0;
158
159 if (sdhci->data->card_power_gpio >= 0) {
160 int val = 0;
161
162 ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
163 if (ret < 0) {
164 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
165 sdhci->data->card_power_gpio);
166 goto err_pgpio_request;
167 }
168
169 if (sdhci->data->power_always_enb)
170 val = sdhci->data->power_active_high;
171 else
172 val = !sdhci->data->power_active_high;
173
174 ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
175 if (ret) {
176 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
177 sdhci->data->card_power_gpio);
178 goto err_pgpio_direction;
179 }
180
181 gpio_set_value(sdhci->data->card_power_gpio, 1);
182 }
183
184 if (sdhci->data->card_int_gpio >= 0) {
185 ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
186 if (ret < 0) {
187 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
188 sdhci->data->card_int_gpio);
189 goto err_igpio_request;
190 }
191
192 ret = gpio_direction_input(sdhci->data->card_int_gpio);
193 if (ret) {
194 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
195 sdhci->data->card_int_gpio);
196 goto err_igpio_direction;
197 }
198 ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
199 sdhci_gpio_irq, IRQF_TRIGGER_LOW,
200 mmc_hostname(host->mmc), pdev);
201 if (ret) {
202 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
203 sdhci->data->card_int_gpio);
204 goto err_igpio_request_irq;
205 }
206
207 }
208
209 return 0;
210
211err_igpio_request_irq:
212err_igpio_direction:
213 if (sdhci->data->card_int_gpio >= 0)
214 gpio_free(sdhci->data->card_int_gpio);
215err_igpio_request:
216err_pgpio_direction:
217 if (sdhci->data->card_power_gpio >= 0)
218 gpio_free(sdhci->data->card_power_gpio);
219err_pgpio_request:
220 platform_set_drvdata(pdev, NULL);
221 sdhci_remove_host(host, 1);
222err_add_host:
223 iounmap(host->ioaddr);
224err_ioremap:
225 sdhci_free_host(host);
226err_alloc_host:
227 clk_disable(sdhci->clk);
228err_clk_enb:
229 clk_put(sdhci->clk);
230err_clk_get:
231 kfree(sdhci);
232err_kzalloc:
233 release_mem_region(iomem->start, resource_size(iomem));
234err:
235 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
236 return ret;
237}
238
239static int __devexit sdhci_remove(struct platform_device *pdev)
240{
241 struct sdhci_host *host = platform_get_drvdata(pdev);
242 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
244 int dead;
245 u32 scratch;
246
247 if (sdhci->data) {
248 if (sdhci->data->card_int_gpio >= 0) {
249 free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
250 gpio_free(sdhci->data->card_int_gpio);
251 }
252
253 if (sdhci->data->card_power_gpio >= 0)
254 gpio_free(sdhci->data->card_power_gpio);
255 }
256
257 platform_set_drvdata(pdev, NULL);
258 dead = 0;
259 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
260 if (scratch == (u32)-1)
261 dead = 1;
262
263 sdhci_remove_host(host, dead);
264 iounmap(host->ioaddr);
265 sdhci_free_host(host);
266 clk_disable(sdhci->clk);
267 clk_put(sdhci->clk);
268 kfree(sdhci);
269 if (iomem)
270 release_mem_region(iomem->start, resource_size(iomem));
271
272 return 0;
273}
274
275static struct platform_driver sdhci_driver = {
276 .driver = {
277 .name = "sdhci",
278 .owner = THIS_MODULE,
279 },
280 .probe = sdhci_probe,
281 .remove = __devexit_p(sdhci_remove),
282};
283
284static int __init sdhci_init(void)
285{
286 return platform_driver_register(&sdhci_driver);
287}
288module_init(sdhci_init);
289
290static void __exit sdhci_exit(void)
291{
292 platform_driver_unregister(&sdhci_driver);
293}
294module_exit(sdhci_exit);
295
296MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
297MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
298MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa685e5..c6d1bd8d4ac4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
497 } 497 }
498 498
499 /* 499 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
500 * Add a terminating entry. 500 /*
501 */ 501 * Mark the last descriptor as the terminating descriptor
502 */
503 if (desc != host->adma_desc) {
504 desc -= 8;
505 desc[0] |= 0x2; /* end */
506 }
507 } else {
508 /*
509 * Add a terminating entry.
510 */
502 511
503 /* nop, end, valid */ 512 /* nop, end, valid */
504 sdhci_set_adma_desc(desc, 0, 0, 0x3); 513 sdhci_set_adma_desc(desc, 0, 0, 0x3);
514 }
505 515
506 /* 516 /*
507 * Resync align buffer as we might have changed it. 517 * Resync align buffer as we might have changed it.
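The quirk changes only how the descriptor table is terminated: controllers that cannot parse the End attribute on a NOP descriptor instead get End set on the final data descriptor. A sketch of the two styles over a flat table of 8-byte descriptors, with the attribute values used in the hunk (bit 0 valid, bit 1 end):

    #include <stdint.h>
    #include <string.h>

    #define DESC_SZ 8                 /* one ADMA2 descriptor */

    static void terminate(uint8_t *table, uint8_t *next, int no_endattr)
    {
            if (no_endattr) {
                    if (next != table) {        /* table is non-empty */
                            next -= DESC_SZ;    /* last real entry    */
                            next[0] |= 0x2;     /* set its End bit    */
                    }
            } else {
                    memset(next, 0, DESC_SZ);   /* trailing NOP entry */
                    next[0] = 0x3;              /* end | valid        */
            }
    }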
@@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1587 1597
1588 sdhci_disable_card_detection(host); 1598 sdhci_disable_card_detection(host);
1589 1599
1590 ret = mmc_suspend_host(host->mmc, state); 1600 ret = mmc_suspend_host(host->mmc);
1591 if (ret) 1601 if (ret)
1592 return ret; 1602 return ret;
1593 1603
@@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host)
1744 host->max_clk = 1754 host->max_clk =
1745 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1755 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1746 host->max_clk *= 1000000; 1756 host->max_clk *= 1000000;
1747 if (host->max_clk == 0) { 1757 if (host->max_clk == 0 || host->quirks &
1758 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
1748 if (!host->ops->get_max_clock) { 1759 if (!host->ops->get_max_clock) {
1749 printk(KERN_ERR 1760 printk(KERN_ERR
1750 "%s: Hardware doesn't specify base clock " 1761 "%s: Hardware doesn't specify base clock "
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f94284..c8468134adc9 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ 127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ 128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ 129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
130 SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR) 130 SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
131#define SDHCI_INT_ALL_MASK ((unsigned int)-1) 131#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
132 132
133#define SDHCI_ACMD12_ERR 0x3C 133#define SDHCI_ACMD12_ERR 0x3C
@@ -236,6 +236,10 @@ struct sdhci_host {
236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) 236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
237/* Controller uses SDCLK instead of TMCLK for data timeouts */ 237/* Controller uses SDCLK instead of TMCLK for data timeouts */
238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) 238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
239/* Controller reports wrong base clock capability */
240#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
241/* Controller cannot support End Attribute in NOP ADMA descriptor */
242#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
239 243
240 int irq; /* Device IRQ */ 244 int irq; /* Device IRQ */
241 void __iomem * ioaddr; /* Mapped address */ 245 void __iomem * ioaddr; /* Mapped address */
@@ -294,12 +298,12 @@ struct sdhci_host {
294 298
295struct sdhci_ops { 299struct sdhci_ops {
296#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 300#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
297 u32 (*readl)(struct sdhci_host *host, int reg); 301 u32 (*read_l)(struct sdhci_host *host, int reg);
298 u16 (*readw)(struct sdhci_host *host, int reg); 302 u16 (*read_w)(struct sdhci_host *host, int reg);
299 u8 (*readb)(struct sdhci_host *host, int reg); 303 u8 (*read_b)(struct sdhci_host *host, int reg);
300 void (*writel)(struct sdhci_host *host, u32 val, int reg); 304 void (*write_l)(struct sdhci_host *host, u32 val, int reg);
301 void (*writew)(struct sdhci_host *host, u16 val, int reg); 305 void (*write_w)(struct sdhci_host *host, u16 val, int reg);
302 void (*writeb)(struct sdhci_host *host, u8 val, int reg); 306 void (*write_b)(struct sdhci_host *host, u8 val, int reg);
303#endif 307#endif
304 308
305 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 309 void (*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@ struct sdhci_ops {
314 318
315static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) 319static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
316{ 320{
317 if (unlikely(host->ops->writel)) 321 if (unlikely(host->ops->write_l))
318 host->ops->writel(host, val, reg); 322 host->ops->write_l(host, val, reg);
319 else 323 else
320 writel(val, host->ioaddr + reg); 324 writel(val, host->ioaddr + reg);
321} 325}
322 326
323static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) 327static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
324{ 328{
325 if (unlikely(host->ops->writew)) 329 if (unlikely(host->ops->write_w))
326 host->ops->writew(host, val, reg); 330 host->ops->write_w(host, val, reg);
327 else 331 else
328 writew(val, host->ioaddr + reg); 332 writew(val, host->ioaddr + reg);
329} 333}
330 334
331static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) 335static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
332{ 336{
333 if (unlikely(host->ops->writeb)) 337 if (unlikely(host->ops->write_b))
334 host->ops->writeb(host, val, reg); 338 host->ops->write_b(host, val, reg);
335 else 339 else
336 writeb(val, host->ioaddr + reg); 340 writeb(val, host->ioaddr + reg);
337} 341}
338 342
339static inline u32 sdhci_readl(struct sdhci_host *host, int reg) 343static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
340{ 344{
341 if (unlikely(host->ops->readl)) 345 if (unlikely(host->ops->read_l))
342 return host->ops->readl(host, reg); 346 return host->ops->read_l(host, reg);
343 else 347 else
344 return readl(host->ioaddr + reg); 348 return readl(host->ioaddr + reg);
345} 349}
346 350
347static inline u16 sdhci_readw(struct sdhci_host *host, int reg) 351static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
348{ 352{
349 if (unlikely(host->ops->readw)) 353 if (unlikely(host->ops->read_w))
350 return host->ops->readw(host, reg); 354 return host->ops->read_w(host, reg);
351 else 355 else
352 return readw(host->ioaddr + reg); 356 return readw(host->ioaddr + reg);
353} 357}
354 358
355static inline u8 sdhci_readb(struct sdhci_host *host, int reg) 359static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
356{ 360{
357 if (unlikely(host->ops->readb)) 361 if (unlikely(host->ops->read_b))
358 return host->ops->readb(host, reg); 362 return host->ops->read_b(host, reg);
359 else 363 else
360 return readb(host->ioaddr + reg); 364 return readb(host->ioaddr + reg);
361} 365}
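The rename from readl/writel to read_l/write_l in sdhci_ops is more than cosmetic: on some architectures readl() and friends are preprocessor macros, which can collide with structure members of the same name (a plausible reading of the motivation, not stated in the hunk). The indirection itself is the usual optional-override pattern:

    #include <stdint.h>

    struct toy_host;

    struct toy_ops {
            uint32_t (*read_l)(struct toy_host *h, int reg);  /* optional */
    };

    struct toy_host {
            const struct toy_ops *ops;
            volatile uint32_t *ioaddr;
    };

    static uint32_t toy_readl(struct toy_host *h, int reg)
    {
            if (h->ops && h->ops->read_l)
                    return h->ops->read_l(h, reg);  /* quirky controller */
            return h->ioaddr[reg / 4];              /* plain MMIO access */
    }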
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c3ac07..e7507af3856e 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
519{ 519{
520 struct mmc_host *mmc = link->priv; 520 struct mmc_host *mmc = link->priv;
521 dev_dbg(&link->dev, "suspend\n"); 521 dev_dbg(&link->dev, "suspend\n");
522 mmc_suspend_host(mmc, PMSG_SUSPEND); 522 mmc_suspend_host(mmc);
523 return 0; 523 return 0;
524} 524}
525 525
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000000..eb97830c0344
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
1/*
2 * MMCIF eMMC driver.
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Yusuke Goda <yusuke.goda.sx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License.
10 *
11 *
12 * TODO
13 * 1. DMA
14 * 2. Power management
15 * 3. Handle MMC errors better
16 *
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/mmc/host.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/core.h>
23#include <linux/mmc/mmc.h>
24#include <linux/mmc/sdio.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <linux/mmc/sh_mmcif.h>
29
30#define DRIVER_NAME "sh_mmcif"
31#define DRIVER_VERSION "2010-04-28"
32
33#define MMCIF_CE_CMD_SET 0x00000000
34#define MMCIF_CE_ARG 0x00000008
35#define MMCIF_CE_ARG_CMD12 0x0000000C
36#define MMCIF_CE_CMD_CTRL 0x00000010
37#define MMCIF_CE_BLOCK_SET 0x00000014
38#define MMCIF_CE_CLK_CTRL 0x00000018
39#define MMCIF_CE_BUF_ACC 0x0000001C
40#define MMCIF_CE_RESP3 0x00000020
41#define MMCIF_CE_RESP2 0x00000024
42#define MMCIF_CE_RESP1 0x00000028
43#define MMCIF_CE_RESP0 0x0000002C
44#define MMCIF_CE_RESP_CMD12 0x00000030
45#define MMCIF_CE_DATA 0x00000034
46#define MMCIF_CE_INT 0x00000040
47#define MMCIF_CE_INT_MASK 0x00000044
48#define MMCIF_CE_HOST_STS1 0x00000048
49#define MMCIF_CE_HOST_STS2 0x0000004C
50#define MMCIF_CE_VERSION 0x0000007C
51
52/* CE_CMD_SET */
53#define CMD_MASK 0x3f000000
54#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
55#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
56#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
57#define CMD_SET_RBSY (1 << 21) /* R1b */
58#define CMD_SET_CCSEN (1 << 20)
 59#define CMD_SET_WDAT (1 << 19) /* 1: with data, 0: no data */
60#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
61#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
62#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
63#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
64#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
65#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
66#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/
67#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/
68#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/
69#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/
 70#define CMD_SET_CRCSTE (1 << 8) /* 1: do not receive CRC status */
 71#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
72#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
73#define CMD_SET_CCSH (1 << 5)
74#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
75#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
76#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
77
78/* CE_CMD_CTRL */
79#define CMD_CTRL_BREAK (1 << 0)
80
81/* CE_BLOCK_SET */
82#define BLOCK_SIZE_MASK 0x0000ffff
83
84/* CE_CLK_CTRL */
85#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
86#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
87#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
88#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
89#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
90 (1 << 9) | (1 << 8)) /* resp busy timeout */
91#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
92 (1 << 5) | (1 << 4)) /* read/write timeout */
93#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
94 (1 << 1) | (1 << 0)) /* ccs timeout */
95
96/* CE_BUF_ACC */
97#define BUF_ACC_DMAWEN (1 << 25)
98#define BUF_ACC_DMAREN (1 << 24)
99#define BUF_ACC_BUSW_32 (0 << 17)
100#define BUF_ACC_BUSW_16 (1 << 17)
101#define BUF_ACC_ATYP (1 << 16)
102
103/* CE_INT */
104#define INT_CCSDE (1 << 29)
105#define INT_CMD12DRE (1 << 26)
106#define INT_CMD12RBE (1 << 25)
107#define INT_CMD12CRE (1 << 24)
108#define INT_DTRANE (1 << 23)
109#define INT_BUFRE (1 << 22)
110#define INT_BUFWEN (1 << 21)
111#define INT_BUFREN (1 << 20)
112#define INT_CCSRCV (1 << 19)
113#define INT_RBSYE (1 << 17)
114#define INT_CRSPE (1 << 16)
115#define INT_CMDVIO (1 << 15)
116#define INT_BUFVIO (1 << 14)
117#define INT_WDATERR (1 << 11)
118#define INT_RDATERR (1 << 10)
119#define INT_RIDXERR (1 << 9)
120#define INT_RSPERR (1 << 8)
121#define INT_CCSTO (1 << 5)
122#define INT_CRCSTO (1 << 4)
123#define INT_WDATTO (1 << 3)
124#define INT_RDATTO (1 << 2)
125#define INT_RBSYTO (1 << 1)
126#define INT_RSPTO (1 << 0)
127#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
128 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
129 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
130 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
131
132/* CE_INT_MASK */
133#define MASK_ALL 0x00000000
134#define MASK_MCCSDE (1 << 29)
135#define MASK_MCMD12DRE (1 << 26)
136#define MASK_MCMD12RBE (1 << 25)
137#define MASK_MCMD12CRE (1 << 24)
138#define MASK_MDTRANE (1 << 23)
139#define MASK_MBUFRE (1 << 22)
140#define MASK_MBUFWEN (1 << 21)
141#define MASK_MBUFREN (1 << 20)
142#define MASK_MCCSRCV (1 << 19)
143#define MASK_MRBSYE (1 << 17)
144#define MASK_MCRSPE (1 << 16)
145#define MASK_MCMDVIO (1 << 15)
146#define MASK_MBUFVIO (1 << 14)
147#define MASK_MWDATERR (1 << 11)
148#define MASK_MRDATERR (1 << 10)
149#define MASK_MRIDXERR (1 << 9)
150#define MASK_MRSPERR (1 << 8)
151#define MASK_MCCSTO (1 << 5)
152#define MASK_MCRCSTO (1 << 4)
153#define MASK_MWDATTO (1 << 3)
154#define MASK_MRDATTO (1 << 2)
155#define MASK_MRBSYTO (1 << 1)
156#define MASK_MRSPTO (1 << 0)
157
158/* CE_HOST_STS1 */
159#define STS1_CMDSEQ (1 << 31)
160
161/* CE_HOST_STS2 */
162#define STS2_CRCSTE (1 << 31)
163#define STS2_CRC16E (1 << 30)
164#define STS2_AC12CRCE (1 << 29)
165#define STS2_RSPCRC7E (1 << 28)
166#define STS2_CRCSTEBE (1 << 27)
167#define STS2_RDATEBE (1 << 26)
168#define STS2_AC12REBE (1 << 25)
169#define STS2_RSPEBE (1 << 24)
170#define STS2_AC12IDXE (1 << 23)
171#define STS2_RSPIDXE (1 << 22)
172#define STS2_CCSTO (1 << 15)
173#define STS2_RDATTO (1 << 14)
174#define STS2_DATBSYTO (1 << 13)
175#define STS2_CRCSTTO (1 << 12)
176#define STS2_AC12BSYTO (1 << 11)
177#define STS2_RSPBSYTO (1 << 10)
178#define STS2_AC12RSPTO (1 << 9)
179#define STS2_RSPTO (1 << 8)
180#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
181 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
182#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
183 STS2_DATBSYTO | STS2_CRCSTTO | \
184 STS2_AC12BSYTO | STS2_RSPBSYTO | \
185 STS2_AC12RSPTO | STS2_RSPTO)
186
187/* CE_VERSION */
188#define SOFT_RST_ON (1 << 31)
189#define SOFT_RST_OFF (0 << 31)
190
191#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
192#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
193#define CLKDEV_INIT 400000 /* 400 KHz */
194
195struct sh_mmcif_host {
196 struct mmc_host *mmc;
197 struct mmc_data *data;
198 struct mmc_command *cmd;
199 struct platform_device *pd;
200 struct clk *hclk;
201 unsigned int clk;
202 int bus_width;
203 u16 wait_int;
204 u16 sd_error;
205 long timeout;
206 void __iomem *addr;
207 wait_queue_head_t intr_wait;
208};
209
210static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
211{
212 return readl(host->addr + reg);
213}
214
215static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
216 unsigned int reg, u32 val)
217{
218 writel(val, host->addr + reg);
219}
220
221static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
222 unsigned int reg, u32 val)
223{
224 writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
225}
226
227static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
228 unsigned int reg, u32 val)
229{
230 writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
231}
232
233
234static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
235{
236 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
237
238 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
239 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
240
241 if (!clk)
242 return;
243 if (p->sup_pclk && clk == host->clk)
244 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
245 else
246 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
247 (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
248
249 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
250}
251
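The divider encoding above compresses to one expression: the quotient parent/target is rounded down to a power of two and its log2 is stored in the CLK_CTRL divider field, masked by CLK_CLEAR. A user-space check of the arithmetic (the field position follows the CLK_CLEAR definition earlier in this file; the example frequencies are made up):

    #include <stdio.h>
    #include <stdint.h>

    #define CLK_CLEAR 0x000f0000u   /* divider field, bits 19..16 */

    static unsigned int ilog2_u32(uint32_t v)   /* floor(log2(v)), v > 0 */
    {
            unsigned int l = 0;
            while (v >>= 1)
                    l++;
            return l;
    }

    static uint32_t divider_bits(uint32_t parent, uint32_t target)
    {
            /* floor-log2 of the quotient equals log2 of the quotient
             * rounded down to a power of two, so one ilog2 does both */
            return (ilog2_u32(parent / target) << 16) & CLK_CLEAR;
    }

    int main(void)
    {
            /* e.g. 104 MHz parent, 400 kHz target: 260 -> 256 = 2^8 */
            printf("%08x\n", (unsigned)divider_bits(104000000, 400000));
            return 0;
    }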
252static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
253{
254 u32 tmp;
255
256 tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
257
258 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
259 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
260 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
261 SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
262 /* byte swap on */
263 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
264}
265
266static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
267{
268 u32 state1, state2;
269 int ret, timeout = 10000000;
270
271 host->sd_error = 0;
272 host->wait_int = 0;
273
274 state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
275 state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
276 pr_debug("%s: ERR HOST_STS1 = %08x\n", \
277 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
278 pr_debug("%s: ERR HOST_STS2 = %08x\n", \
279 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
280
281 if (state1 & STS1_CMDSEQ) {
282 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
283 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
284 while (1) {
285 timeout--;
286 if (timeout < 0) {
 287 pr_err(DRIVER_NAME": Forced end of " \
288 "command sequence timeout err\n");
289 return -EIO;
290 }
291 if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
292 & STS1_CMDSEQ))
293 break;
294 mdelay(1);
295 }
296 sh_mmcif_sync_reset(host);
297 pr_debug(DRIVER_NAME": Forced end of command sequence\n");
298 return -EIO;
299 }
300
301 if (state2 & STS2_CRC_ERR) {
 302 pr_debug(DRIVER_NAME": CRC error occurred\n");
303 ret = -EIO;
304 } else if (state2 & STS2_TIMEOUT_ERR) {
 305 pr_debug(DRIVER_NAME": Timeout error occurred\n");
306 ret = -ETIMEDOUT;
307 } else {
 308 pr_debug(DRIVER_NAME": End/Index error occurred\n");
309 ret = -EIO;
310 }
311 return ret;
312}
313
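sh_mmcif_error_manage() breaks a stuck command sequence with CMD_CTRL_BREAK and then polls STS1_CMDSEQ under a bounded count before giving up. The polling skeleton, isolated with hypothetical stubs:

    #include <stdbool.h>

    extern bool cmdseq_busy(void);   /* hypothetical STS1_CMDSEQ read */
    extern void delay_1ms(void);     /* hypothetical mdelay(1)        */

    static int wait_cmdseq_idle(int timeout_ms)
    {
            while (timeout_ms-- > 0) {
                    if (!cmdseq_busy())
                            return 0;     /* sequencer went idle */
                    delay_1ms();
            }
            return -1;   /* caller soft-resets and fails the request */
    }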
314static int sh_mmcif_single_read(struct sh_mmcif_host *host,
315 struct mmc_request *mrq)
316{
317 struct mmc_data *data = mrq->data;
318 long time;
319 u32 blocksize, i, *p = sg_virt(data->sg);
320
321 host->wait_int = 0;
322
323 /* buf read enable */
324 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
325 time = wait_event_interruptible_timeout(host->intr_wait,
326 host->wait_int == 1 ||
327 host->sd_error == 1, host->timeout);
328 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
329 return sh_mmcif_error_manage(host);
330
331 host->wait_int = 0;
332 blocksize = (BLOCK_SIZE_MASK &
333 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
334 for (i = 0; i < blocksize / 4; i++)
335 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
336
337 /* buffer read end */
338 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
339 time = wait_event_interruptible_timeout(host->intr_wait,
340 host->wait_int == 1 ||
341 host->sd_error == 1, host->timeout);
342 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
343 return sh_mmcif_error_manage(host);
344
345 host->wait_int = 0;
346 return 0;
347}
348
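The single-block read above shows this controller's basic PIO rhythm: unmask the buffer-ready event, sleep until the ISR wakes the waiter, then pull blocksize/4 32-bit words out of the data window. A stubbed model of one block (helpers hypothetical):

    #include <stdint.h>

    extern int wait_buf_ready(void);         /* hypothetical: 0 ok, <0 err */
    extern uint32_t data_window_read(void);  /* hypothetical CE_DATA read  */

    static int read_block(uint32_t *dst, uint32_t blocksize)
    {
            uint32_t i;

            if (wait_buf_ready() < 0)
                    return -1;               /* timeout or SD error */
            for (i = 0; i < blocksize / 4; i++)
                    dst[i] = data_window_read();  /* one word per read */
            return 0;
    }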
349static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
350 struct mmc_request *mrq)
351{
352 struct mmc_data *data = mrq->data;
353 long time;
354 u32 blocksize, i, j, sec, *p;
355
356 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
357 for (j = 0; j < data->sg_len; j++) {
358 p = sg_virt(data->sg);
359 host->wait_int = 0;
360 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
361 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
362 /* buf read enable */
363 time = wait_event_interruptible_timeout(host->intr_wait,
364 host->wait_int == 1 ||
365 host->sd_error == 1, host->timeout);
366
367 if (host->wait_int != 1 &&
368 (time == 0 || host->sd_error != 0))
369 return sh_mmcif_error_manage(host);
370
371 host->wait_int = 0;
372 for (i = 0; i < blocksize / 4; i++)
373 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
374 }
375 if (j < data->sg_len - 1)
376 data->sg++;
377 }
378 return 0;
379}
380
381static int sh_mmcif_single_write(struct sh_mmcif_host *host,
382 struct mmc_request *mrq)
383{
384 struct mmc_data *data = mrq->data;
385 long time;
386 u32 blocksize, i, *p = sg_virt(data->sg);
387
388 host->wait_int = 0;
389 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
390
391 /* buf write enable */
392 time = wait_event_interruptible_timeout(host->intr_wait,
393 host->wait_int == 1 ||
394 host->sd_error == 1, host->timeout);
395 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
396 return sh_mmcif_error_manage(host);
397
398 host->wait_int = 0;
399 blocksize = (BLOCK_SIZE_MASK &
400 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
401 for (i = 0; i < blocksize / 4; i++)
402 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
403
404 /* buffer write end */
405 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
406
407 time = wait_event_interruptible_timeout(host->intr_wait,
408 host->wait_int == 1 ||
409 host->sd_error == 1, host->timeout);
410 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
411 return sh_mmcif_error_manage(host);
412
413 host->wait_int = 0;
414 return 0;
415}
416
417static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
418 struct mmc_request *mrq)
419{
420 struct mmc_data *data = mrq->data;
421 long time;
422 u32 i, sec, j, blocksize, *p;
423
424 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
425
426 for (j = 0; j < data->sg_len; j++) {
427 p = sg_virt(data->sg);
428 host->wait_int = 0;
429 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
430 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
431 /* buf write enable*/
432 time = wait_event_interruptible_timeout(host->intr_wait,
433 host->wait_int == 1 ||
434 host->sd_error == 1, host->timeout);
435
436 if (host->wait_int != 1 &&
437 (time == 0 || host->sd_error != 0))
438 return sh_mmcif_error_manage(host);
439
440 host->wait_int = 0;
441 for (i = 0; i < blocksize / 4; i++)
442 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
443 }
444 if (j < data->sg_len - 1)
445 data->sg++;
446 }
447 return 0;
448}
449
450static void sh_mmcif_get_response(struct sh_mmcif_host *host,
451 struct mmc_command *cmd)
452{
453 if (cmd->flags & MMC_RSP_136) {
454 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
455 cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
456 cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
457 cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
458 } else
459 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
460}
461
462static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
463 struct mmc_command *cmd)
464{
465 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
466}
467
468static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
469 struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
470{
471 u32 tmp = 0;
472
473 /* Response Type check */
474 switch (mmc_resp_type(cmd)) {
475 case MMC_RSP_NONE:
476 tmp |= CMD_SET_RTYP_NO;
477 break;
478 case MMC_RSP_R1:
479 case MMC_RSP_R1B:
480 case MMC_RSP_R3:
481 tmp |= CMD_SET_RTYP_6B;
482 break;
483 case MMC_RSP_R2:
484 tmp |= CMD_SET_RTYP_17B;
485 break;
486 default:
487 pr_err(DRIVER_NAME": unsupported response type.\n");
488 break;
489 }
490 switch (opc) {
491 /* RBSY */
492 case MMC_SWITCH:
493 case MMC_STOP_TRANSMISSION:
494 case MMC_SET_WRITE_PROT:
495 case MMC_CLR_WRITE_PROT:
496 case MMC_ERASE:
497 case MMC_GEN_CMD:
498 tmp |= CMD_SET_RBSY;
499 break;
500 }
501 /* WDAT / DATW */
502 if (host->data) {
503 tmp |= CMD_SET_WDAT;
504 switch (host->bus_width) {
505 case MMC_BUS_WIDTH_1:
506 tmp |= CMD_SET_DATW_1;
507 break;
508 case MMC_BUS_WIDTH_4:
509 tmp |= CMD_SET_DATW_4;
510 break;
511 case MMC_BUS_WIDTH_8:
512 tmp |= CMD_SET_DATW_8;
513 break;
514 default:
515 pr_err(DRIVER_NAME": unsupported bus width.\n");
516 break;
517 }
518 }
519 /* DWEN */
520 if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
521 tmp |= CMD_SET_DWEN;
522 /* CMLTE/CMD12EN */
523 if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
524 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
525 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
526 mrq->data->blocks << 16);
527 }
528 /* RIDXC[1:0] check bits */
529 if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
530 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
531 tmp |= CMD_SET_RIDXC_BITS;
532 /* RCRC7C[1:0] check bits */
533 if (opc == MMC_SEND_OP_COND)
534 tmp |= CMD_SET_CRC7C_BITS;
535 /* RCRC7C[1:0] internal CRC7 */
536 if (opc == MMC_ALL_SEND_CID ||
537 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
538 tmp |= CMD_SET_CRC7C_INTERNAL;
539
540 return opc = ((opc << 24) | tmp);
541}
542
543static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
544 struct mmc_request *mrq, u32 opc)
545{
546 u32 ret;
547
548 switch (opc) {
549 case MMC_READ_MULTIPLE_BLOCK:
550 ret = sh_mmcif_multi_read(host, mrq);
551 break;
552 case MMC_WRITE_MULTIPLE_BLOCK:
553 ret = sh_mmcif_multi_write(host, mrq);
554 break;
555 case MMC_WRITE_BLOCK:
556 ret = sh_mmcif_single_write(host, mrq);
557 break;
558 case MMC_READ_SINGLE_BLOCK:
559 case MMC_SEND_EXT_CSD:
560 ret = sh_mmcif_single_read(host, mrq);
561 break;
562 default:
563 pr_err(DRIVER_NAME": unsupported cmd = d'%08d\n", opc);
564 ret = -EINVAL;
565 break;
566 }
567 return ret;
568}
569
570static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
571 struct mmc_request *mrq, struct mmc_command *cmd)
572{
573 long time;
574 int ret = 0, mask = 0;
575 u32 opc = cmd->opcode;
576
577 host->cmd = cmd;
578
579 switch (opc) {
580 /* response busy check */
581 case MMC_SWITCH:
582 case MMC_STOP_TRANSMISSION:
583 case MMC_SET_WRITE_PROT:
584 case MMC_CLR_WRITE_PROT:
585 case MMC_ERASE:
586 case MMC_GEN_CMD:
587 mask = MASK_MRBSYE;
588 break;
589 default:
590 mask = MASK_MCRSPE;
591 break;
592 }
593 mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
594 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
595 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
596 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
597
598 if (host->data) {
599 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
600 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
601 }
602 opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
603
604 sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
605 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
606 /* set arg */
607 sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
608 host->wait_int = 0;
609 /* set cmd */
610 sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
611
612 time = wait_event_interruptible_timeout(host->intr_wait,
613 host->wait_int == 1 || host->sd_error == 1, host->timeout);
614 if (host->wait_int != 1 && time == 0) {
615 cmd->error = sh_mmcif_error_manage(host);
616 return;
617 }
618 if (host->sd_error) {
619 switch (cmd->opcode) {
620 case MMC_ALL_SEND_CID:
621 case MMC_SELECT_CARD:
622 case MMC_APP_CMD:
623 cmd->error = -ETIMEDOUT;
624 break;
625 default:
626 pr_debug("%s: Cmd(d'%d) err\n",
627 DRIVER_NAME, cmd->opcode);
628 cmd->error = sh_mmcif_error_manage(host);
629 break;
630 }
631 host->sd_error = 0;
632 host->wait_int = 0;
633 return;
634 }
635 if (!(cmd->flags & MMC_RSP_PRESENT)) {
636 cmd->error = ret;
637 host->wait_int = 0;
638 return;
639 }
640 if (host->wait_int == 1) {
641 sh_mmcif_get_response(host, cmd);
642 host->wait_int = 0;
643 }
644 if (host->data) {
645 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
646 if (ret < 0)
647 mrq->data->bytes_xfered = 0;
648 else
649 mrq->data->bytes_xfered =
650 mrq->data->blocks * mrq->data->blksz;
651 }
652 cmd->error = ret;
653}
654
655static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
656 struct mmc_request *mrq, struct mmc_command *cmd)
657{
658 long time;
659
660 if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
661 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
662 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
663 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
664 else {
665 pr_err(DRIVER_NAME": unsupported stop command\n");
666 cmd->error = sh_mmcif_error_manage(host);
667 return;
668 }
669
670 time = wait_event_interruptible_timeout(host->intr_wait,
671 host->wait_int == 1 ||
672 host->sd_error == 1, host->timeout);
673 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
674 cmd->error = sh_mmcif_error_manage(host);
675 return;
676 }
677 sh_mmcif_get_cmd12response(host, cmd);
678 host->wait_int = 0;
679 cmd->error = 0;
680}
681
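/*
 * Request flow: sh_mmcif_request() rejects SD/SDIO-only opcodes up
 * front, runs the command (and any data phase) through
 * sh_mmcif_start_cmd(), then issues the stop command for multi-block
 * transfers before completing with mmc_request_done().
 */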
682static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
683{
684 struct sh_mmcif_host *host = mmc_priv(mmc);
685
686 switch (mrq->cmd->opcode) {
687 /* MMCIF does not support SD/SDIO commands */
688 case SD_IO_SEND_OP_COND:
689 case MMC_APP_CMD:
690 mrq->cmd->error = -ETIMEDOUT;
691 mmc_request_done(mmc, mrq);
692 return;
693 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
694 if (!mrq->data) {
695 /* SEND_IF_COND command (not supported) */
696 mrq->cmd->error = -ETIMEDOUT;
697 mmc_request_done(mmc, mrq);
698 return;
699 }
700 break;
701 default:
702 break;
703 }
704 host->data = mrq->data;
705 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
706 host->data = NULL;
707
708 if (mrq->cmd->error != 0) {
709 mmc_request_done(mmc, mrq);
710 return;
711 }
712 if (mrq->stop)
713 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
714 mmc_request_done(mmc, mrq);
715}
716
717static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
718{
719 struct sh_mmcif_host *host = mmc_priv(mmc);
720 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
721
722 if (ios->power_mode == MMC_POWER_OFF) {
723 /* clock stop */
724 sh_mmcif_clock_control(host, 0);
725 if (p->down_pwr)
726 p->down_pwr(host->pd);
727 return;
728 } else if (ios->power_mode == MMC_POWER_UP) {
729 if (p->set_pwr)
730 p->set_pwr(host->pd, ios->power_mode);
731 }
732
733 if (ios->clock)
734 sh_mmcif_clock_control(host, ios->clock);
735
736 host->bus_width = ios->bus_width;
737}
738
739static struct mmc_host_ops sh_mmcif_ops = {
740 .request = sh_mmcif_request,
741 .set_ios = sh_mmcif_set_ios,
742};
743
744static void sh_mmcif_detect(struct mmc_host *mmc)
745{
746 mmc_detect_change(mmc, 0);
747}
748
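/*
 * Interrupt handler: acknowledge the asserted bits in MMCIF_CE_INT,
 * mask further occurrences of that source, flag sd_error for the
 * error states, then set wait_int and wake whichever thread is
 * sleeping in one of the wait_event_interruptible_timeout() calls
 * above.
 */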
749static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
750{
751 struct sh_mmcif_host *host = dev_id;
752 u32 state = 0;
753 int err = 0;
754
755 state = sh_mmcif_readl(host, MMCIF_CE_INT);
756
757 if (state & INT_RBSYE) {
758 sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
759 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
760 } else if (state & INT_CRSPE) {
761 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
762 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
763 } else if (state & INT_BUFREN) {
764 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
765 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
766 } else if (state & INT_BUFWEN) {
767 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
768 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
769 } else if (state & INT_CMD12DRE) {
770 sh_mmcif_writel(host, MMCIF_CE_INT,
771 ~(INT_CMD12DRE | INT_CMD12RBE |
772 INT_CMD12CRE | INT_BUFRE));
773 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
774 } else if (state & INT_BUFRE) {
775 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
776 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
777 } else if (state & INT_DTRANE) {
778 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
779 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
780 } else if (state & INT_CMD12RBE) {
781 sh_mmcif_writel(host, MMCIF_CE_INT,
782 ~(INT_CMD12RBE | INT_CMD12CRE));
783 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
784 } else if (state & INT_ERR_STS) {
785 /* err interrupts */
786 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
787 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
788 err = 1;
789 } else {
790 pr_debug("%s: unsupported interrupt\n", DRIVER_NAME);
791 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
792 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
793 err = 1;
794 }
795 if (err) {
796 host->sd_error = 1;
797 pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
798 }
799 host->wait_int = 1;
800 wake_up(&host->intr_wait);
801
802 return IRQ_HANDLED;
803}
804
805static int __devinit sh_mmcif_probe(struct platform_device *pdev)
806{
807 int ret = 0, irq[2];
808 struct mmc_host *mmc;
809 struct sh_mmcif_host *host = NULL;
810 struct sh_mmcif_plat_data *pd = NULL;
811 struct resource *res;
812 void __iomem *reg;
813 char clk_name[8];
814
815 irq[0] = platform_get_irq(pdev, 0);
816 irq[1] = platform_get_irq(pdev, 1);
817 if (irq[0] < 0 || irq[1] < 0) {
818 pr_err(DRIVER_NAME": failed to get IRQ\n");
819 return -ENXIO;
820 }
821 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
822 if (!res) {
823 dev_err(&pdev->dev, "platform_get_resource error.\n");
824 return -ENXIO;
825 }
826 reg = ioremap(res->start, resource_size(res));
827 if (!reg) {
828 dev_err(&pdev->dev, "ioremap error.\n");
829 return -ENOMEM;
830 }
831 pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
832 if (!pd) {
833 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
834 ret = -ENXIO;
835 goto clean_up;
836 }
837 mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
838 if (!mmc) {
839 ret = -ENOMEM;
840 goto clean_up;
841 }
842 host = mmc_priv(mmc);
843 host->mmc = mmc;
844 host->addr = reg;
845 host->timeout = 1000;
846
847 snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
848 host->hclk = clk_get(&pdev->dev, clk_name);
849 if (IS_ERR(host->hclk)) {
850 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
851 ret = PTR_ERR(host->hclk);
852 goto clean_up1;
853 }
854 clk_enable(host->hclk);
855 host->clk = clk_get_rate(host->hclk);
856 host->pd = pdev;
857
858 init_waitqueue_head(&host->intr_wait);
859
860 mmc->ops = &sh_mmcif_ops;
861 mmc->f_max = host->clk;
862 /* keep f_min close to 400 kHz */
863 if (mmc->f_max < 51200000)
864 mmc->f_min = mmc->f_max / 128;
865 else if (mmc->f_max < 102400000)
866 mmc->f_min = mmc->f_max / 256;
867 else
868 mmc->f_min = mmc->f_max / 512;
869 if (pd->ocr)
870 mmc->ocr_avail = pd->ocr;
871 mmc->caps = MMC_CAP_MMC_HIGHSPEED;
872 if (pd->caps)
873 mmc->caps |= pd->caps;
874 mmc->max_phys_segs = 128;
875 mmc->max_hw_segs = 128;
876 mmc->max_blk_size = 512;
877 mmc->max_blk_count = 65535;
878 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
879 mmc->max_seg_size = mmc->max_req_size;
880
881 sh_mmcif_sync_reset(host);
882 platform_set_drvdata(pdev, host);
883 mmc_add_host(mmc);
884
885 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
886 if (ret) {
887 pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
888 goto clean_up2;
889 }
890 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
891 if (ret) {
892 free_irq(irq[0], host);
893 pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
894 goto clean_up2;
895 }
896
897 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
898 sh_mmcif_detect(host->mmc);
899
900 pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
901 pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
902 sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
903 return ret;
904
905clean_up2:
906 clk_disable(host->hclk);
907clean_up1:
908 mmc_free_host(mmc);
909clean_up:
910 if (reg)
911 iounmap(reg);
912 return ret;
913}
914
915static int __devexit sh_mmcif_remove(struct platform_device *pdev)
916{
917 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
918 int irq[2];
919
920 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
921
922 irq[0] = platform_get_irq(pdev, 0);
923 irq[1] = platform_get_irq(pdev, 1);
924
925 if (host->addr)
926 iounmap(host->addr);
927
928 platform_set_drvdata(pdev, NULL);
929 mmc_remove_host(host->mmc);
930
931 free_irq(irq[0], host);
932 free_irq(irq[1], host);
933
934 clk_disable(host->hclk);
935 mmc_free_host(host->mmc);
936
937 return 0;
938}
939
940static struct platform_driver sh_mmcif_driver = {
941 .probe = sh_mmcif_probe,
942 .remove = sh_mmcif_remove,
943 .driver = {
944 .name = DRIVER_NAME,
945 },
946};
947
948static int __init sh_mmcif_init(void)
949{
950 return platform_driver_register(&sh_mmcif_driver);
951}
952
953static void __exit sh_mmcif_exit(void)
954{
955 platform_driver_unregister(&sh_mmcif_driver);
956}
957
958module_init(sh_mmcif_init);
959module_exit(sh_mmcif_exit);
960
961
962MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
963MODULE_LICENSE("GPL");
964MODULE_ALIAS(DRIVER_NAME);
965MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
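For context, the probe and set_ios paths above consume four fields of
struct sh_mmcif_plat_data (set_pwr, down_pwr, ocr, caps). A minimal
board-support sketch, with hypothetical helper names and made-up OCR
and capability values, assuming the platform-data layout implied by
those call sites:

	#include <linux/platform_device.h>
	#include <linux/mmc/host.h>
	#include <linux/mmc/sh_mmcif.h>

	/* Hypothetical board helpers: toggle a regulator or GPIO. */
	static void myboard_mmcif_set_pwr(struct platform_device *pdev, int state)
	{
	}

	static void myboard_mmcif_down_pwr(struct platform_device *pdev)
	{
	}

	static struct sh_mmcif_plat_data myboard_mmcif_pdata = {
		.set_pwr	= myboard_mmcif_set_pwr,
		.down_pwr	= myboard_mmcif_down_pwr,
		.ocr		= MMC_VDD_32_33 | MMC_VDD_33_34,
		.caps		= MMC_CAP_4_BIT_DATA,
	};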
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554ddec6b3..cec99958b652 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
1032 1032
1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) 1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
1034{ 1034{
1035 return mmc_suspend_host(tifm_get_drvdata(sock), state); 1035 return mmc_suspend_host(tifm_get_drvdata(sock));
1036} 1036}
1037 1037
1038static int tifm_sd_resume(struct tifm_dev *sock) 1038static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 883fcac21004..ee7d0a5a51c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -768,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
768 struct mmc_host *mmc = platform_get_drvdata(dev); 768 struct mmc_host *mmc = platform_get_drvdata(dev);
769 int ret; 769 int ret;
770 770
771 ret = mmc_suspend_host(mmc, state); 771 ret = mmc_suspend_host(mmc);
772 772
773 /* Tell MFD core it can disable us now.*/ 773 /* Tell MFD core it can disable us now.*/
774 if (!ret && cell->disable) 774 if (!ret && cell->disable)
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a94376..19f2d72dbca5 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
1280 via_save_pcictrlreg(host); 1280 via_save_pcictrlreg(host);
1281 via_save_sdcreg(host); 1281 via_save_sdcreg(host);
1282 1282
1283 ret = mmc_suspend_host(host->mmc, state); 1283 ret = mmc_suspend_host(host->mmc);
1284 1284
1285 pci_save_state(pcidev); 1285 pci_save_state(pcidev);
1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); 1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01eece8..0012f5d13d28 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
1819{ 1819{
1820 BUG_ON(host == NULL); 1820 BUG_ON(host == NULL);
1821 1821
1822 return mmc_suspend_host(host->mmc, state); 1822 return mmc_suspend_host(host->mmc);
1823} 1823}
1824 1824
1825static int wbsd_resume(struct wbsd_host *host) 1825static int wbsd_resume(struct wbsd_host *host)
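The four hunks above track one core API change: mmc_suspend_host()
has dropped its pm_message_t argument. A minimal sketch of an adapted
suspend hook, using a hypothetical foo driver:

	#include <linux/platform_device.h>
	#include <linux/mmc/host.h>

	/* The pm_message_t still arrives via the platform suspend
	 * callback, but is no longer forwarded to the MMC core. */
	static int foo_suspend(struct platform_device *dev, pm_message_t state)
	{
		struct mmc_host *mmc = platform_get_drvdata(dev);

		return mmc_suspend_host(mmc);
	}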
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index c32822ad84a4..070211a5955c 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -8,3 +8,27 @@ config RAPIDIO_DISC_TIMEOUT
8 ---help--- 8 ---help---
9 Amount of time a discovery node waits for a host to complete 9 Amount of time a discovery node waits for a host to complete
10 enumeration before giving up. 10 enumeration before giving up.
11
12config RAPIDIO_ENABLE_RX_TX_PORTS
13 bool "Enable RapidIO Input/Output Ports"
14 depends on RAPIDIO
15 ---help---
16 The RapidIO specification describes an Output port transmit
17 enable and an Input port receive enable. The recommended state
18 for both Input and Output ports is disabled. When this option
19 is set, the RapidIO subsystem enables all ports for Input and
20 Output operation, allowing traffic other than Maintenance
21 transfers.
22
23source "drivers/rapidio/switches/Kconfig"
24
25config RAPIDIO_DEBUG
26 bool "RapidIO subsystem debug messages"
27 depends on RAPIDIO
28 help
29 Say Y here if you want the RapidIO subsystem to produce a bunch of
30 debug messages to the system log. Select this if you are having a
31 problem with the RapidIO subsystem and want to see more of what is
32 going on.
33
34 If you are unsure about this, say N here.
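For reference, a hypothetical .config fragment exercising both new
options introduced above:

	CONFIG_RAPIDIO=y
	CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y
	CONFIG_RAPIDIO_DEBUG=y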
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 7c0e1818de51..b6139fe187bf 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -4,3 +4,7 @@
4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o 4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
5 5
6obj-$(CONFIG_RAPIDIO) += switches/ 6obj-$(CONFIG_RAPIDIO) += switches/
7
8ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
9EXTRA_CFLAGS += -DDEBUG
10endif
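The conditional above simply builds the whole drivers/rapidio tree
with -DDEBUG when CONFIG_RAPIDIO_DEBUG=y. A sketch of the effect,
using a hypothetical helper: with DEBUG defined, pr_debug() expands
to a real printk (or a dynamic-debug call) instead of being compiled
out, so trace lines like the ones added in rio-scan.c reach the log.

	#include <linux/kernel.h>

	/* Hypothetical example: without -DDEBUG this pr_debug()
	 * is a no-op. */
	static void rio_dbg_example(int port_num)
	{
		pr_debug("RIO: scanning device on port %d\n", port_num);
	}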
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 45415096c294..566432106cc5 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -4,6 +4,14 @@
4 * Copyright 2005 MontaVista Software, Inc. 4 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 5 * Matt Porter <mporter@kernel.crashing.org>
6 * 6 *
7 * Copyright 2009 Integrated Device Technology, Inc.
8 * Alex Bounine <alexandre.bounine@idt.com>
9 * - Added Port-Write/Error Management initialization and handling
10 *
11 * Copyright 2009 Sysgo AG
12 * Thomas Moll <thomas.moll@sysgo.com>
13 * - Added Input- Output- enable functionality, to allow full communication
14 *
7 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
@@ -31,15 +39,16 @@
31LIST_HEAD(rio_devices); 39LIST_HEAD(rio_devices);
32static LIST_HEAD(rio_switches); 40static LIST_HEAD(rio_switches);
33 41
34#define RIO_ENUM_CMPL_MAGIC 0xdeadbeef
35
36static void rio_enum_timeout(unsigned long); 42static void rio_enum_timeout(unsigned long);
37 43
44static void rio_init_em(struct rio_dev *rdev);
45
38DEFINE_SPINLOCK(rio_global_list_lock); 46DEFINE_SPINLOCK(rio_global_list_lock);
39 47
40static int next_destid = 0; 48static int next_destid = 0;
41static int next_switchid = 0; 49static int next_switchid = 0;
42static int next_net = 0; 50static int next_net = 0;
51static int next_comptag;
43 52
44static struct timer_list rio_enum_timer = 53static struct timer_list rio_enum_timer =
45TIMER_INITIALIZER(rio_enum_timeout, 0, 0); 54TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -52,12 +61,6 @@ static int rio_mport_phys_table[] = {
52 -1, 61 -1,
53}; 62};
54 63
55static int rio_sport_phys_table[] = {
56 RIO_EFB_PAR_EP_FREE_ID,
57 RIO_EFB_SER_EP_FREE_ID,
58 -1,
59};
60
61/** 64/**
62 * rio_get_device_id - Get the base/extended device id for a device 65 * rio_get_device_id - Get the base/extended device id for a device
63 * @port: RIO master port 66 * @port: RIO master port
@@ -118,12 +121,26 @@ static int rio_clear_locks(struct rio_mport *port)
118 u32 result; 121 u32 result;
119 int ret = 0; 122 int ret = 0;
120 123
121 /* Write component tag CSR magic complete value */ 124 /* Assign component tag to all devices */
122 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, 125 next_comptag = 1;
123 RIO_ENUM_CMPL_MAGIC); 126 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
124 list_for_each_entry(rdev, &rio_devices, global_list) 127
125 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, 128 list_for_each_entry(rdev, &rio_devices, global_list) {
126 RIO_ENUM_CMPL_MAGIC); 129 /* Mark device as discovered */
130 rio_read_config_32(rdev,
131 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
132 &result);
133 rio_write_config_32(rdev,
134 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
135 result | RIO_PORT_GEN_DISCOVERED);
136
137 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
138 rdev->comp_tag = next_comptag++;
139 if (next_comptag >= 0x10000) {
140 pr_err("RIO: Component Tag Counter Overflow\n");
141 break;
142 }
143 }
127 144
128 /* Release host device id locks */ 145 /* Release host device id locks */
129 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, 146 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
@@ -229,27 +246,37 @@ static int rio_is_switch(struct rio_dev *rdev)
229} 246}
230 247
231/** 248/**
232 * rio_route_set_ops- Sets routing operations for a particular vendor switch 249 * rio_switch_init - Sets switch operations for a particular vendor switch
233 * @rdev: RIO device 250 * @rdev: RIO device
251 * @do_enum: Enumeration/Discovery mode flag
234 * 252 *
235 * Searches the RIO route ops table for known switch types. If the vid 253 * Searches the RIO switch ops table for known switch types. If the vid
236 * and did match a switch table entry, then set the add_entry() and 254 * and did match a switch table entry, then call switch initialization
237 * get_entry() ops to the table entry values. 255 * routine to setup switch-specific routines.
238 */ 256 */
239static void rio_route_set_ops(struct rio_dev *rdev) 257static void rio_switch_init(struct rio_dev *rdev, int do_enum)
240{ 258{
241 struct rio_route_ops *cur = __start_rio_route_ops; 259 struct rio_switch_ops *cur = __start_rio_switch_ops;
242 struct rio_route_ops *end = __end_rio_route_ops; 260 struct rio_switch_ops *end = __end_rio_switch_ops;
243 261
244 while (cur < end) { 262 while (cur < end) {
245 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { 263 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
246 pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev)); 264 pr_debug("RIO: calling init routine for %s\n",
247 rdev->rswitch->add_entry = cur->add_hook; 265 rio_name(rdev));
248 rdev->rswitch->get_entry = cur->get_hook; 266 cur->init_hook(rdev, do_enum);
267 break;
249 } 268 }
250 cur++; 269 cur++;
251 } 270 }
252 271
272 if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
273 pr_debug("RIO: adding STD routing ops for %s\n",
274 rio_name(rdev));
275 rdev->rswitch->add_entry = rio_std_route_add_entry;
276 rdev->rswitch->get_entry = rio_std_route_get_entry;
277 rdev->rswitch->clr_table = rio_std_route_clr_table;
278 }
279
253 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) 280 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
254 printk(KERN_ERR "RIO: missing routing ops for %s\n", 281 printk(KERN_ERR "RIO: missing routing ops for %s\n",
255 rio_name(rdev)); 282 rio_name(rdev));
@@ -281,6 +308,65 @@ static int __devinit rio_add_device(struct rio_dev *rdev)
281} 308}
282 309
283/** 310/**
311 * rio_enable_rx_tx_port - enable the input receiver and output transmitter of
312 * a given port
313 * @port: Master port associated with the RIO network
314 * @local: if set, select the local port; otherwise a remote device is accessed
315 * @destid: Destination ID of the device to check host bit
316 * @hopcount: Number of hops to reach the target
317 * @port_num: Port number (on a switch) to enable on a far-end device
318 *
319 * Returns %0 on success or %-EIO if a maintenance access to the port
320 * control register (EXT_PTR+0x3C) fails
321 */
322inline int rio_enable_rx_tx_port(struct rio_mport *port,
323 int local, u16 destid,
324 u8 hopcount, u8 port_num) {
325#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
326 u32 regval;
327 u32 ext_ftr_ptr;
328
329 /*
330 * enable rx input tx output port
331 */
332 pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
333 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
334
335 ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
336
337 if (local) {
338 rio_local_read_config_32(port, ext_ftr_ptr +
339 RIO_PORT_N_CTL_CSR(0),
340 &regval);
341 } else {
342 if (rio_mport_read_config_32(port, destid, hopcount,
343 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
344 return -EIO;
345 }
346
347 if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
348 /* serial */
349 regval = regval | RIO_PORT_N_CTL_EN_RX_SER
350 | RIO_PORT_N_CTL_EN_TX_SER;
351 } else {
352 /* parallel */
353 regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
354 | RIO_PORT_N_CTL_EN_TX_PAR;
355 }
356
357 if (local) {
358 rio_local_write_config_32(port, ext_ftr_ptr +
359 RIO_PORT_N_CTL_CSR(0), regval);
360 } else {
361 if (rio_mport_write_config_32(port, destid, hopcount,
362 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
363 return -EIO;
364 }
365#endif
366 return 0;
367}
368
369/**
284 * rio_setup_device- Allocates and sets up a RIO device 370 * rio_setup_device- Allocates and sets up a RIO device
285 * @net: RIO network 371 * @net: RIO network
286 * @port: Master port to send transactions 372 * @port: Master port to send transactions
@@ -325,8 +411,14 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
325 rdev->asm_rev = result >> 16; 411 rdev->asm_rev = result >> 16;
326 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, 412 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
327 &rdev->pef); 413 &rdev->pef);
328 if (rdev->pef & RIO_PEF_EXT_FEATURES) 414 if (rdev->pef & RIO_PEF_EXT_FEATURES) {
329 rdev->efptr = result & 0xffff; 415 rdev->efptr = result & 0xffff;
416 rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
417 hopcount);
418
419 rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
420 hopcount, RIO_EFB_ERR_MGMNT);
421 }
330 422
331 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, 423 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
332 &rdev->src_ops); 424 &rdev->src_ops);
@@ -349,12 +441,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
349 if (rio_is_switch(rdev)) { 441 if (rio_is_switch(rdev)) {
350 rio_mport_read_config_32(port, destid, hopcount, 442 rio_mport_read_config_32(port, destid, hopcount,
351 RIO_SWP_INFO_CAR, &rdev->swpinfo); 443 RIO_SWP_INFO_CAR, &rdev->swpinfo);
352 rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL); 444 rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
353 if (!rswitch) 445 if (!rswitch)
354 goto cleanup; 446 goto cleanup;
355 rswitch->switchid = next_switchid; 447 rswitch->switchid = next_switchid;
356 rswitch->hopcount = hopcount; 448 rswitch->hopcount = hopcount;
357 rswitch->destid = destid; 449 rswitch->destid = destid;
450 rswitch->port_ok = 0;
358 rswitch->route_table = kzalloc(sizeof(u8)* 451 rswitch->route_table = kzalloc(sizeof(u8)*
359 RIO_MAX_ROUTE_ENTRIES(port->sys_size), 452 RIO_MAX_ROUTE_ENTRIES(port->sys_size),
360 GFP_KERNEL); 453 GFP_KERNEL);
@@ -367,13 +460,22 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
367 rdev->rswitch = rswitch; 460 rdev->rswitch = rswitch;
368 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, 461 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
369 rdev->rswitch->switchid); 462 rdev->rswitch->switchid);
370 rio_route_set_ops(rdev); 463 rio_switch_init(rdev, do_enum);
464
465 if (do_enum && rdev->rswitch->clr_table)
466 rdev->rswitch->clr_table(port, destid, hopcount,
467 RIO_GLOBAL_TABLE);
371 468
372 list_add_tail(&rswitch->node, &rio_switches); 469 list_add_tail(&rswitch->node, &rio_switches);
373 470
374 } else 471 } else {
472 if (do_enum)
473 /* Enable Input/Output port (transmitter/receiver) */
474 rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
475
375 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, 476 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
376 rdev->destid); 477 rdev->destid);
478 }
377 479
378 rdev->dev.bus = &rio_bus_type; 480 rdev->dev.bus = &rio_bus_type;
379 481
@@ -414,23 +516,29 @@ cleanup:
414 * 516 *
415 * Reads the port error status CSR for a particular switch port to 517 * Reads the port error status CSR for a particular switch port to
416 * determine if the port has an active link. Returns 518 * determine if the port has an active link. Returns
417 * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is 519 * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
418 * inactive. 520 * inactive.
419 */ 521 */
420static int 522static int
421rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) 523rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
422{ 524{
423 u32 result; 525 u32 result = 0;
424 u32 ext_ftr_ptr; 526 u32 ext_ftr_ptr;
425 527
426 int *entry = rio_sport_phys_table; 528 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
427
428 do {
429 if ((ext_ftr_ptr =
430 rio_mport_get_feature(port, 0, destid, hopcount, *entry)))
431 529
530 while (ext_ftr_ptr) {
531 rio_mport_read_config_32(port, destid, hopcount,
532 ext_ftr_ptr, &result);
533 result = RIO_GET_BLOCK_ID(result);
534 if ((result == RIO_EFB_SER_EP_FREE_ID) ||
535 (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
536 (result == RIO_EFB_SER_EP_FREC_ID))
432 break; 537 break;
433 } while (*++entry >= 0); 538
539 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
540 ext_ftr_ptr);
541 }
434 542
435 if (ext_ftr_ptr) 543 if (ext_ftr_ptr)
436 rio_mport_read_config_32(port, destid, hopcount, 544 rio_mport_read_config_32(port, destid, hopcount,
@@ -438,7 +546,81 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
438 RIO_PORT_N_ERR_STS_CSR(sport), 546 RIO_PORT_N_ERR_STS_CSR(sport),
439 &result); 547 &result);
440 548
441 return (result & PORT_N_ERR_STS_PORT_OK); 549 return result & RIO_PORT_N_ERR_STS_PORT_OK;
550}
551
552/**
553 * rio_lock_device - Acquires host device lock for specified device
554 * @port: Master port to send transaction
555 * @destid: Destination ID for device/switch
556 * @hopcount: Hopcount to reach switch
557 * @wait_ms: Max wait time in msec (0 = no timeout)
558 *
559 * Attempts to acquire the host device lock for the specified device.
560 * Returns 0 if the device lock was acquired or -EINVAL if the timeout expires.
561 */
562static int
563rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms)
564{
565 u32 result;
566 int tcnt = 0;
567
568 /* Attempt to acquire device lock */
569 rio_mport_write_config_32(port, destid, hopcount,
570 RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
571 rio_mport_read_config_32(port, destid, hopcount,
572 RIO_HOST_DID_LOCK_CSR, &result);
573
574 while (result != port->host_deviceid) {
575 if (wait_ms != 0 && tcnt == wait_ms) {
576 pr_debug("RIO: timeout when locking device %x:%x\n",
577 destid, hopcount);
578 return -EINVAL;
579 }
580
581 /* Delay a bit */
582 mdelay(1);
583 tcnt++;
584 /* Try to acquire device lock again */
585 rio_mport_write_config_32(port, destid,
586 hopcount,
587 RIO_HOST_DID_LOCK_CSR,
588 port->host_deviceid);
589 rio_mport_read_config_32(port, destid,
590 hopcount,
591 RIO_HOST_DID_LOCK_CSR, &result);
592 }
593
594 return 0;
595}
596
597/**
598 * rio_unlock_device - Releases host device lock for specified device
599 * @port: Master port to send transaction
600 * @destid: Destination ID for device/switch
601 * @hopcount: Hopcount to reach switch
602 *
603 * Returns 0 if the device lock was released or -EINVAL on failure.
604 */
605static int
606rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
607{
608 u32 result;
609
610 /* Release device lock */
611 rio_mport_write_config_32(port, destid,
612 hopcount,
613 RIO_HOST_DID_LOCK_CSR,
614 port->host_deviceid);
615 rio_mport_read_config_32(port, destid, hopcount,
616 RIO_HOST_DID_LOCK_CSR, &result);
617 if ((result & 0xffff) != 0xffff) {
618 pr_debug("RIO: badness when releasing device lock %x:%x\n",
619 destid, hopcount);
620 return -EINVAL;
621 }
622
623 return 0;
442} 624}
443 625
444/** 626/**
@@ -448,6 +630,7 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
448 * @table: Routing table ID 630 * @table: Routing table ID
449 * @route_destid: Destination ID to be routed 631 * @route_destid: Destination ID to be routed
450 * @route_port: Port number to be routed 632 * @route_port: Port number to be routed
633 * @lock: lock switch device flag
451 * 634 *
452 * Calls the switch specific add_entry() method to add a route entry 635 * Calls the switch specific add_entry() method to add a route entry
453 * on a switch. The route table can be specified using the @table 636 * on a switch. The route table can be specified using the @table
@@ -456,12 +639,26 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
456 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL 639 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL
457 * on failure. 640 * on failure.
458 */ 641 */
459static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, 642static int
460 u16 table, u16 route_destid, u8 route_port) 643rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
644 u16 table, u16 route_destid, u8 route_port, int lock)
461{ 645{
462 return rswitch->add_entry(mport, rswitch->destid, 646 int rc;
647
648 if (lock) {
649 rc = rio_lock_device(mport, rswitch->destid,
650 rswitch->hopcount, 1000);
651 if (rc)
652 return rc;
653 }
654
655 rc = rswitch->add_entry(mport, rswitch->destid,
463 rswitch->hopcount, table, 656 rswitch->hopcount, table,
464 route_destid, route_port); 657 route_destid, route_port);
658 if (lock)
659 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
660
661 return rc;
465} 662}
466 663
467/** 664/**
@@ -471,6 +668,7 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
471 * @table: Routing table ID 668 * @table: Routing table ID
472 * @route_destid: Destination ID to be routed 669 * @route_destid: Destination ID to be routed
473 * @route_port: Pointer to read port number into 670 * @route_port: Pointer to read port number into
671 * @lock: lock switch device flag
474 * 672 *
475 * Calls the switch specific get_entry() method to read a route entry 673 * Calls the switch specific get_entry() method to read a route entry
476 * in a switch. The route table can be specified using the @table 674 * in a switch. The route table can be specified using the @table
@@ -481,11 +679,24 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
481 */ 679 */
482static int 680static int
483rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, 681rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
484 u16 route_destid, u8 * route_port) 682 u16 route_destid, u8 *route_port, int lock)
485{ 683{
486 return rswitch->get_entry(mport, rswitch->destid, 684 int rc;
685
686 if (lock) {
687 rc = rio_lock_device(mport, rswitch->destid,
688 rswitch->hopcount, 1000);
689 if (rc)
690 return rc;
691 }
692
693 rc = rswitch->get_entry(mport, rswitch->destid,
487 rswitch->hopcount, table, 694 rswitch->hopcount, table,
488 route_destid, route_port); 695 route_destid, route_port);
696 if (lock)
697 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
698
699 return rc;
489} 700}
490 701
491/** 702/**
@@ -625,14 +836,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
625 sw_inport = rio_get_swpinfo_inport(port, 836 sw_inport = rio_get_swpinfo_inport(port,
626 RIO_ANY_DESTID(port->sys_size), hopcount); 837 RIO_ANY_DESTID(port->sys_size), hopcount);
627 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 838 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
628 port->host_deviceid, sw_inport); 839 port->host_deviceid, sw_inport, 0);
629 rdev->rswitch->route_table[port->host_deviceid] = sw_inport; 840 rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
630 841
631 for (destid = 0; destid < next_destid; destid++) { 842 for (destid = 0; destid < next_destid; destid++) {
632 if (destid == port->host_deviceid) 843 if (destid == port->host_deviceid)
633 continue; 844 continue;
634 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 845 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
635 destid, sw_inport); 846 destid, sw_inport, 0);
636 rdev->rswitch->route_table[destid] = sw_inport; 847 rdev->rswitch->route_table[destid] = sw_inport;
637 } 848 }
638 849
@@ -644,8 +855,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
644 rio_name(rdev), rdev->vid, rdev->did, num_ports); 855 rio_name(rdev), rdev->vid, rdev->did, num_ports);
645 sw_destid = next_destid; 856 sw_destid = next_destid;
646 for (port_num = 0; port_num < num_ports; port_num++) { 857 for (port_num = 0; port_num < num_ports; port_num++) {
647 if (sw_inport == port_num) 858 /*Enable Input Output Port (transmitter reviever)*/
859 rio_enable_rx_tx_port(port, 0,
860 RIO_ANY_DESTID(port->sys_size),
861 hopcount, port_num);
862
863 if (sw_inport == port_num) {
864 rdev->rswitch->port_ok |= (1 << port_num);
648 continue; 865 continue;
866 }
649 867
650 cur_destid = next_destid; 868 cur_destid = next_destid;
651 869
@@ -655,10 +873,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
655 pr_debug( 873 pr_debug(
656 "RIO: scanning device on port %d\n", 874 "RIO: scanning device on port %d\n",
657 port_num); 875 port_num);
876 rdev->rswitch->port_ok |= (1 << port_num);
658 rio_route_add_entry(port, rdev->rswitch, 877 rio_route_add_entry(port, rdev->rswitch,
659 RIO_GLOBAL_TABLE, 878 RIO_GLOBAL_TABLE,
660 RIO_ANY_DESTID(port->sys_size), 879 RIO_ANY_DESTID(port->sys_size),
661 port_num); 880 port_num, 0);
662 881
663 if (rio_enum_peer(net, port, hopcount + 1) < 0) 882 if (rio_enum_peer(net, port, hopcount + 1) < 0)
664 return -1; 883 return -1;
@@ -672,15 +891,35 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
672 rio_route_add_entry(port, rdev->rswitch, 891 rio_route_add_entry(port, rdev->rswitch,
673 RIO_GLOBAL_TABLE, 892 RIO_GLOBAL_TABLE,
674 destid, 893 destid,
675 port_num); 894 port_num,
895 0);
676 rdev->rswitch-> 896 rdev->rswitch->
677 route_table[destid] = 897 route_table[destid] =
678 port_num; 898 port_num;
679 } 899 }
680 } 900 }
901 } else {
902 /* If switch supports Error Management,
903 * set PORT_LOCKOUT bit for unused port
904 */
905 if (rdev->em_efptr)
906 rio_set_port_lockout(rdev, port_num, 1);
907
908 rdev->rswitch->port_ok &= ~(1 << port_num);
681 } 909 }
682 } 910 }
683 911
912 /* Direct Port-write messages to the enumerating host */
913 if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
914 (rdev->em_efptr)) {
915 rio_write_config_32(rdev,
916 rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
917 (port->host_deviceid << 16) |
918 (port->sys_size << 15));
919 }
920
921 rio_init_em(rdev);
922
684 /* Check for empty switch */ 923 /* Check for empty switch */
685 if (next_destid == sw_destid) { 924 if (next_destid == sw_destid) {
686 next_destid++; 925 next_destid++;
@@ -700,21 +939,16 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
700 * rio_enum_complete- Tests if enumeration of a network is complete 939 * rio_enum_complete- Tests if enumeration of a network is complete
701 * @port: Master port to send transaction 940 * @port: Master port to send transaction
702 * 941 *
703 * Tests the Component Tag CSR for presence of the magic enumeration 942 * Tests the Component Tag CSR for non-zero value (enumeration
704 * complete flag. Return %1 if enumeration is complete or %0 if 943 * complete flag). Return %1 if enumeration is complete or %0 if
705 * enumeration is incomplete. 944 * enumeration is incomplete.
706 */ 945 */
707static int rio_enum_complete(struct rio_mport *port) 946static int rio_enum_complete(struct rio_mport *port)
708{ 947{
709 u32 tag_csr; 948 u32 tag_csr;
710 int ret = 0;
711 949
712 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr); 950 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
713 951 return (tag_csr & 0xffff) ? 1 : 0;
714 if (tag_csr == RIO_ENUM_CMPL_MAGIC)
715 ret = 1;
716
717 return ret;
718} 952}
719 953
720/** 954/**
@@ -763,17 +997,21 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
763 pr_debug( 997 pr_debug(
764 "RIO: scanning device on port %d\n", 998 "RIO: scanning device on port %d\n",
765 port_num); 999 port_num);
1000
1001 rio_lock_device(port, destid, hopcount, 1000);
1002
766 for (ndestid = 0; 1003 for (ndestid = 0;
767 ndestid < RIO_ANY_DESTID(port->sys_size); 1004 ndestid < RIO_ANY_DESTID(port->sys_size);
768 ndestid++) { 1005 ndestid++) {
769 rio_route_get_entry(port, rdev->rswitch, 1006 rio_route_get_entry(port, rdev->rswitch,
770 RIO_GLOBAL_TABLE, 1007 RIO_GLOBAL_TABLE,
771 ndestid, 1008 ndestid,
772 &route_port); 1009 &route_port, 0);
773 if (route_port == port_num) 1010 if (route_port == port_num)
774 break; 1011 break;
775 } 1012 }
776 1013
1014 rio_unlock_device(port, destid, hopcount);
777 if (rio_disc_peer 1015 if (rio_disc_peer
778 (net, port, ndestid, hopcount + 1) < 0) 1016 (net, port, ndestid, hopcount + 1) < 0)
779 return -1; 1017 return -1;
@@ -792,7 +1030,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
792 * 1030 *
793 * Reads the port error status CSR for the master port to 1031 * Reads the port error status CSR for the master port to
794 * determine if the port has an active link. Returns 1032 * determine if the port has an active link. Returns
795 * %PORT_N_ERR_STS_PORT_OK if the master port is active 1033 * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active
796 * or %0 if it is inactive. 1034 * or %0 if it is inactive.
797 */ 1035 */
798static int rio_mport_is_active(struct rio_mport *port) 1036static int rio_mport_is_active(struct rio_mport *port)
@@ -813,7 +1051,7 @@ static int rio_mport_is_active(struct rio_mport *port)
813 RIO_PORT_N_ERR_STS_CSR(port->index), 1051 RIO_PORT_N_ERR_STS_CSR(port->index),
814 &result); 1052 &result);
815 1053
816 return (result & PORT_N_ERR_STS_PORT_OK); 1054 return result & RIO_PORT_N_ERR_STS_PORT_OK;
817} 1055}
818 1056
819/** 1057/**
@@ -866,12 +1104,17 @@ static void rio_update_route_tables(struct rio_mport *port)
866 continue; 1104 continue;
867 1105
868 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { 1106 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
1107 /* Skip if destid ends in an empty switch */
1108 if (rswitch->destid == destid)
1109 continue;
869 1110
870 sport = rio_get_swpinfo_inport(port, 1111 sport = rio_get_swpinfo_inport(port,
871 rswitch->destid, rswitch->hopcount); 1112 rswitch->destid, rswitch->hopcount);
872 1113
873 if (rswitch->add_entry) { 1114 if (rswitch->add_entry) {
874 rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport); 1115 rio_route_add_entry(port, rswitch,
1116 RIO_GLOBAL_TABLE, destid,
1117 sport, 0);
875 rswitch->route_table[destid] = sport; 1118 rswitch->route_table[destid] = sport;
876 } 1119 }
877 } 1120 }
@@ -880,6 +1123,32 @@ static void rio_update_route_tables(struct rio_mport *port)
880} 1123}
881 1124
882/** 1125/**
1126 * rio_init_em - Initializes RIO Error Management (for switches)
1127 * @rdev: RIO device (switch) to initialize
1128 *
1129 * For each enumerated switch, call the device-specific error management
1130 * initialization routine (if one is supplied by the switch driver).
1131 */
1132static void rio_init_em(struct rio_dev *rdev)
1133{
1134 if (rio_is_switch(rdev) && (rdev->em_efptr) &&
1135 (rdev->rswitch->em_init)) {
1136 rdev->rswitch->em_init(rdev);
1137 }
1138}
1139
1140/**
1141 * rio_pw_enable - Enables/disables port-write handling by a master port
1142 * @port: Master port associated with port-write handling
1143 * @enable: 1=enable, 0=disable
1144 */
1145static void rio_pw_enable(struct rio_mport *port, int enable)
1146{
1147 if (port->ops->pwenable)
1148 port->ops->pwenable(port, enable);
1149}
1150
1151/**
883 * rio_enum_mport- Start enumeration through a master port 1152 * rio_enum_mport- Start enumeration through a master port
884 * @mport: Master port to send transactions 1153 * @mport: Master port to send transactions
885 * 1154 *
@@ -911,6 +1180,10 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
911 rc = -ENOMEM; 1180 rc = -ENOMEM;
912 goto out; 1181 goto out;
913 } 1182 }
1183
1184 /* Enable Input/Output port (transmitter/receiver) */
1185 rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
1186
914 if (rio_enum_peer(net, mport, 0) < 0) { 1187 if (rio_enum_peer(net, mport, 0) < 0) {
915 /* A higher priority host won enumeration, bail. */ 1188 /* A higher priority host won enumeration, bail. */
916 printk(KERN_INFO 1189 printk(KERN_INFO
@@ -922,6 +1195,7 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
922 } 1195 }
923 rio_update_route_tables(mport); 1196 rio_update_route_tables(mport);
924 rio_clear_locks(mport); 1197 rio_clear_locks(mport);
1198 rio_pw_enable(mport, 1);
925 } else { 1199 } else {
926 printk(KERN_INFO "RIO: master port %d link inactive\n", 1200 printk(KERN_INFO "RIO: master port %d link inactive\n",
927 mport->id); 1201 mport->id);
@@ -945,15 +1219,22 @@ static void rio_build_route_tables(void)
945 u8 sport; 1219 u8 sport;
946 1220
947 list_for_each_entry(rdev, &rio_devices, global_list) 1221 list_for_each_entry(rdev, &rio_devices, global_list)
948 if (rio_is_switch(rdev)) 1222 if (rio_is_switch(rdev)) {
949 for (i = 0; 1223 rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
950 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); 1224 rdev->rswitch->hopcount, 1000);
951 i++) { 1225 for (i = 0;
952 if (rio_route_get_entry 1226 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
953 (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE, 1227 i++) {
954 i, &sport) < 0) 1228 if (rio_route_get_entry
955 continue; 1229 (rdev->net->hport, rdev->rswitch,
956 rdev->rswitch->route_table[i] = sport; 1230 RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
1231 continue;
1232 rdev->rswitch->route_table[i] = sport;
1233 }
1234
1235 rio_unlock_device(rdev->net->hport,
1236 rdev->rswitch->destid,
1237 rdev->rswitch->hopcount);
957 } 1238 }
958} 1239}
959 1240
@@ -1012,6 +1293,13 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
1012 del_timer_sync(&rio_enum_timer); 1293 del_timer_sync(&rio_enum_timer);
1013 1294
1014 pr_debug("done\n"); 1295 pr_debug("done\n");
1296
1297 /* Read DestID assigned by enumerator */
1298 rio_local_read_config_32(mport, RIO_DID_CSR,
1299 &mport->host_deviceid);
1300 mport->host_deviceid = RIO_GET_DID(mport->sys_size,
1301 mport->host_deviceid);
1302
1015 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), 1303 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
1016 0) < 0) { 1304 0) < 0) {
1017 printk(KERN_INFO 1305 printk(KERN_INFO
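The recurring pattern in the rio-scan.c changes above is: take the
host device lock, touch the switch routing hardware, release the
lock. A minimal sketch, assuming it sits in rio-scan.c where
rio_lock_device()/rio_unlock_device() and the rswitch ops are in
scope:

	/* Hypothetical helper mirroring what rio_route_add_entry()
	 * does internally when its new 'lock' flag is non-zero. */
	static int example_update_route(struct rio_mport *mport,
					struct rio_switch *rswitch,
					u16 destid, u8 port)
	{
		int rc;

		rc = rio_lock_device(mport, rswitch->destid,
				     rswitch->hopcount, 1000);
		if (rc)
			return rc;

		rc = rswitch->add_entry(mport, rswitch->destid,
					rswitch->hopcount, RIO_GLOBAL_TABLE,
					destid, port);

		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
		return rc;
	}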
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 6395c780008b..777e099a3d8f 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -5,6 +5,10 @@
5 * Copyright 2005 MontaVista Software, Inc. 5 * Copyright 2005 MontaVista Software, Inc.
6 * Matt Porter <mporter@kernel.crashing.org> 6 * Matt Porter <mporter@kernel.crashing.org>
7 * 7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write/Error Management initialization and handling
11 *
8 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -333,6 +337,328 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
333} 337}
334 338
335/** 339/**
340 * rio_request_inb_pwrite - request inbound port-write message service
341 * @rdev: RIO device with which to register the inbound port-write callback
342 * @pwcback: Callback routine to execute when port-write is received
343 *
344 * Binds a port-write callback function to the RapidIO device.
345 * Returns 0 if the request has been satisfied.
346 */
347int rio_request_inb_pwrite(struct rio_dev *rdev,
348 int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
349{
350 int rc = 0;
351
352 spin_lock(&rio_global_list_lock);
353 if (rdev->pwcback != NULL)
354 rc = -ENOMEM;
355 else
356 rdev->pwcback = pwcback;
357
358 spin_unlock(&rio_global_list_lock);
359 return rc;
360}
361EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
362
363/**
364 * rio_release_inb_pwrite - release inbound port-write message service
365 * @rdev: RIO device which registered for inbound port-write callback
366 *
367 * Removes callback from the rio_dev structure. Returns 0 if the request
368 * has been satisfied.
369 */
370int rio_release_inb_pwrite(struct rio_dev *rdev)
371{
372 int rc = -ENOMEM;
373
374 spin_lock(&rio_global_list_lock);
375 if (rdev->pwcback) {
376 rdev->pwcback = NULL;
377 rc = 0;
378 }
379
380 spin_unlock(&rio_global_list_lock);
381 return rc;
382}
383EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
384
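/*
 * Usage sketch (hypothetical endpoint driver code): bind a port-write
 * callback with rio_request_inb_pwrite() and return 0 from it once the
 * message is fully handled, which stops any further standard
 * processing in rio_inb_pwrite_handler():
 *
 *	static int foo_pwcback(struct rio_dev *rdev,
 *			       union rio_pw_msg *msg, int step)
 *	{
 *		dev_info(&rdev->dev, "port-write, comptag 0x%08x\n",
 *			 msg->em.comptag);
 *		return 0;
 *	}
 *
 *	...
 *	rio_request_inb_pwrite(rdev, foo_pwcback);
 */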
385/**
386 * rio_mport_get_physefb - Helper function that returns register offset
387 * for Physical Layer Extended Features Block.
388 * @port: Master port (with @local, @destid and @hopcount selecting the device)
389 */
390u32
391rio_mport_get_physefb(struct rio_mport *port, int local,
392 u16 destid, u8 hopcount)
393{
394 u32 ext_ftr_ptr;
395 u32 ftr_header;
396
397 ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);
398
399 while (ext_ftr_ptr) {
400 if (local)
401 rio_local_read_config_32(port, ext_ftr_ptr,
402 &ftr_header);
403 else
404 rio_mport_read_config_32(port, destid, hopcount,
405 ext_ftr_ptr, &ftr_header);
406
407 ftr_header = RIO_GET_BLOCK_ID(ftr_header);
408 switch (ftr_header) {
409
410 case RIO_EFB_SER_EP_ID_V13P:
411 case RIO_EFB_SER_EP_REC_ID_V13P:
412 case RIO_EFB_SER_EP_FREE_ID_V13P:
413 case RIO_EFB_SER_EP_ID:
414 case RIO_EFB_SER_EP_REC_ID:
415 case RIO_EFB_SER_EP_FREE_ID:
416 case RIO_EFB_SER_EP_FREC_ID:
417
418 return ext_ftr_ptr;
419
420 default:
421 break;
422 }
423
424 ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
425 hopcount, ext_ftr_ptr);
426 }
427
428 return ext_ftr_ptr;
429}
430
431/**
432 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
433 * @comp_tag: RIO component tag to match
434 * @from: Previous RIO device found in search, or %NULL for new search
435 *
436 * Iterates through the list of known RIO devices. If a RIO device is
437 * found with a matching @comp_tag, a pointer to its device
438 * structure is returned. Otherwise, %NULL is returned. A new search
439 * is initiated by passing %NULL to the @from argument. Otherwise, if
440 * @from is not %NULL, searches continue from next device on the global
441 * list.
442 */
443static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
444{
445 struct list_head *n;
446 struct rio_dev *rdev;
447
448 spin_lock(&rio_global_list_lock);
449 n = from ? from->global_list.next : rio_devices.next;
450
451 while (n && (n != &rio_devices)) {
452 rdev = rio_dev_g(n);
453 if (rdev->comp_tag == comp_tag)
454 goto exit;
455 n = n->next;
456 }
457 rdev = NULL;
458exit:
459 spin_unlock(&rio_global_list_lock);
460 return rdev;
461}
462
463/**
464 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
465 * @rdev: Pointer to RIO device control structure
466 * @pnum: Switch port number to set LOCKOUT bit
467 * @lock: Operation : set (=1) or clear (=0)
468 */
469int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
470{
471 u8 hopcount = 0xff;
472 u16 destid = rdev->destid;
473 u32 regval;
474
475 if (rdev->rswitch) {
476 destid = rdev->rswitch->destid;
477 hopcount = rdev->rswitch->hopcount;
478 }
479
480 rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
481 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
482 &regval);
483 if (lock)
484 regval |= RIO_PORT_N_CTL_LOCKOUT;
485 else
486 regval &= ~RIO_PORT_N_CTL_LOCKOUT;
487
488 rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
489 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
490 regval);
491 return 0;
492}
493
494/**
495 * rio_inb_pwrite_handler - process inbound port-write message
496 * @pw_msg: pointer to inbound port-write message
497 *
498 * Processes an inbound port-write message. Returns 0 if the request
499 * has been satisfied.
500 */
501int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
502{
503 struct rio_dev *rdev;
504 struct rio_mport *mport;
505 u8 hopcount;
506 u16 destid;
507 u32 err_status;
508 int rc, portnum;
509
510 rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
511 if (rdev == NULL) {
512 /* Something bad here (probably an enumeration error) */
513 pr_err("RIO: %s No matching device for CTag 0x%08x\n",
514 __func__, pw_msg->em.comptag);
515 return -EIO;
516 }
517
518 pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
519
520#ifdef DEBUG_PW
521 {
522 u32 i;
523 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
524 pr_debug("0x%02x: %08x %08x %08x %08x",
525 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
526 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
527 i += 4;
528 }
529 pr_debug("\n");
530 }
531#endif
532
533 /* Call an external service function (if such is registered
534 * for this device). This may be the service for endpoints that send
535 * device-specific port-write messages. End-point messages are expected
536 * to be handled completely by the EP-specific device driver.
537 * For switches, rc==0 signals that no standard processing is required.
538 */
539 if (rdev->pwcback != NULL) {
540 rc = rdev->pwcback(rdev, pw_msg, 0);
541 if (rc == 0)
542 return 0;
543 }
544
545 /* For End-point devices processing stops here */
546 if (!(rdev->pef & RIO_PEF_SWITCH))
547 return 0;
548
549 if (rdev->phys_efptr == 0) {
550 pr_err("RIO_PW: Bad switch initialization for %s\n",
551 rio_name(rdev));
552 return 0;
553 }
554
555 mport = rdev->net->hport;
556 destid = rdev->rswitch->destid;
557 hopcount = rdev->rswitch->hopcount;
558
559 /*
560 * Process the port-write notification from switch
561 */
562
563 portnum = pw_msg->em.is_port & 0xFF;
564
565 if (rdev->rswitch->em_handle)
566 rdev->rswitch->em_handle(rdev, portnum);
567
568 rio_mport_read_config_32(mport, destid, hopcount,
569 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
570 &err_status);
571 pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
572
573 if (pw_msg->em.errdetect) {
574 pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
575 portnum, pw_msg->em.errdetect);
576 /* Clear EM Port N Error Detect CSR */
577 rio_mport_write_config_32(mport, destid, hopcount,
578 rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
579 }
580
581 if (pw_msg->em.ltlerrdet) {
582 pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
583 pw_msg->em.ltlerrdet);
584 /* Clear EM L/T Layer Error Detect CSR */
585 rio_mport_write_config_32(mport, destid, hopcount,
586 rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
587 }
588
589 /* Clear Port Errors */
590 rio_mport_write_config_32(mport, destid, hopcount,
591 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
592 err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
593
594 if (rdev->rswitch->port_ok & (1 << portnum)) {
595 if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
596 rdev->rswitch->port_ok &= ~(1 << portnum);
597 rio_set_port_lockout(rdev, portnum, 1);
598
599 rio_mport_write_config_32(mport, destid, hopcount,
600 rdev->phys_efptr +
601 RIO_PORT_N_ACK_STS_CSR(portnum),
602 RIO_PORT_N_ACK_CLEAR);
603
604 /* Schedule Extraction Service */
605 pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
606 rio_name(rdev), portnum);
607 }
608 } else {
609 if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
610 rdev->rswitch->port_ok |= (1 << portnum);
611 rio_set_port_lockout(rdev, portnum, 0);
612
613 /* Schedule Insertion Service */
614 pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
615 rio_name(rdev), portnum);
616 }
617 }
618
619 /* Clear Port-Write Pending bit */
620 rio_mport_write_config_32(mport, destid, hopcount,
621 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
622 RIO_PORT_N_ERR_STS_PW_PEND);
623
624 return 0;
625}
626EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
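For orientation, a minimal sketch of the endpoint-side callback this handler consults (a sketch only: the hook name is hypothetical, and rio_request_inb_pwrite() is assumed to be the registration helper introduced earlier in this patch):

	/* Hypothetical endpoint hook: returning 0 tells
	 * rio_inb_pwrite_handler() that the message was handled completely
	 * and no standard switch processing is required.
	 */
	static int my_ep_pwcback(struct rio_dev *rdev, union rio_pw_msg *pw_msg,
				 int step)
	{
		pr_debug("RIO: PW from %s, errdetect=0x%08x\n",
			 rio_name(rdev), pw_msg->em.errdetect);
		/* device-specific recovery would go here */
		return 0;
	}

	/* registration, assuming the helper added by this series: */
	rio_request_inb_pwrite(rdev, my_ep_pwcback);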
627
628/**
629 * rio_mport_get_efb - get pointer to next extended features block
630 * @port: Master port to issue transaction
631 * @local: Indicate a local master port or remote device access
632 * @destid: Destination ID of the device
633 * @hopcount: Number of switch hops to the device
634 * @from: Offset of current Extended Feature block header (if 0 starts
635 * from ExtFeaturePtr)
636 */
637u32
638rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
639 u8 hopcount, u32 from)
640{
641 u32 reg_val;
642
643 if (from == 0) {
644 if (local)
645 rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
646 &reg_val);
647 else
648 rio_mport_read_config_32(port, destid, hopcount,
649 RIO_ASM_INFO_CAR, &reg_val);
650 return reg_val & RIO_EXT_FTR_PTR_MASK;
651 } else {
652 if (local)
653 rio_local_read_config_32(port, from, &reg_val);
654 else
655 rio_mport_read_config_32(port, destid, hopcount,
656 from, &reg_val);
657 return RIO_GET_BLOCK_ID(reg_val);
658 }
659}
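A short usage sketch (illustrative, not part of the patch): since a zero @from returns the offset of the first block while a non-zero @from returns the ID of the block at that offset, a caller typically pairs the two calls:

	u32 efptr, block_id;

	/* offset of the first extended-features block (from == 0) */
	efptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
	if (efptr) {
		/* ID of the block found at that offset (from != 0) */
		block_id = rio_mport_get_efb(port, 0, destid, hopcount, efptr);
		pr_debug("RIO: EFB at 0x%x has ID 0x%x\n", efptr, block_id);
	}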
660
661/**
336 * rio_mport_get_feature - query for devices' extended features 662 * rio_mport_get_feature - query for devices' extended features
337 * @port: Master port to issue transaction 663 * @port: Master port to issue transaction
338 * @local: Indicate a local master port or remote device access 664 * @local: Indicate a local master port or remote device access
@@ -451,6 +777,111 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
451 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from); 777 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
452} 778}
453 779
780/**
781 * rio_std_route_add_entry - Add switch route table entry using standard
782 * registers defined in RIO specification rev.1.3
783 * @mport: Master port to issue transaction
784 * @destid: Destination ID of the device
785 * @hopcount: Number of switch hops to the device
786 * @table: routing table ID (global or port-specific)
787 * @route_destid: destID entry in the RT
788 * @route_port: destination port for specified destID
789 */
790int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
791 u16 table, u16 route_destid, u8 route_port)
792{
793 if (table == RIO_GLOBAL_TABLE) {
794 rio_mport_write_config_32(mport, destid, hopcount,
795 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
796 (u32)route_destid);
797 rio_mport_write_config_32(mport, destid, hopcount,
798 RIO_STD_RTE_CONF_PORT_SEL_CSR,
799 (u32)route_port);
800 }
801
802 udelay(10);
803 return 0;
804}
805
806/**
807 * rio_std_route_get_entry - Read switch route table entry (port number)
808 * associated with the specified destID using standard registers defined in RIO
809 * specification rev.1.3
810 * @mport: Master port to issue transaction
811 * @destid: Destination ID of the device
812 * @hopcount: Number of switch hops to the device
813 * @table: routing table ID (global or port-specific)
814 * @route_destid: destID entry in the RT
815 * @route_port: returned destination port for specified destID
816 */
817int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
818 u16 table, u16 route_destid, u8 *route_port)
819{
820 u32 result;
821
822 if (table == RIO_GLOBAL_TABLE) {
823 rio_mport_write_config_32(mport, destid, hopcount,
824 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
825 rio_mport_read_config_32(mport, destid, hopcount,
826 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
827
828 *route_port = (u8)result;
829 }
830
831 return 0;
832}
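A round-trip sketch for the two helpers above (values illustrative; global table and standard rev.1.3 registers assumed):

	u8 port_out;

	/* route destID 5 out of switch port 2, then read the entry back */
	rio_std_route_add_entry(mport, destid, hopcount,
				RIO_GLOBAL_TABLE, 5, 2);
	rio_std_route_get_entry(mport, destid, hopcount,
				RIO_GLOBAL_TABLE, 5, &port_out);
	pr_debug("RIO: destID 5 routes to port %d\n", port_out);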
833
834/**
835 * rio_std_route_clr_table - Clear switch route table using standard registers
836 * defined in RIO specification rev.1.3.
837 * @mport: Master port to issue transaction
839 * @destid: Destination ID of the device
840 * @hopcount: Number of switch hops to the device
841 * @table: routing table ID (global or port-specific)
842 */
843int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
844 u16 table)
845{
846 u32 max_destid = 0xff;
847 u32 i, pef, id_inc = 1, ext_cfg = 0;
848 u32 port_sel = RIO_INVALID_ROUTE;
849
850 if (table == RIO_GLOBAL_TABLE) {
851 rio_mport_read_config_32(mport, destid, hopcount,
852 RIO_PEF_CAR, &pef);
853
854 if (mport->sys_size) {
855 rio_mport_read_config_32(mport, destid, hopcount,
856 RIO_SWITCH_RT_LIMIT,
857 &max_destid);
858 max_destid &= RIO_RT_MAX_DESTID;
859 }
860
861 if (pef & RIO_PEF_EXT_RT) {
862 ext_cfg = 0x80000000;
863 id_inc = 4;
864 port_sel = (RIO_INVALID_ROUTE << 24) |
865 (RIO_INVALID_ROUTE << 16) |
866 (RIO_INVALID_ROUTE << 8) |
867 RIO_INVALID_ROUTE;
868 }
869
870 for (i = 0; i <= max_destid;) {
871 rio_mport_write_config_32(mport, destid, hopcount,
872 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
873 ext_cfg | i);
874 rio_mport_write_config_32(mport, destid, hopcount,
875 RIO_STD_RTE_CONF_PORT_SEL_CSR,
876 port_sel);
877 i += id_inc;
878 }
879 }
880
881 udelay(10);
882 return 0;
883}
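Worked example for the extended-route path above (illustrative, assuming RIO_INVALID_ROUTE is an 8-bit sentinel such as 0xff): with RIO_PEF_EXT_RT set, one 32-bit write programs four consecutive entries, so port_sel becomes (0xff << 24) | (0xff << 16) | (0xff << 8) | 0xff = 0xffffffff and the destID index advances by id_inc = 4 per iteration, quartering the number of maintenance writes needed to clear the table.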
884
454static void rio_fixup_device(struct rio_dev *dev) 885static void rio_fixup_device(struct rio_dev *dev)
455{ 886{
456} 887}
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 7786d02581f2..f27b7a9c47d2 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -18,38 +18,50 @@
18 18
19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, 19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
20 u8 hopcount, int ftr); 20 u8 hopcount, int ftr);
21extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
22 u16 destid, u8 hopcount);
23extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
24 u8 hopcount, u32 from);
21extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); 25extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
22extern int rio_enum_mport(struct rio_mport *mport); 26extern int rio_enum_mport(struct rio_mport *mport);
23extern int rio_disc_mport(struct rio_mport *mport); 27extern int rio_disc_mport(struct rio_mport *mport);
28extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
29 u8 hopcount, u16 table, u16 route_destid,
30 u8 route_port);
31extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
32 u8 hopcount, u16 table, u16 route_destid,
33 u8 *route_port);
34extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
35 u8 hopcount, u16 table);
36extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
24 37
25/* Structures internal to the RIO core code */ 38/* Structures internal to the RIO core code */
26extern struct device_attribute rio_dev_attrs[]; 39extern struct device_attribute rio_dev_attrs[];
27extern spinlock_t rio_global_list_lock; 40extern spinlock_t rio_global_list_lock;
28 41
29extern struct rio_route_ops __start_rio_route_ops[]; 42extern struct rio_switch_ops __start_rio_switch_ops[];
30extern struct rio_route_ops __end_rio_route_ops[]; 43extern struct rio_switch_ops __end_rio_switch_ops[];
31 44
32/* Helpers internal to the RIO core code */ 45/* Helpers internal to the RIO core code */
33#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook) \ 46#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \
34 static struct rio_route_ops __rio_route_ops __used \ 47 static const struct rio_switch_ops __rio_switch_##name __used \
35 __section(section)= { vid, did, add_hook, get_hook }; 48 __section(section) = { vid, did, init_hook };
36 49
37/** 50/**
38 * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations 51 * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine
39 * @vid: RIO vendor ID 52 * @vid: RIO vendor ID
40 * @did: RIO device ID 53 * @did: RIO device ID
41 * @add_hook: Callback that adds a route entry 54 * @init_hook: Callback that performs switch-specific initialization
42 * @get_hook: Callback that gets a route entry
43 * 55 *
44 * Manipulating switch route tables in RIO is switch specific. This 56 * Manipulating switch route tables and error management in RIO
45 * registers a switch by vendor and device ID with two callbacks for 57 * is switch specific. This registers a switch by vendor and device ID with
46 * modifying and retrieving route entries in a switch. A &struct 58 * initialization callback for setting up switch operations and (if required)
47 * rio_route_ops is initialized with the ops and placed into a 59 * hardware initialization. A &struct rio_switch_ops is initialized with
48 * RIO-specific kernel section. 60 * pointer to the init routine and placed into a RIO-specific kernel section.
49 */ 61 */
50#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook) \ 62#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \
51 DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \ 63 DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \
52 vid, did, add_hook, get_hook) 64 vid, did, init_hook)
53 65
54#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) 66#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
55#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) 67#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
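For readers following the token pasting in DECLARE_RIO_SWITCH_SECTION: a registration such as those added in idtcps.c below expands to roughly the following (my_switch_init is a hypothetical hook name):

	/* DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, my_switch_init)
	 * pastes the two ID macro names into the variable name and yields:
	 */
	static const struct rio_switch_ops __rio_switch_RIO_VID_IDTRIO_DID_IDTCPS8
		__used __section(.rio_switch_ops) =
		{ RIO_VID_IDT, RIO_DID_IDTCPS8, my_switch_init };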
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
new file mode 100644
index 000000000000..2b4e9b2b6631
--- /dev/null
+++ b/drivers/rapidio/switches/Kconfig
@@ -0,0 +1,28 @@
1#
2# RapidIO switches configuration
3#
4config RAPIDIO_TSI57X
5 bool "IDT Tsi57x SRIO switches support"
6 depends on RAPIDIO
7 ---help---
8	  Includes support for the IDT Tsi57x family of serial RapidIO switches.
9
10config RAPIDIO_CPS_XX
11 bool "IDT CPS-xx SRIO switches support"
12 depends on RAPIDIO
13 ---help---
14	  Includes support for the IDT CPS-16/12/10/8 serial RapidIO switches.
15
16config RAPIDIO_TSI568
17 bool "Tsi568 SRIO switch support"
18 depends on RAPIDIO
19 default n
20 ---help---
21	  Includes support for the IDT Tsi568 serial RapidIO switch.
22
23config RAPIDIO_TSI500
24 bool "Tsi500 Parallel RapidIO switch support"
25 depends on RAPIDIO
26 default n
27 ---help---
28	  Includes support for the IDT Tsi500 parallel RapidIO switch.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index b924f8301761..fe4adc3e8d5f 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -2,4 +2,11 @@
2# Makefile for RIO switches 2# Makefile for RIO switches
3# 3#
4 4
5obj-$(CONFIG_RAPIDIO) += tsi500.o 5obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
6obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
7obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
8obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
9
10ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
11EXTRA_CFLAGS += -DDEBUG
12endif
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
new file mode 100644
index 000000000000..2c790c144f89
--- /dev/null
+++ b/drivers/rapidio/switches/idtcps.c
@@ -0,0 +1,137 @@
1/*
2 * IDT CPS RapidIO switches support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/rio.h>
14#include <linux/rio_drv.h>
15#include <linux/rio_ids.h>
16#include "../rio.h"
17
18#define CPS_DEFAULT_ROUTE 0xde
19#define CPS_NO_ROUTE 0xdf
20
21#define IDTCPS_RIO_DOMAIN 0xf20020
22
23static int
24idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
25 u16 table, u16 route_destid, u8 route_port)
26{
27 u32 result;
28
29 if (table == RIO_GLOBAL_TABLE) {
30 rio_mport_write_config_32(mport, destid, hopcount,
31 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
32
33 rio_mport_read_config_32(mport, destid, hopcount,
34 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
35
36 result = (0xffffff00 & result) | (u32)route_port;
37 rio_mport_write_config_32(mport, destid, hopcount,
38 RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
39 }
40
41 return 0;
42}
43
44static int
45idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 *route_port)
47{
48 u32 result;
49
50 if (table == RIO_GLOBAL_TABLE) {
51 rio_mport_write_config_32(mport, destid, hopcount,
52 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
53
54 rio_mport_read_config_32(mport, destid, hopcount,
55 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
56
57 if (CPS_DEFAULT_ROUTE == (u8)result ||
58 CPS_NO_ROUTE == (u8)result)
59 *route_port = RIO_INVALID_ROUTE;
60 else
61 *route_port = (u8)result;
62 }
63
64 return 0;
65}
66
67static int
68idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
69 u16 table)
70{
71 u32 i;
72
73 if (table == RIO_GLOBAL_TABLE) {
74 for (i = 0x80000000; i <= 0x800000ff;) {
75 rio_mport_write_config_32(mport, destid, hopcount,
76 RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
77 rio_mport_write_config_32(mport, destid, hopcount,
78 RIO_STD_RTE_CONF_PORT_SEL_CSR,
79 (CPS_DEFAULT_ROUTE << 24) |
80 (CPS_DEFAULT_ROUTE << 16) |
81 (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
82 i += 4;
83 }
84 }
85
86 return 0;
87}
88
89static int
90idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
91 u8 sw_domain)
92{
93 /*
94	 * Switch domain configuration operates only at the global level
95 */
96 rio_mport_write_config_32(mport, destid, hopcount,
97 IDTCPS_RIO_DOMAIN, (u32)sw_domain);
98 return 0;
99}
100
101static int
102idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
103 u8 *sw_domain)
104{
105 u32 regval;
106
107 /*
108	 * Switch domain configuration operates only at the global level
109 */
110 rio_mport_read_config_32(mport, destid, hopcount,
111 IDTCPS_RIO_DOMAIN, &regval);
112
113 *sw_domain = (u8)(regval & 0xff);
114
115 return 0;
116}
117
118static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
119{
120 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
121 rdev->rswitch->add_entry = idtcps_route_add_entry;
122 rdev->rswitch->get_entry = idtcps_route_get_entry;
123 rdev->rswitch->clr_table = idtcps_route_clr_table;
124 rdev->rswitch->set_domain = idtcps_set_domain;
125 rdev->rswitch->get_domain = idtcps_get_domain;
126 rdev->rswitch->em_init = NULL;
127 rdev->rswitch->em_handle = NULL;
128
129 return 0;
130}
131
132DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init);
133DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init);
134DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init);
135DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init);
136DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init);
137DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init);
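How the core consumes these section entries is outside this hunk; a plausible sketch based on the __start/__end symbols declared in rio.h above (the loop body and the init_hook field name are assumptions, not the patch's code):

	struct rio_switch_ops *ops;

	/* match the enumerated device against the registered switches */
	for (ops = __start_rio_switch_ops; ops < __end_rio_switch_ops; ops++) {
		if (ops->vid == rdev->vid && ops->did == rdev->did) {
			ops->init_hook(rdev, do_enum);
			break;
		}
	}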
diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c
index c77c23bd9840..914eddd5aa42 100644
--- a/drivers/rapidio/switches/tsi500.c
+++ b/drivers/rapidio/switches/tsi500.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * RapidIO Tsi500 switch support 2 * RapidIO Tsi500 switch support
3 * 3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Modified switch operations initialization.
7 *
4 * Copyright 2005 MontaVista Software, Inc. 8 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 9 * Matt Porter <mporter@kernel.crashing.org>
6 * 10 *
@@ -57,4 +61,18 @@ tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 tab
57 return ret; 61 return ret;
58} 62}
59 63
60DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry); 64static int tsi500_switch_init(struct rio_dev *rdev, int do_enum)
65{
66 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
67 rdev->rswitch->add_entry = tsi500_route_add_entry;
68 rdev->rswitch->get_entry = tsi500_route_get_entry;
69 rdev->rswitch->clr_table = NULL;
70 rdev->rswitch->set_domain = NULL;
71 rdev->rswitch->get_domain = NULL;
72 rdev->rswitch->em_init = NULL;
73 rdev->rswitch->em_handle = NULL;
74
75 return 0;
76}
77
78DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init);
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
new file mode 100644
index 000000000000..f7fd7898606e
--- /dev/null
+++ b/drivers/rapidio/switches/tsi568.c
@@ -0,0 +1,146 @@
1/*
2 * RapidIO Tsi568 switch support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI568_SP_MODE_BC 0x10004
33#define TSI568_SP_MODE_PW_DIS 0x08000000
34
35static int
36tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
37 u16 table, u16 route_destid, u8 route_port)
38{
39 if (table == RIO_GLOBAL_TABLE) {
40 rio_mport_write_config_32(mport, destid, hopcount,
41 SPBC_ROUTE_CFG_DESTID, route_destid);
42 rio_mport_write_config_32(mport, destid, hopcount,
43 SPBC_ROUTE_CFG_PORT, route_port);
44 } else {
45 rio_mport_write_config_32(mport, destid, hopcount,
46 SPP_ROUTE_CFG_DESTID(table),
47 route_destid);
48 rio_mport_write_config_32(mport, destid, hopcount,
49 SPP_ROUTE_CFG_PORT(table), route_port);
50 }
51
52 udelay(10);
53
54 return 0;
55}
56
57static int
58tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
59 u16 table, u16 route_destid, u8 *route_port)
60{
61 int ret = 0;
62 u32 result;
63
64 if (table == RIO_GLOBAL_TABLE) {
65 rio_mport_write_config_32(mport, destid, hopcount,
66 SPBC_ROUTE_CFG_DESTID, route_destid);
67 rio_mport_read_config_32(mport, destid, hopcount,
68 SPBC_ROUTE_CFG_PORT, &result);
69 } else {
70 rio_mport_write_config_32(mport, destid, hopcount,
71 SPP_ROUTE_CFG_DESTID(table),
72 route_destid);
73 rio_mport_read_config_32(mport, destid, hopcount,
74 SPP_ROUTE_CFG_PORT(table), &result);
75 }
76
77 *route_port = result;
78 if (*route_port > 15)
79 ret = -1;
80
81 return ret;
82}
83
84static int
85tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
86 u16 table)
87{
88 u32 route_idx;
89 u32 lut_size;
90
91 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
92
93 if (table == RIO_GLOBAL_TABLE) {
94 rio_mport_write_config_32(mport, destid, hopcount,
95 SPBC_ROUTE_CFG_DESTID, 0x80000000);
96 for (route_idx = 0; route_idx <= lut_size; route_idx++)
97 rio_mport_write_config_32(mport, destid, hopcount,
98 SPBC_ROUTE_CFG_PORT,
99 RIO_INVALID_ROUTE);
100 } else {
101 rio_mport_write_config_32(mport, destid, hopcount,
102 SPP_ROUTE_CFG_DESTID(table),
103 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPP_ROUTE_CFG_PORT(table),
107 RIO_INVALID_ROUTE);
108 }
109
110 return 0;
111}
112
113static int
114tsi568_em_init(struct rio_dev *rdev)
115{
116 struct rio_mport *mport = rdev->net->hport;
117 u16 destid = rdev->rswitch->destid;
118 u8 hopcount = rdev->rswitch->hopcount;
119 u32 regval;
120
121 pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
122
123 /* Make sure that Port-Writes are disabled (for all ports) */
124 rio_mport_read_config_32(mport, destid, hopcount,
125 TSI568_SP_MODE_BC, &regval);
126 rio_mport_write_config_32(mport, destid, hopcount,
127 TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
128
129 return 0;
130}
131
132static int tsi568_switch_init(struct rio_dev *rdev, int do_enum)
133{
134 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
135 rdev->rswitch->add_entry = tsi568_route_add_entry;
136 rdev->rswitch->get_entry = tsi568_route_get_entry;
137 rdev->rswitch->clr_table = tsi568_route_clr_table;
138 rdev->rswitch->set_domain = NULL;
139 rdev->rswitch->get_domain = NULL;
140 rdev->rswitch->em_init = tsi568_em_init;
141 rdev->rswitch->em_handle = NULL;
142
143 return 0;
144}
145
146DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init);
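A note on the design choice here (inferred from the code, not stated in the patch): this driver registers no em_handle, so tsi568_em_init() deliberately sets TSI568_SP_MODE_PW_DIS to stop the switch from emitting port-writes that nothing would service; the Tsi57x driver below does the opposite, clearing PW_DIS per port, because it supplies tsi57x_em_handler() to process them.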
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
new file mode 100644
index 000000000000..d34df722d95f
--- /dev/null
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -0,0 +1,315 @@
1/*
2 * RapidIO Tsi57x switch family support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
33#define TSI578_SP_MODE_GLBL 0x10004
34#define TSI578_SP_MODE_PW_DIS 0x08000000
35#define TSI578_SP_MODE_LUT_512 0x01000000
36
37#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
38#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
39#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
40#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
41
42#define TSI578_GLBL_ROUTE_BASE 0x10078
43
44static int
45tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 route_port)
47{
48 if (table == RIO_GLOBAL_TABLE) {
49 rio_mport_write_config_32(mport, destid, hopcount,
50 SPBC_ROUTE_CFG_DESTID, route_destid);
51 rio_mport_write_config_32(mport, destid, hopcount,
52 SPBC_ROUTE_CFG_PORT, route_port);
53 } else {
54 rio_mport_write_config_32(mport, destid, hopcount,
55 SPP_ROUTE_CFG_DESTID(table), route_destid);
56 rio_mport_write_config_32(mport, destid, hopcount,
57 SPP_ROUTE_CFG_PORT(table), route_port);
58 }
59
60 udelay(10);
61
62 return 0;
63}
64
65static int
66tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
67 u16 table, u16 route_destid, u8 *route_port)
68{
69 int ret = 0;
70 u32 result;
71
72 if (table == RIO_GLOBAL_TABLE) {
73		/* Use the local RT of the ingress port to avoid a possible
74		 * race condition */
75 rio_mport_read_config_32(mport, destid, hopcount,
76 RIO_SWP_INFO_CAR, &result);
77 table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
78 }
79
80 rio_mport_write_config_32(mport, destid, hopcount,
81 SPP_ROUTE_CFG_DESTID(table), route_destid);
82 rio_mport_read_config_32(mport, destid, hopcount,
83 SPP_ROUTE_CFG_PORT(table), &result);
84
85 *route_port = (u8)result;
86 if (*route_port > 15)
87 ret = -1;
88
89 return ret;
90}
91
92static int
93tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
94 u16 table)
95{
96 u32 route_idx;
97 u32 lut_size;
98
99 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
100
101 if (table == RIO_GLOBAL_TABLE) {
102 rio_mport_write_config_32(mport, destid, hopcount,
103 SPBC_ROUTE_CFG_DESTID, 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPBC_ROUTE_CFG_PORT,
107 RIO_INVALID_ROUTE);
108 } else {
109 rio_mport_write_config_32(mport, destid, hopcount,
110 SPP_ROUTE_CFG_DESTID(table), 0x80000000);
111 for (route_idx = 0; route_idx <= lut_size; route_idx++)
112 rio_mport_write_config_32(mport, destid, hopcount,
113				SPP_ROUTE_CFG_PORT(table), RIO_INVALID_ROUTE);
114 }
115
116 return 0;
117}
118
119static int
120tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
121 u8 sw_domain)
122{
123 u32 regval;
124
125 /*
126	 * Switch domain configuration operates only at the global level
127 */
128
129 /* Turn off flat (LUT_512) mode */
130 rio_mport_read_config_32(mport, destid, hopcount,
131 TSI578_SP_MODE_GLBL, &regval);
132 rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
133 regval & ~TSI578_SP_MODE_LUT_512);
134 /* Set switch domain base */
135 rio_mport_write_config_32(mport, destid, hopcount,
136 TSI578_GLBL_ROUTE_BASE,
137 (u32)(sw_domain << 24));
138 return 0;
139}
140
141static int
142tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
143 u8 *sw_domain)
144{
145 u32 regval;
146
147 /*
148	 * Switch domain configuration operates only at the global level
149 */
150 rio_mport_read_config_32(mport, destid, hopcount,
151 TSI578_GLBL_ROUTE_BASE, &regval);
152
153 *sw_domain = (u8)(regval >> 24);
154
155 return 0;
156}
157
158static int
159tsi57x_em_init(struct rio_dev *rdev)
160{
161 struct rio_mport *mport = rdev->net->hport;
162 u16 destid = rdev->rswitch->destid;
163 u8 hopcount = rdev->rswitch->hopcount;
164 u32 regval;
165 int portnum;
166
167 pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
168
169 for (portnum = 0; portnum < 16; portnum++) {
170 /* Make sure that Port-Writes are enabled (for all ports) */
171 rio_mport_read_config_32(mport, destid, hopcount,
172 TSI578_SP_MODE(portnum), &regval);
173 rio_mport_write_config_32(mport, destid, hopcount,
174 TSI578_SP_MODE(portnum),
175 regval & ~TSI578_SP_MODE_PW_DIS);
176
177 /* Clear all pending interrupts */
178 rio_mport_read_config_32(mport, destid, hopcount,
179 rdev->phys_efptr +
180 RIO_PORT_N_ERR_STS_CSR(portnum),
181 &regval);
182 rio_mport_write_config_32(mport, destid, hopcount,
183 rdev->phys_efptr +
184 RIO_PORT_N_ERR_STS_CSR(portnum),
185 regval & 0x07120214);
186
187 rio_mport_read_config_32(mport, destid, hopcount,
188 TSI578_SP_INT_STATUS(portnum), &regval);
189 rio_mport_write_config_32(mport, destid, hopcount,
190 TSI578_SP_INT_STATUS(portnum),
191 regval & 0x000700bd);
192
193 /* Enable all interrupts to allow ports to send a port-write */
194 rio_mport_read_config_32(mport, destid, hopcount,
195 TSI578_SP_CTL_INDEP(portnum), &regval);
196 rio_mport_write_config_32(mport, destid, hopcount,
197 TSI578_SP_CTL_INDEP(portnum),
198 regval | 0x000b0000);
199
200 /* Skip next (odd) port if the current port is in x4 mode */
201 rio_mport_read_config_32(mport, destid, hopcount,
202 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
203 &regval);
204 if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
205 portnum++;
206 }
207
208 return 0;
209}
210
211static int
212tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
213{
214 struct rio_mport *mport = rdev->net->hport;
215 u16 destid = rdev->rswitch->destid;
216 u8 hopcount = rdev->rswitch->hopcount;
217 u32 intstat, err_status;
218 int sendcount, checkcount;
219 u8 route_port;
220 u32 regval;
221
222 rio_mport_read_config_32(mport, destid, hopcount,
223 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
224 &err_status);
225
226 if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
227 (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
228 RIO_PORT_N_ERR_STS_PW_INP_ES))) {
229 /* Remove any queued packets by locking/unlocking port */
230 rio_mport_read_config_32(mport, destid, hopcount,
231 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
232 &regval);
233 if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
234 rio_mport_write_config_32(mport, destid, hopcount,
235 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
236 regval | RIO_PORT_N_CTL_LOCKOUT);
237 udelay(50);
238 rio_mport_write_config_32(mport, destid, hopcount,
239 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
240 regval);
241 }
242
243 /* Read from link maintenance response register to clear
244 * valid bit
245 */
246 rio_mport_read_config_32(mport, destid, hopcount,
247 rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
248 &regval);
249
250 /* Send a Packet-Not-Accepted/Link-Request-Input-Status control
251 * symbol to recover from IES/OES
252 */
253 sendcount = 3;
254 while (sendcount) {
255 rio_mport_write_config_32(mport, destid, hopcount,
256 TSI578_SP_CS_TX(portnum), 0x40fc8000);
257 checkcount = 3;
258 while (checkcount--) {
259 udelay(50);
260 rio_mport_read_config_32(
261 mport, destid, hopcount,
262 rdev->phys_efptr +
263 RIO_PORT_N_MNT_RSP_CSR(portnum),
264 &regval);
265 if (regval & RIO_PORT_N_MNT_RSP_RVAL)
266 goto exit_es;
267 }
268
269 sendcount--;
270 }
271 }
272
273exit_es:
274 /* Clear implementation specific error status bits */
275 rio_mport_read_config_32(mport, destid, hopcount,
276 TSI578_SP_INT_STATUS(portnum), &intstat);
277 pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
278 destid, hopcount, portnum, intstat);
279
280 if (intstat & 0x10000) {
281 rio_mport_read_config_32(mport, destid, hopcount,
282 TSI578_SP_LUT_PEINF(portnum), &regval);
283 regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
284 route_port = rdev->rswitch->route_table[regval];
285 pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
286 rio_name(rdev), portnum, regval);
287 tsi57x_route_add_entry(mport, destid, hopcount,
288 RIO_GLOBAL_TABLE, regval, route_port);
289 }
290
291 rio_mport_write_config_32(mport, destid, hopcount,
292 TSI578_SP_INT_STATUS(portnum),
293 intstat & 0x000700bd);
294
295 return 0;
296}
297
298static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
299{
300 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
301 rdev->rswitch->add_entry = tsi57x_route_add_entry;
302 rdev->rswitch->get_entry = tsi57x_route_get_entry;
303 rdev->rswitch->clr_table = tsi57x_route_clr_table;
304 rdev->rswitch->set_domain = tsi57x_set_domain;
305 rdev->rswitch->get_domain = tsi57x_get_domain;
306 rdev->rswitch->em_init = tsi57x_em_init;
307 rdev->rswitch->em_handle = tsi57x_em_handler;
308
309 return 0;
310}
311
312DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init);
313DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init);
314DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init);
315DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init);
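Recovery timing, worked out from the loops in tsi57x_em_handler() above (illustrative): at most three control symbols are sent, each followed by up to three polls at 50 us intervals, so the handler gives up after roughly 3 * 3 * 50 us = 450 us, plus the optional 50 us lockout pulse used to flush queued packets.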
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f1598324344c..10ba12c8c5e0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -611,6 +611,13 @@ config RTC_DRV_AB3100
611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC 611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
612 support. This chip contains a battery- and capacitor-backed RTC. 612 support. This chip contains a battery- and capacitor-backed RTC.
613 613
614config RTC_DRV_AB8500
615 tristate "ST-Ericsson AB8500 RTC"
616 depends on AB8500_CORE
617 help
618 Select this to enable the ST-Ericsson AB8500 power management IC RTC
619 support. This chip contains a battery- and capacitor-backed RTC.
620
614config RTC_DRV_NUC900 621config RTC_DRV_NUC900
615 tristate "NUC910/NUC920 RTC driver" 622 tristate "NUC910/NUC920 RTC driver"
616 depends on RTC_CLASS && ARCH_W90X900 623 depends on RTC_CLASS && ARCH_W90X900
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 245311a1348f..5adbba7cf89c 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -18,6 +18,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
18# Keep the list ordered. 18# Keep the list ordered.
19 19
20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o 20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
21obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
21obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o 22obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
22obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o 23obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
23obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o 24obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
new file mode 100644
index 000000000000..2fda03125e55
--- /dev/null
+++ b/drivers/rtc/rtc-ab8500.c
@@ -0,0 +1,363 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License (GPL) version 2
5 * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>
6 *
7 * RTC clock driver for the RTC part of the AB8500 Power management chip.
8 * Based on RTC clock driver for the AB3100 Analog Baseband Chip by
9 * Linus Walleij <linus.walleij@stericsson.com>
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/rtc.h>
17#include <linux/mfd/ab8500.h>
18#include <linux/delay.h>
19
20#define AB8500_RTC_SOFF_STAT_REG 0x0F00
21#define AB8500_RTC_CC_CONF_REG 0x0F01
22#define AB8500_RTC_READ_REQ_REG 0x0F02
23#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03
24#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04
25#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05
26#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06
27#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07
28#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08
29#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09
30#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A
31#define AB8500_RTC_STAT_REG 0x0F0B
32#define AB8500_RTC_BKUP_CHG_REG 0x0F0C
33#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D
34#define AB8500_RTC_CALIB_REG 0x0F0E
35#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F
36#define AB8500_REV_REG 0x1080
37
38/* RtcReadRequest bits */
39#define RTC_READ_REQUEST 0x01
40#define RTC_WRITE_REQUEST 0x02
41
42/* RtcCtrl bits */
43#define RTC_ALARM_ENA 0x04
44#define RTC_STATUS_DATA 0x01
45
46#define COUNTS_PER_SEC (0xF000 / 60)
47#define AB8500_RTC_EPOCH 2000
48
49static const unsigned long ab8500_rtc_time_regs[] = {
50 AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG,
51 AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG,
52 AB8500_RTC_WATCH_TSECMID_REG
53};
54
55static const unsigned long ab8500_rtc_alarm_regs[] = {
56 AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG,
57 AB8500_RTC_ALRM_MIN_LOW_REG
58};
59
60/* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */
61static unsigned long get_elapsed_seconds(int year)
62{
63 unsigned long secs;
64 struct rtc_time tm = {
65 .tm_year = year - 1900,
66 .tm_mday = 1,
67 };
68
69 /*
70 * This function calculates secs from 1970 and not from
71 * 1900, even if we supply the offset from year 1900.
72 */
73 rtc_tm_to_time(&tm, &secs);
74 return secs;
75}
76
77static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
78{
79 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
80 unsigned long timeout = jiffies + HZ;
81 int retval, i;
82 unsigned long mins, secs;
83 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
84
85 /* Request a data read */
86 retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG,
87 RTC_READ_REQUEST);
88 if (retval < 0)
89 return retval;
90
91 /* Early AB8500 chips will not clear the rtc read request bit */
92 if (ab8500->revision == 0) {
93 msleep(1);
94 } else {
95 /* Wait for some cycles after enabling the rtc read in ab8500 */
96 while (time_before(jiffies, timeout)) {
97 retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG);
98 if (retval < 0)
99 return retval;
100
101 if (!(retval & RTC_READ_REQUEST))
102 break;
103
104 msleep(1);
105 }
106 }
107
108 /* Read the Watchtime registers */
109 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
110 retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]);
111 if (retval < 0)
112 return retval;
113 buf[i] = retval;
114 }
115
116 mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
117
118 secs = (buf[3] << 8) | buf[4];
119 secs = secs / COUNTS_PER_SEC;
120 secs = secs + (mins * 60);
121
122 /* Add back the initially subtracted number of seconds */
123 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
124
125 rtc_time_to_tm(secs, tm);
126 return rtc_valid_tm(tm);
127}
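Worked decode for the arithmetic above (illustrative values): COUNTS_PER_SEC is 0xF000 / 60 = 1024, i.e. the sub-minute counter ticks at 1024 Hz. A raw reading of mins = 0x000002 and ticks = 0x0800 therefore decodes as 2 * 60 + 2048 / 1024 = 122 seconds past 2000-01-01 00:00:00, before the epoch offset is added back.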
128
129static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
130{
131 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
132 int retval, i;
133 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
134 unsigned long no_secs, no_mins, secs = 0;
135
136 if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) {
137 dev_dbg(dev, "year should be equal to or greater than %d\n",
138 AB8500_RTC_EPOCH);
139 return -EINVAL;
140 }
141
142 /* Get the number of seconds since 1970 */
143 rtc_tm_to_time(tm, &secs);
144
145 /*
146 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
147 * we only have a small counter in the RTC.
148 */
149 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
150
151 no_mins = secs / 60;
152
153 no_secs = secs % 60;
154 /* Make the seconds count as per the RTC resolution */
155 no_secs = no_secs * COUNTS_PER_SEC;
156
157 buf[4] = no_secs & 0xFF;
158 buf[3] = (no_secs >> 8) & 0xFF;
159
160 buf[2] = no_mins & 0xFF;
161 buf[1] = (no_mins >> 8) & 0xFF;
162 buf[0] = (no_mins >> 16) & 0xFF;
163
164 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
165 retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]);
166 if (retval < 0)
167 return retval;
168 }
169
170 /* Request a data write */
171 return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
172}
173
174static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
175{
176 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
177 int retval, i;
178 int rtc_ctrl;
179 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
180 unsigned long secs, mins;
181
182 /* Check if the alarm is enabled or not */
183 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
184 if (rtc_ctrl < 0)
185 return rtc_ctrl;
186
187 if (rtc_ctrl & RTC_ALARM_ENA)
188 alarm->enabled = 1;
189 else
190 alarm->enabled = 0;
191
192 alarm->pending = 0;
193
194 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
195 retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]);
196 if (retval < 0)
197 return retval;
198 buf[i] = retval;
199 }
200
201 mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
202 secs = mins * 60;
203
204 /* Add back the initially subtracted number of seconds */
205 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
206
207 rtc_time_to_tm(secs, &alarm->time);
208
209 return rtc_valid_tm(&alarm->time);
210}
211
212static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled)
213{
214 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
215
216 return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
217 enabled ? RTC_ALARM_ENA : 0);
218}
219
220static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
221{
222 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
223 int retval, i;
224 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
225 unsigned long mins, secs = 0;
226
227 if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
228 dev_dbg(dev, "year should be equal to or greater than %d\n",
229 AB8500_RTC_EPOCH);
230 return -EINVAL;
231 }
232
233 /* Get the number of seconds since 1970 */
234 rtc_tm_to_time(&alarm->time, &secs);
235
236 /*
237 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
238 * we only have a small counter in the RTC.
239 */
240 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
241
242 mins = secs / 60;
243
244 buf[2] = mins & 0xFF;
245 buf[1] = (mins >> 8) & 0xFF;
246 buf[0] = (mins >> 16) & 0xFF;
247
248 /* Set the alarm time */
249 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
250 retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]);
251 if (retval < 0)
252 return retval;
253 }
254
255 return ab8500_rtc_irq_enable(dev, alarm->enabled);
256}
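One consequence of the register layout above (an observation, not a change in the patch): the alarm registers hold whole minutes only, so mins = secs / 60 silently discards the seconds; an alarm requested for 12:00:30 fires at 12:00:00.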
257
258static irqreturn_t rtc_alarm_handler(int irq, void *data)
259{
260 struct rtc_device *rtc = data;
261 unsigned long events = RTC_IRQF | RTC_AF;
262
263 dev_dbg(&rtc->dev, "%s\n", __func__);
264 rtc_update_irq(rtc, 1, events);
265
266 return IRQ_HANDLED;
267}
268
269static const struct rtc_class_ops ab8500_rtc_ops = {
270 .read_time = ab8500_rtc_read_time,
271 .set_time = ab8500_rtc_set_time,
272 .read_alarm = ab8500_rtc_read_alarm,
273 .set_alarm = ab8500_rtc_set_alarm,
274 .alarm_irq_enable = ab8500_rtc_irq_enable,
275};
276
277static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
278{
279 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
280 int err;
281 struct rtc_device *rtc;
282 int rtc_ctrl;
283 int irq;
284
285 irq = platform_get_irq_byname(pdev, "ALARM");
286 if (irq < 0)
287 return irq;
288
289 /* For RTC supply test */
290 err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA,
291 RTC_STATUS_DATA);
292 if (err < 0)
293 return err;
294
295 /* Wait for reset by the PorRtc */
296 msleep(1);
297
298 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
299 if (rtc_ctrl < 0)
300 return rtc_ctrl;
301
302 /* Check if the RTC Supply fails */
303 if (!(rtc_ctrl & RTC_STATUS_DATA)) {
304 dev_err(&pdev->dev, "RTC supply failure\n");
305 return -ENODEV;
306 }
307
308 rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
309 THIS_MODULE);
310 if (IS_ERR(rtc)) {
311 dev_err(&pdev->dev, "Registration failed\n");
312 err = PTR_ERR(rtc);
313 return err;
314 }
315
316 err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
317 "ab8500-rtc", rtc);
318 if (err < 0) {
319 rtc_device_unregister(rtc);
320 return err;
321 }
322
323 platform_set_drvdata(pdev, rtc);
324
325 return 0;
326}
327
328static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
329{
330 struct rtc_device *rtc = platform_get_drvdata(pdev);
331 int irq = platform_get_irq_byname(pdev, "ALARM");
332
333 free_irq(irq, rtc);
334 rtc_device_unregister(rtc);
335 platform_set_drvdata(pdev, NULL);
336
337 return 0;
338}
339
340static struct platform_driver ab8500_rtc_driver = {
341 .driver = {
342 .name = "ab8500-rtc",
343 .owner = THIS_MODULE,
344 },
345 .probe = ab8500_rtc_probe,
346 .remove = __devexit_p(ab8500_rtc_remove),
347};
348
349static int __init ab8500_rtc_init(void)
350{
351 return platform_driver_register(&ab8500_rtc_driver);
352}
353
354static void __exit ab8500_rtc_exit(void)
355{
356 platform_driver_unregister(&ab8500_rtc_driver);
357}
358
359module_init(ab8500_rtc_init);
360module_exit(ab8500_rtc_exit);
361MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
362MODULE_DESCRIPTION("AB8500 RTC Driver");
363MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 038095d99976..6dc4e6241418 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -595,10 +595,6 @@ static void wdt_disable(void)
595static ssize_t wdt_write(struct file *file, const char __user *buf, 595static ssize_t wdt_write(struct file *file, const char __user *buf,
596 size_t count, loff_t *ppos) 596 size_t count, loff_t *ppos)
597{ 597{
598 /* Can't seek (pwrite) on this device
599 if (ppos != &file->f_pos)
600 return -ESPIPE;
601 */
602 if (count) { 598 if (count) {
603 wdt_ping(); 599 wdt_ping();
604 return 1; 600 return 1;
@@ -707,7 +703,7 @@ static int wdt_open(struct inode *inode, struct file *file)
707 */ 703 */
708 wdt_is_open = 1; 704 wdt_is_open = 1;
709 unlock_kernel(); 705 unlock_kernel();
710 return 0; 706 return nonseekable_open(inode, file);
711 } 707 }
712 return -ENODEV; 708 return -ENODEV;
713} 709}
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 8dbf1c3afb7b..d64b7178fa08 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3587 if (i == (-ENOSPC)) { 3587 if (i == (-ENOSPC)) {
3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */ 3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */
3589 if (transfer <= do_count) { 3589 if (transfer <= do_count) {
3590 filp->f_pos += do_count - transfer; 3590 *ppos += do_count - transfer;
3591 count -= do_count - transfer; 3591 count -= do_count - transfer;
3592 if (STps->drv_block >= 0) { 3592 if (STps->drv_block >= 0) {
3593 STps->drv_block += (do_count - transfer) / STp->block_size; 3593 STps->drv_block += (do_count - transfer) / STp->block_size;
@@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3625 goto out; 3625 goto out;
3626 } 3626 }
3627 3627
3628 filp->f_pos += do_count; 3628 *ppos += do_count;
3629 b_point += do_count; 3629 b_point += do_count;
3630 count -= do_count; 3630 count -= do_count;
3631 if (STps->drv_block >= 0) { 3631 if (STps->drv_block >= 0) {
@@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3647 if (STps->drv_block >= 0) { 3647 if (STps->drv_block >= 0) {
3648 STps->drv_block += blks; 3648 STps->drv_block += blks;
3649 } 3649 }
3650 filp->f_pos += count; 3650 *ppos += count;
3651 count = 0; 3651 count = 0;
3652 } 3652 }
3653 3653
@@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo
3823 } 3823 }
3824 STp->logical_blk_num += transfer / STp->block_size; 3824 STp->logical_blk_num += transfer / STp->block_size;
3825 STps->drv_block += transfer / STp->block_size; 3825 STps->drv_block += transfer / STp->block_size;
3826 filp->f_pos += transfer; 3826 *ppos += transfer;
3827 buf += transfer; 3827 buf += transfer;
3828 total += transfer; 3828 total += transfer;
3829 } 3829 }
@@ -5626,6 +5626,7 @@ static const struct file_operations osst_fops = {
5626 .open = os_scsi_tape_open, 5626 .open = os_scsi_tape_open,
5627 .flush = os_scsi_tape_flush, 5627 .flush = os_scsi_tape_flush,
5628 .release = os_scsi_tape_close, 5628 .release = os_scsi_tape_close,
5629 .llseek = noop_llseek,
5629}; 5630};
5630 5631
5631static int osst_supports(struct scsi_device * SDp) 5632static int osst_supports(struct scsi_device * SDp)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3ea1a713ef25..24211d0efa6d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3962,6 +3962,7 @@ static const struct file_operations st_fops =
3962 .open = st_open, 3962 .open = st_open,
3963 .flush = st_flush, 3963 .flush = st_flush,
3964 .release = st_release, 3964 .release = st_release,
3965 .llseek = noop_llseek,
3965}; 3966};
3966 3967
3967static int st_probe(struct device *dev) 3968static int st_probe(struct device *dev)
diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c
index 49f0d31c118a..cf7c34a99459 100644
--- a/drivers/staging/go7007/saa7134-go7007.c
+++ b/drivers/staging/go7007/saa7134-go7007.c
@@ -242,13 +242,13 @@ static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev,
242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n", 242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n",
243 (status >> 16) & 0x0f); 243 (status >> 16) & 0x0f);
244 if (status & 0x100000) { 244 if (status & 0x100000) {
245 dma_sync_single(&dev->pci->dev, 245 dma_sync_single_for_cpu(&dev->pci->dev,
246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); 246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE); 247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); 248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma));
249 } else { 249 } else {
250 dma_sync_single(&dev->pci->dev, 250 dma_sync_single_for_cpu(&dev->pci->dev,
251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); 251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE); 252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); 253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma));
254 } 254 }
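For context on the API swap in this hunk: the streaming DMA API brackets CPU accesses with a for_cpu/for_device pair. A minimal sketch of the intended pattern (buffer name hypothetical):

	dma_sync_single_for_cpu(&dev->pci->dev, buf_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... CPU may now safely read the buffer ... */
	dma_sync_single_for_device(&dev->pci->dev, buf_dma,
				   PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... ownership returns to the device for further DMA ... */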
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index e89304c72568..b53deee25d74 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5879,20 +5879,13 @@ out:
5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp) 5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
5880{ 5880{
5881 IXJ_FILTER_CADENCE *lcp; 5881 IXJ_FILTER_CADENCE *lcp;
5882 lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL); 5882 lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
5883 if (lcp == NULL) { 5883 if (IS_ERR(lcp)) {
5884 if(ixjdebug & 0x0001) { 5884 if(ixjdebug & 0x0001) {
5885 printk(KERN_INFO "Could not allocate memory for cadence\n"); 5885 printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
5886 } 5886 }
5887 return -ENOMEM; 5887 return PTR_ERR(lcp);
5888 } 5888 }
5889 if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) {
5890 if(ixjdebug & 0x0001) {
5891 printk(KERN_INFO "Could not copy cadence to kernel\n");
5892 }
5893 kfree(lcp);
5894 return -EFAULT;
5895 }
5896 if (lcp->filter > 5) { 5889 if (lcp->filter > 5) {
5897 if(ixjdebug & 0x0001) { 5890 if(ixjdebug & 0x0001) {
5898 printk(KERN_INFO "Cadence out of range\n"); 5891 printk(KERN_INFO "Cadence out of range\n");
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 23b2a8c0dbfc..b020ba7f1cf2 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -501,7 +501,9 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
501 501
502static int __devinit bfin_bf54x_probe(struct platform_device *pdev) 502static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
503{ 503{
504#ifndef NO_BL_SUPPORT
504 struct backlight_properties props; 505 struct backlight_properties props;
506#endif
505 struct bfin_bf54xfb_info *info; 507 struct bfin_bf54xfb_info *info;
506 struct fb_info *fbinfo; 508 struct fb_info *fbinfo;
507 int ret; 509 int ret;
@@ -654,7 +656,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
654 printk(KERN_ERR DRIVER_NAME 656 printk(KERN_ERR DRIVER_NAME
655 ": unable to register backlight.\n"); 657 ": unable to register backlight.\n");
656 ret = -EINVAL; 658 ret = -EINVAL;
657 goto out9; 659 unregister_framebuffer(fbinfo);
660 goto out8;
658 } 661 }
659 662
660 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); 663 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
@@ -663,8 +666,6 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
663 666
664 return 0; 667 return 0;
665 668
666out9:
667 unregister_framebuffer(fbinfo);
668out8: 669out8:
669 free_irq(info->irq, info); 670 free_irq(info->irq, info);
670out7: 671out7:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index c2ec3dcd4e91..7a50272eaab9 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -420,7 +420,9 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
420 420
421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) 421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
422{ 422{
423#ifndef NO_BL_SUPPORT
423 struct backlight_properties props; 424 struct backlight_properties props;
425#endif
424 struct bfin_t350mcqbfb_info *info; 426 struct bfin_t350mcqbfb_info *info;
425 struct fb_info *fbinfo; 427 struct fb_info *fbinfo;
426 int ret; 428 int ret;
@@ -550,7 +552,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
550 printk(KERN_ERR DRIVER_NAME 552 printk(KERN_ERR DRIVER_NAME
551 ": unable to register backlight.\n"); 553 ": unable to register backlight.\n");
552 ret = -EINVAL; 554 ret = -EINVAL;
553 goto out9; 555 unregister_framebuffer(fbinfo);
556 goto out8;
554 } 557 }
555 558
556 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); 559 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
@@ -559,8 +562,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
559 562
560 return 0; 563 return 0;
561 564
562out9:
563 unregister_framebuffer(fbinfo);
564out8: 565out8:
565 free_irq(info->irq, info); 566 free_irq(info->irq, info);
566out7: 567out7:
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d4471b4c0374..dce8c97b4333 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -71,7 +71,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX", 71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX",
72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge", 72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge",
73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", 73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"}; 74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P",
75 "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"};
75 76
76#define CHIP_UNKNOWN 0x00 77#define CHIP_UNKNOWN 0x00
77#define CHIP_732_TRIO32 0x01 78#define CHIP_732_TRIO32 0x01
@@ -89,10 +90,14 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
89#define CHIP_356_VIRGE_GX2 0x0D 90#define CHIP_356_VIRGE_GX2 0x0D
90#define CHIP_357_VIRGE_GX2P 0x0E 91#define CHIP_357_VIRGE_GX2P 0x0E
91#define CHIP_359_VIRGE_GX2P 0x0F 92#define CHIP_359_VIRGE_GX2P 0x0F
93#define CHIP_360_TRIO3D_1X 0x10
94#define CHIP_362_TRIO3D_2X 0x11
95#define CHIP_368_TRIO3D_2X 0x12
92 96
93#define CHIP_XXX_TRIO 0x80 97#define CHIP_XXX_TRIO 0x80
94#define CHIP_XXX_TRIO64V2_DXGX 0x81 98#define CHIP_XXX_TRIO64V2_DXGX 0x81
95#define CHIP_XXX_VIRGE_DXGX 0x82 99#define CHIP_XXX_VIRGE_DXGX 0x82
100#define CHIP_36X_TRIO3D_1X_2X 0x83
96 101
97#define CHIP_UNDECIDED_FLAG 0x80 102#define CHIP_UNDECIDED_FLAG 0x80
98#define CHIP_MASK 0xFF 103#define CHIP_MASK 0xFF
@@ -324,6 +329,7 @@ static void s3fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
324 329
325static void s3_set_pixclock(struct fb_info *info, u32 pixclock) 330static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
326{ 331{
332 struct s3fb_info *par = info->par;
327 u16 m, n, r; 333 u16 m, n, r;
328 u8 regval; 334 u8 regval;
329 int rv; 335 int rv;
@@ -339,7 +345,13 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
339 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD); 345 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
340 346
341 /* Set S3 clock registers */ 347 /* Set S3 clock registers */
342 vga_wseq(NULL, 0x12, ((n - 2) | (r << 5))); 348 if (par->chip == CHIP_360_TRIO3D_1X ||
349 par->chip == CHIP_362_TRIO3D_2X ||
350 par->chip == CHIP_368_TRIO3D_2X) {
351 vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
352 vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */
353 } else
354 vga_wseq(NULL, 0x12, (n - 2) | (r << 5));
343 vga_wseq(NULL, 0x13, m - 2); 355 vga_wseq(NULL, 0x13, m - 2);
344 356
345 udelay(1000); 357 udelay(1000);
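Worked example for the Trio3D branch above (illustrative): with r = 5, SR12 receives the low two bits as (5 & 3) << 6 = 0x40 merged with n - 2, and SR29 receives the remaining high bit, 5 >> 2 = 1; the older chips pack all of r into SR12 as (r << 5), which only has room for r <= 7.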
@@ -456,7 +468,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
456static int s3fb_set_par(struct fb_info *info) 468static int s3fb_set_par(struct fb_info *info)
457{ 469{
458 struct s3fb_info *par = info->par; 470 struct s3fb_info *par = info->par;
459 u32 value, mode, hmul, offset_value, screen_size, multiplex; 471 u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
460 u32 bpp = info->var.bits_per_pixel; 472 u32 bpp = info->var.bits_per_pixel;
461 473
462 if (bpp != 0) { 474 if (bpp != 0) {
@@ -518,7 +530,7 @@ static int s3fb_set_par(struct fb_info *info)
518 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */ 530 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */
519 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */ 531 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */
520 532
521 svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits 533 svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
522 534
523/* svga_wcrt_mask(0x58, 0x03, 0x03); */ 535/* svga_wcrt_mask(0x58, 0x03, 0x03); */
524 536
@@ -530,10 +542,14 @@ static int s3fb_set_par(struct fb_info *info)
530 pr_debug("fb%d: offset register : %d\n", info->node, offset_value); 542 pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
531 svga_wcrt_multi(s3_offset_regs, offset_value); 543 svga_wcrt_multi(s3_offset_regs, offset_value);
532 544
533 vga_wcrt(NULL, 0x54, 0x18); /* M parameter */ 545 if (par->chip != CHIP_360_TRIO3D_1X &&
534 vga_wcrt(NULL, 0x60, 0xff); /* N parameter */ 546 par->chip != CHIP_362_TRIO3D_2X &&
535 vga_wcrt(NULL, 0x61, 0xff); /* L parameter */ 547 par->chip != CHIP_368_TRIO3D_2X) {
536 vga_wcrt(NULL, 0x62, 0xff); /* L parameter */ 548 vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
549 vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
550 vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
551 vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
552 }
537 553
538 vga_wcrt(NULL, 0x3A, 0x35); 554 vga_wcrt(NULL, 0x3A, 0x35);
539 svga_wattr(0x33, 0x00); 555 svga_wattr(0x33, 0x00);
@@ -570,6 +586,16 @@ static int s3fb_set_par(struct fb_info *info)
570 vga_wcrt(NULL, 0x66, 0x90); 586 vga_wcrt(NULL, 0x66, 0x90);
571 } 587 }
572 588
589 if (par->chip == CHIP_360_TRIO3D_1X ||
590 par->chip == CHIP_362_TRIO3D_2X ||
591 par->chip == CHIP_368_TRIO3D_2X) {
592 dbytes = info->var.xres * ((bpp+7)/8);
593 vga_wcrt(NULL, 0x91, (dbytes + 7) / 8);
594 vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
595
596 vga_wcrt(NULL, 0x66, 0x81);
597 }
598
573 svga_wcrt_mask(0x31, 0x00, 0x40); 599 svga_wcrt_mask(0x31, 0x00, 0x40);
574 multiplex = 0; 600 multiplex = 0;
575 hmul = 1; 601 hmul = 1;
@@ -615,11 +641,13 @@ static int s3fb_set_par(struct fb_info *info)
615 break; 641 break;
616 case 3: 642 case 3:
617 pr_debug("fb%d: 8 bit pseudocolor\n", info->node); 643 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
618 if (info->var.pixclock > 20000) { 644 svga_wcrt_mask(0x50, 0x00, 0x30);
619 svga_wcrt_mask(0x50, 0x00, 0x30); 645 if (info->var.pixclock > 20000 ||
646 par->chip == CHIP_360_TRIO3D_1X ||
647 par->chip == CHIP_362_TRIO3D_2X ||
648 par->chip == CHIP_368_TRIO3D_2X)
620 svga_wcrt_mask(0x67, 0x00, 0xF0); 649 svga_wcrt_mask(0x67, 0x00, 0xF0);
621 } else { 650 else {
622 svga_wcrt_mask(0x50, 0x00, 0x30);
623 svga_wcrt_mask(0x67, 0x10, 0xF0); 651 svga_wcrt_mask(0x67, 0x10, 0xF0);
624 multiplex = 1; 652 multiplex = 1;
625 } 653 }
@@ -634,7 +662,10 @@ static int s3fb_set_par(struct fb_info *info)
634 } else { 662 } else {
635 svga_wcrt_mask(0x50, 0x10, 0x30); 663 svga_wcrt_mask(0x50, 0x10, 0x30);
636 svga_wcrt_mask(0x67, 0x30, 0xF0); 664 svga_wcrt_mask(0x67, 0x30, 0xF0);
637 hmul = 2; 665 if (par->chip != CHIP_360_TRIO3D_1X &&
666 par->chip != CHIP_362_TRIO3D_2X &&
667 par->chip != CHIP_368_TRIO3D_2X)
668 hmul = 2;
638 } 669 }
639 break; 670 break;
640 case 5: 671 case 5:
@@ -647,7 +678,10 @@ static int s3fb_set_par(struct fb_info *info)
647 } else { 678 } else {
648 svga_wcrt_mask(0x50, 0x10, 0x30); 679 svga_wcrt_mask(0x50, 0x10, 0x30);
649 svga_wcrt_mask(0x67, 0x50, 0xF0); 680 svga_wcrt_mask(0x67, 0x50, 0xF0);
650 hmul = 2; 681 if (par->chip != CHIP_360_TRIO3D_1X &&
682 par->chip != CHIP_362_TRIO3D_2X &&
683 par->chip != CHIP_368_TRIO3D_2X)
684 hmul = 2;
651 } 685 }
652 break; 686 break;
653 case 6: 687 case 6:
@@ -866,6 +900,17 @@ static int __devinit s3_identification(int chip)
866 return CHIP_385_VIRGE_GX; 900 return CHIP_385_VIRGE_GX;
867 } 901 }
868 902
903 if (chip == CHIP_36X_TRIO3D_1X_2X) {
904 switch (vga_rcrt(NULL, 0x2f)) {
905 case 0x00:
906 return CHIP_360_TRIO3D_1X;
907 case 0x01:
908 return CHIP_362_TRIO3D_2X;
909 case 0x02:
910 return CHIP_368_TRIO3D_2X;
911 }
912 }
913
869 return CHIP_UNKNOWN; 914 return CHIP_UNKNOWN;
870} 915}
871 916
@@ -930,17 +975,32 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
930 vga_wcrt(NULL, 0x38, 0x48); 975 vga_wcrt(NULL, 0x38, 0x48);
931 vga_wcrt(NULL, 0x39, 0xA5); 976 vga_wcrt(NULL, 0x39, 0xA5);
932 977
933 /* Find how much physical memory there is on the card */ 978 /* Identify chip type */
934 /* 0x36 register is accessible even if other registers are locked */
935 regval = vga_rcrt(NULL, 0x36);
936 info->screen_size = s3_memsizes[regval >> 5] << 10;
937 info->fix.smem_len = info->screen_size;
938
939 par->chip = id->driver_data & CHIP_MASK; 979 par->chip = id->driver_data & CHIP_MASK;
940 par->rev = vga_rcrt(NULL, 0x2f); 980 par->rev = vga_rcrt(NULL, 0x2f);
941 if (par->chip & CHIP_UNDECIDED_FLAG) 981 if (par->chip & CHIP_UNDECIDED_FLAG)
942 par->chip = s3_identification(par->chip); 982 par->chip = s3_identification(par->chip);
943 983
984 /* Find how much physical memory there is on the card */
985 /* 0x36 register is accessible even if other registers are locked */
986 regval = vga_rcrt(NULL, 0x36);
987 if (par->chip == CHIP_360_TRIO3D_1X ||
988 par->chip == CHIP_362_TRIO3D_2X ||
989 par->chip == CHIP_368_TRIO3D_2X) {
990 switch ((regval & 0xE0) >> 5) {
991 case 0: /* 8MB -- only 4MB usable for display */
992 case 1: /* 4MB with 32-bit bus */
993 case 2: /* 4MB */
994 info->screen_size = 4 << 20;
995 break;
996 case 6: /* 2MB */
997 info->screen_size = 2 << 20;
998 break;
999 }
1000 } else
1001 info->screen_size = s3_memsizes[regval >> 5] << 10;
1002 info->fix.smem_len = info->screen_size;
1003
944 /* Find MCLK frequency */ 1004 /* Find MCLK frequency */
945 regval = vga_rseq(NULL, 0x10); 1005 regval = vga_rseq(NULL, 0x10);
946 par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2); 1006 par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2);
@@ -1131,6 +1191,7 @@ static struct pci_device_id s3_devices[] __devinitdata = {
1131 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2}, 1191 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2},
1132 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P}, 1192 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P},
1133 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, 1193 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
1194 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
1134 1195
1135 {0, 0, 0, 0, 0, 0, 0} 1196 {0, 0, 0, 0, 0, 0, 0}
1136}; 1197};
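The Trio3D hunks above hang together as follows: CR36 bits 7:5 encode the fitted memory, but on these chips only part of it is usable for scanout, so the probe decodes the strap by hand instead of indexing s3_memsizes[]. A minimal sketch of that decode, assuming kernel types and a helper name (trio3d_vram_bytes) that does not exist in the driver:

static unsigned int trio3d_vram_bytes(u8 cr36)
{
	switch ((cr36 & 0xE0) >> 5) {
	case 0:			/* 8 MB fitted, only 4 MB usable */
	case 1:			/* 4 MB on a 32-bit bus */
	case 2:			/* 4 MB */
		return 4 << 20;
	case 6:			/* 2 MB */
		return 2 << 20;
	default:
		return 0;	/* straps the hunk leaves undecoded */
	}
}

The PLL hunk follows the same logic: on Trio3D only two of r's three bits fit next to n in SR12 (bits 7:6), so the remaining high bit moves to SR29.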
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 2bc40e682f95..1082541358f0 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -578,14 +578,9 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
578 break; 578 break;
579 579
580 case VIAFB_SET_GAMMA_LUT: 580 case VIAFB_SET_GAMMA_LUT:
581 viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL); 581 viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
582 if (!viafb_gamma_table) 582 if (IS_ERR(viafb_gamma_table))
583 return -ENOMEM; 583 return PTR_ERR(viafb_gamma_table);
584 if (copy_from_user(viafb_gamma_table, argp,
585 256 * sizeof(u32))) {
586 kfree(viafb_gamma_table);
587 return -EFAULT;
588 }
589 viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); 584 viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
590 kfree(viafb_gamma_table); 585 kfree(viafb_gamma_table);
591 break; 586 break;
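The VIAFB_SET_GAMMA_LUT rework is the stock memdup_user() conversion: one call that allocates and copies, encoding failure in the returned pointer. The general shape, with uptr and len standing in for the ioctl argument:

void *buf = memdup_user(uptr, len);	/* kmalloc(GFP_KERNEL) + copy_from_user */
if (IS_ERR(buf))
	return PTR_ERR(buf);		/* -ENOMEM or -EFAULT */
/* ... use buf ... */
kfree(buf);

This drops the hand-rolled allocate/copy/free-on-error dance while keeping the error codes identical.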
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index d70bbbac6b7b..914d1c0bc07a 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -224,7 +224,7 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
224 affs_brelse(bh); 224 affs_brelse(bh);
225 inode = affs_iget(sb, ino); 225 inode = affs_iget(sb, ino);
226 if (IS_ERR(inode)) 226 if (IS_ERR(inode))
227 return ERR_PTR(PTR_ERR(inode)); 227 return ERR_CAST(inode);
228 } 228 }
229 dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations; 229 dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
230 d_add(dentry, inode); 230 d_add(dentry, inode);
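ERR_CAST() (include/linux/err.h) exists for exactly this spot: ERR_PTR(PTR_ERR(inode)) decodes the errno out of one pointer and re-encodes it in another, while ERR_CAST() merely re-types the already-encoded value. A sketch, where the wrapper name to_dentry_err is mine, not the kernel's:

static inline struct dentry *to_dentry_err(struct inode *inode)
{
	return ERR_CAST(inode);	/* same encoded errno, no round-trip */
}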
diff --git a/fs/aio.c b/fs/aio.c
index 1cf12b3dd83a..48fdeebdb544 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,6 +36,7 @@
36#include <linux/blkdev.h> 36#include <linux/blkdev.h>
37#include <linux/mempool.h> 37#include <linux/mempool.h>
38#include <linux/hash.h> 38#include <linux/hash.h>
39#include <linux/compat.h>
39 40
40#include <asm/kmap_types.h> 41#include <asm/kmap_types.h>
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
@@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *iocb)
1384 return ret; 1385 return ret;
1385} 1386}
1386 1387
1387static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb) 1388static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
1388{ 1389{
1389 ssize_t ret; 1390 ssize_t ret;
1390 1391
1391 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf, 1392#ifdef CONFIG_COMPAT
1392 kiocb->ki_nbytes, 1, 1393 if (compat)
1393 &kiocb->ki_inline_vec, &kiocb->ki_iovec); 1394 ret = compat_rw_copy_check_uvector(type,
1395 (struct compat_iovec __user *)kiocb->ki_buf,
1396 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1397 &kiocb->ki_iovec);
1398 else
1399#endif
1400 ret = rw_copy_check_uvector(type,
1401 (struct iovec __user *)kiocb->ki_buf,
1402 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1403 &kiocb->ki_iovec);
1394 if (ret < 0) 1404 if (ret < 0)
1395 goto out; 1405 goto out;
1396 1406
@@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
1420 * Performs the initial checks and aio retry method 1430 * Performs the initial checks and aio retry method
1421 * setup for the kiocb at the time of io submission. 1431 * setup for the kiocb at the time of io submission.
1422 */ 1432 */
1423static ssize_t aio_setup_iocb(struct kiocb *kiocb) 1433static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
1424{ 1434{
1425 struct file *file = kiocb->ki_filp; 1435 struct file *file = kiocb->ki_filp;
1426 ssize_t ret = 0; 1436 ssize_t ret = 0;
@@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
1469 ret = security_file_permission(file, MAY_READ); 1479 ret = security_file_permission(file, MAY_READ);
1470 if (unlikely(ret)) 1480 if (unlikely(ret))
1471 break; 1481 break;
1472 ret = aio_setup_vectored_rw(READ, kiocb); 1482 ret = aio_setup_vectored_rw(READ, kiocb, compat);
1473 if (ret) 1483 if (ret)
1474 break; 1484 break;
1475 ret = -EINVAL; 1485 ret = -EINVAL;
@@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
1483 ret = security_file_permission(file, MAY_WRITE); 1493 ret = security_file_permission(file, MAY_WRITE);
1484 if (unlikely(ret)) 1494 if (unlikely(ret))
1485 break; 1495 break;
1486 ret = aio_setup_vectored_rw(WRITE, kiocb); 1496 ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
1487 if (ret) 1497 if (ret)
1488 break; 1498 break;
1489 ret = -EINVAL; 1499 ret = -EINVAL;
@@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_head *batch_hash)
1548} 1558}
1549 1559
1550static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1560static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1551 struct iocb *iocb, struct hlist_head *batch_hash) 1561 struct iocb *iocb, struct hlist_head *batch_hash,
1562 bool compat)
1552{ 1563{
1553 struct kiocb *req; 1564 struct kiocb *req;
1554 struct file *file; 1565 struct file *file;
@@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1609 req->ki_left = req->ki_nbytes = iocb->aio_nbytes; 1620 req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
1610 req->ki_opcode = iocb->aio_lio_opcode; 1621 req->ki_opcode = iocb->aio_lio_opcode;
1611 1622
1612 ret = aio_setup_iocb(req); 1623 ret = aio_setup_iocb(req, compat);
1613 1624
1614 if (ret) 1625 if (ret)
1615 goto out_put_req; 1626 goto out_put_req;
@@ -1637,20 +1648,8 @@ out_put_req:
1637 return ret; 1648 return ret;
1638} 1649}
1639 1650
1640/* sys_io_submit: 1651long do_io_submit(aio_context_t ctx_id, long nr,
1641 * Queue the nr iocbs pointed to by iocbpp for processing. Returns 1652 struct iocb __user *__user *iocbpp, bool compat)
1642 * the number of iocbs queued. May return -EINVAL if the aio_context
1643 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
1644 * *iocbpp[0] is not properly initialized, if the operation specified
1645 * is invalid for the file descriptor in the iocb. May fail with
1646 * -EFAULT if any of the data structures point to invalid data. May
1647 * fail with -EBADF if the file descriptor specified in the first
1648 * iocb is invalid. May fail with -EAGAIN if insufficient resources
1649 * are available to queue any iocbs. Will return 0 if nr is 0. Will
1650 * fail with -ENOSYS if not implemented.
1651 */
1652SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1653 struct iocb __user * __user *, iocbpp)
1654{ 1653{
1655 struct kioctx *ctx; 1654 struct kioctx *ctx;
1656 long ret = 0; 1655 long ret = 0;
@@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1687 break; 1686 break;
1688 } 1687 }
1689 1688
1690 ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash); 1689 ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
1691 if (ret) 1690 if (ret)
1692 break; 1691 break;
1693 } 1692 }
@@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1697 return i ? i : ret; 1696 return i ? i : ret;
1698} 1697}
1699 1698
1699/* sys_io_submit:
1700 * Queue the nr iocbs pointed to by iocbpp for processing. Returns
1701 * the number of iocbs queued. May return -EINVAL if the aio_context
1702 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
1703 * *iocbpp[0] is not properly initialized, if the operation specified
1704 * is invalid for the file descriptor in the iocb. May fail with
1705 * -EFAULT if any of the data structures point to invalid data. May
1706 * fail with -EBADF if the file descriptor specified in the first
1707 * iocb is invalid. May fail with -EAGAIN if insufficient resources
1708 * are available to queue any iocbs. Will return 0 if nr is 0. Will
1709 * fail with -ENOSYS if not implemented.
1710 */
1711SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1712 struct iocb __user * __user *, iocbpp)
1713{
1714 return do_io_submit(ctx_id, nr, iocbpp, 0);
1715}
1716
1700/* lookup_kiocb 1717/* lookup_kiocb
1701 * Finds a given iocb for cancellation. 1718 * Finds a given iocb for cancellation.
1702 */ 1719 */
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8713c7cfbc79..9a0520b50663 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -28,6 +28,7 @@ static int autofs_root_mkdir(struct inode *,struct dentry *,int);
28static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long); 28static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
29 29
30const struct file_operations autofs_root_operations = { 30const struct file_operations autofs_root_operations = {
31 .llseek = generic_file_llseek,
31 .read = generic_read_dir, 32 .read = generic_read_dir,
32 .readdir = autofs_root_readdir, 33 .readdir = autofs_root_readdir,
33 .ioctl = autofs_root_ioctl, 34 .ioctl = autofs_root_ioctl,
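This one-liner recurs below for vxfs, isofs, ncpfs, qnx4, reiserfs and smbfs: directory fops gain an explicit .llseek so they stop relying on the implicit default_llseek() fallback, which at this point still takes the BKL. The resulting shape, for a hypothetical filesystem:

static const struct file_operations example_dir_operations = {
	.llseek  = generic_file_llseek,	/* explicit, BKL-free */
	.read    = generic_read_dir,
	.readdir = example_readdir,	/* assumed callback */
};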
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index d832062869f6..ba4a38b9c22f 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
95 */ 95 */
96static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) 96static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
97{ 97{
98 struct autofs_dev_ioctl tmp, *ads; 98 struct autofs_dev_ioctl tmp;
99 99
100 if (copy_from_user(&tmp, in, sizeof(tmp))) 100 if (copy_from_user(&tmp, in, sizeof(tmp)))
101 return ERR_PTR(-EFAULT); 101 return ERR_PTR(-EFAULT);
@@ -103,16 +103,7 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
103 if (tmp.size < sizeof(tmp)) 103 if (tmp.size < sizeof(tmp))
104 return ERR_PTR(-EINVAL); 104 return ERR_PTR(-EINVAL);
105 105
106 ads = kmalloc(tmp.size, GFP_KERNEL); 106 return memdup_user(in, tmp.size);
107 if (!ads)
108 return ERR_PTR(-ENOMEM);
109
110 if (copy_from_user(ads, in, tmp.size)) {
111 kfree(ads);
112 return ERR_PTR(-EFAULT);
113 }
114
115 return ads;
116} 107}
117 108
118static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) 109static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
diff --git a/fs/compat.c b/fs/compat.c
index 05448730f840..f0b391c50552 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -568,6 +568,79 @@ out:
568 return ret; 568 return ret;
569} 569}
570 570
571/* A write operation does a read from user space and vice versa */
572#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
573
574ssize_t compat_rw_copy_check_uvector(int type,
575 const struct compat_iovec __user *uvector, unsigned long nr_segs,
576 unsigned long fast_segs, struct iovec *fast_pointer,
577 struct iovec **ret_pointer)
578{
579 compat_ssize_t tot_len;
580 struct iovec *iov = *ret_pointer = fast_pointer;
581 ssize_t ret = 0;
582 int seg;
583
584 /*
585 * SuS says "The readv() function *may* fail if the iovcnt argument
586 * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
587 * traditionally returned zero for zero segments, so...
588 */
589 if (nr_segs == 0)
590 goto out;
591
592 ret = -EINVAL;
593 if (nr_segs > UIO_MAXIOV || nr_segs < 0)
594 goto out;
595 if (nr_segs > fast_segs) {
596 ret = -ENOMEM;
597 iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
598 if (iov == NULL) {
599 *ret_pointer = fast_pointer;
600 goto out;
601 }
602 }
603 *ret_pointer = iov;
604
605 /*
606 * Single unix specification:
607 * We should -EINVAL if an element length is not >= 0 and fitting an
608 * ssize_t. The total length is fitting an ssize_t
609 *
610 * Be careful here because iov_len is a size_t not an ssize_t
611 */
612 tot_len = 0;
613 ret = -EINVAL;
614 for (seg = 0; seg < nr_segs; seg++) {
615 compat_ssize_t tmp = tot_len;
616 compat_uptr_t buf;
617 compat_ssize_t len;
618
619 if (__get_user(len, &uvector->iov_len) ||
620 __get_user(buf, &uvector->iov_base)) {
621 ret = -EFAULT;
622 goto out;
623 }
624 if (len < 0) /* size_t not fitting in compat_ssize_t .. */
625 goto out;
626 tot_len += len;
627 if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
628 goto out;
629 if (!access_ok(vrfy_dir(type), buf, len)) {
630 ret = -EFAULT;
631 goto out;
632 }
633 iov->iov_base = compat_ptr(buf);
634 iov->iov_len = (compat_size_t) len;
635 uvector++;
636 iov++;
637 }
638 ret = tot_len;
639
640out:
641 return ret;
642}
643
571static inline long 644static inline long
572copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64) 645copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
573{ 646{
@@ -600,7 +673,7 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
600 iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64)); 673 iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
601 ret = copy_iocb(nr, iocb, iocb64); 674 ret = copy_iocb(nr, iocb, iocb64);
602 if (!ret) 675 if (!ret)
603 ret = sys_io_submit(ctx_id, nr, iocb64); 676 ret = do_io_submit(ctx_id, nr, iocb64, 1);
604 return ret; 677 return ret;
605} 678}
606 679
@@ -1077,70 +1150,21 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
1077{ 1150{
1078 compat_ssize_t tot_len; 1151 compat_ssize_t tot_len;
1079 struct iovec iovstack[UIO_FASTIOV]; 1152 struct iovec iovstack[UIO_FASTIOV];
1080 struct iovec *iov=iovstack, *vector; 1153 struct iovec *iov;
1081 ssize_t ret; 1154 ssize_t ret;
1082 int seg;
1083 io_fn_t fn; 1155 io_fn_t fn;
1084 iov_fn_t fnv; 1156 iov_fn_t fnv;
1085 1157
1086 /*
1087 * SuS says "The readv() function *may* fail if the iovcnt argument
1088 * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
1089 * traditionally returned zero for zero segments, so...
1090 */
1091 ret = 0;
1092 if (nr_segs == 0)
1093 goto out;
1094
1095 /*
1096 * First get the "struct iovec" from user memory and
1097 * verify all the pointers
1098 */
1099 ret = -EINVAL; 1158 ret = -EINVAL;
1100 if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
1101 goto out;
1102 if (!file->f_op) 1159 if (!file->f_op)
1103 goto out; 1160 goto out;
1104 if (nr_segs > UIO_FASTIOV) { 1161
1105 ret = -ENOMEM;
1106 iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
1107 if (!iov)
1108 goto out;
1109 }
1110 ret = -EFAULT; 1162 ret = -EFAULT;
1111 if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) 1163 if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
1112 goto out; 1164 goto out;
1113 1165
1114 /* 1166 tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
1115 * Single unix specification: 1167 UIO_FASTIOV, iovstack, &iov);
1116 * We should -EINVAL if an element length is not >= 0 and fitting an
1117 * ssize_t. The total length is fitting an ssize_t
1118 *
1119 * Be careful here because iov_len is a size_t not an ssize_t
1120 */
1121 tot_len = 0;
1122 vector = iov;
1123 ret = -EINVAL;
1124 for (seg = 0 ; seg < nr_segs; seg++) {
1125 compat_ssize_t tmp = tot_len;
1126 compat_ssize_t len;
1127 compat_uptr_t buf;
1128
1129 if (__get_user(len, &uvector->iov_len) ||
1130 __get_user(buf, &uvector->iov_base)) {
1131 ret = -EFAULT;
1132 goto out;
1133 }
1134 if (len < 0) /* size_t not fitting an compat_ssize_t .. */
1135 goto out;
1136 tot_len += len;
1137 if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
1138 goto out;
1139 vector->iov_base = compat_ptr(buf);
1140 vector->iov_len = (compat_size_t) len;
1141 uvector++;
1142 vector++;
1143 }
1144 if (tot_len == 0) { 1168 if (tot_len == 0) {
1145 ret = 0; 1169 ret = 0;
1146 goto out; 1170 goto out;
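The per-segment checks in compat_rw_copy_check_uvector() are worth isolating. The accumulator guard relies on the kernel being built with wrap-defined signed arithmetic (-fno-strict-overflow); the helper name and shape below are mine:

static int iov_accumulate(compat_ssize_t *tot, compat_ssize_t len)
{
	compat_ssize_t prev = *tot;

	if (len < 0)		/* a size_t too large for compat_ssize_t */
		return -EINVAL;
	*tot += len;
	if (*tot < prev)	/* signed accumulation wrapped */
		return -EINVAL;
	return 0;
}

Factoring this into a shared function is also what lets compat_do_readv_writev() above shed its open-coded copy of the same loop.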
diff --git a/fs/exec.c b/fs/exec.c
index 9badbc0bfb1d..e19de6a80339 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -768,7 +768,6 @@ static int de_thread(struct task_struct *tsk)
768 struct signal_struct *sig = tsk->signal; 768 struct signal_struct *sig = tsk->signal;
769 struct sighand_struct *oldsighand = tsk->sighand; 769 struct sighand_struct *oldsighand = tsk->sighand;
770 spinlock_t *lock = &oldsighand->siglock; 770 spinlock_t *lock = &oldsighand->siglock;
771 int count;
772 771
773 if (thread_group_empty(tsk)) 772 if (thread_group_empty(tsk))
774 goto no_thread_group; 773 goto no_thread_group;
@@ -785,13 +784,13 @@ static int de_thread(struct task_struct *tsk)
785 spin_unlock_irq(lock); 784 spin_unlock_irq(lock);
786 return -EAGAIN; 785 return -EAGAIN;
787 } 786 }
787
788 sig->group_exit_task = tsk; 788 sig->group_exit_task = tsk;
789 zap_other_threads(tsk); 789 sig->notify_count = zap_other_threads(tsk);
790 if (!thread_group_leader(tsk))
791 sig->notify_count--;
790 792
791 /* Account for the thread group leader hanging around: */ 793 while (sig->notify_count) {
792 count = thread_group_leader(tsk) ? 1 : 2;
793 sig->notify_count = count;
794 while (atomic_read(&sig->count) > count) {
795 __set_current_state(TASK_UNINTERRUPTIBLE); 794 __set_current_state(TASK_UNINTERRUPTIBLE);
796 spin_unlock_irq(lock); 795 spin_unlock_irq(lock);
797 schedule(); 796 schedule();
@@ -1662,12 +1661,15 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
1662 struct task_struct *tsk = current; 1661 struct task_struct *tsk = current;
1663 struct mm_struct *mm = tsk->mm; 1662 struct mm_struct *mm = tsk->mm;
1664 struct completion *vfork_done; 1663 struct completion *vfork_done;
1665 int core_waiters; 1664 int core_waiters = -EBUSY;
1666 1665
1667 init_completion(&core_state->startup); 1666 init_completion(&core_state->startup);
1668 core_state->dumper.task = tsk; 1667 core_state->dumper.task = tsk;
1669 core_state->dumper.next = NULL; 1668 core_state->dumper.next = NULL;
1670 core_waiters = zap_threads(tsk, mm, core_state, exit_code); 1669
1670 down_write(&mm->mmap_sem);
1671 if (!mm->core_state)
1672 core_waiters = zap_threads(tsk, mm, core_state, exit_code);
1671 up_write(&mm->mmap_sem); 1673 up_write(&mm->mmap_sem);
1672 1674
1673 if (unlikely(core_waiters < 0)) 1675 if (unlikely(core_waiters < 0))
@@ -1787,21 +1789,61 @@ static void wait_for_dump_helpers(struct file *file)
1787} 1789}
1788 1790
1789 1791
1792/*
1793 * umh_pipe_setup
1794 * helper function to customize the process used
1795 * to collect the core in userspace. Specifically
1796 * it sets up a pipe and installs it as fd 0 (stdin)
1797 * for the process. Returns 0 on success, or
1798 * PTR_ERR on failure.
1799 * Note that it also sets the core limit to 1. This
1800 * is a special value that we use to trap recursive
1801 * core dumps
1802 */
1803static int umh_pipe_setup(struct subprocess_info *info)
1804{
1805 struct file *rp, *wp;
1806 struct fdtable *fdt;
1807 struct coredump_params *cp = (struct coredump_params *)info->data;
1808 struct files_struct *cf = current->files;
1809
1810 wp = create_write_pipe(0);
1811 if (IS_ERR(wp))
1812 return PTR_ERR(wp);
1813
1814 rp = create_read_pipe(wp, 0);
1815 if (IS_ERR(rp)) {
1816 free_write_pipe(wp);
1817 return PTR_ERR(rp);
1818 }
1819
1820 cp->file = wp;
1821
1822 sys_close(0);
1823 fd_install(0, rp);
1824 spin_lock(&cf->file_lock);
1825 fdt = files_fdtable(cf);
1826 FD_SET(0, fdt->open_fds);
1827 FD_CLR(0, fdt->close_on_exec);
1828 spin_unlock(&cf->file_lock);
1829
1830 /* and disallow core files too */
1831 current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
1832
1833 return 0;
1834}
1835
1790void do_coredump(long signr, int exit_code, struct pt_regs *regs) 1836void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1791{ 1837{
1792 struct core_state core_state; 1838 struct core_state core_state;
1793 char corename[CORENAME_MAX_SIZE + 1]; 1839 char corename[CORENAME_MAX_SIZE + 1];
1794 struct mm_struct *mm = current->mm; 1840 struct mm_struct *mm = current->mm;
1795 struct linux_binfmt * binfmt; 1841 struct linux_binfmt * binfmt;
1796 struct inode * inode;
1797 const struct cred *old_cred; 1842 const struct cred *old_cred;
1798 struct cred *cred; 1843 struct cred *cred;
1799 int retval = 0; 1844 int retval = 0;
1800 int flag = 0; 1845 int flag = 0;
1801 int ispipe = 0; 1846 int ispipe;
1802 char **helper_argv = NULL;
1803 int helper_argc = 0;
1804 int dump_count = 0;
1805 static atomic_t core_dump_count = ATOMIC_INIT(0); 1847 static atomic_t core_dump_count = ATOMIC_INIT(0);
1806 struct coredump_params cprm = { 1848 struct coredump_params cprm = {
1807 .signr = signr, 1849 .signr = signr,
@@ -1820,23 +1862,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1820 binfmt = mm->binfmt; 1862 binfmt = mm->binfmt;
1821 if (!binfmt || !binfmt->core_dump) 1863 if (!binfmt || !binfmt->core_dump)
1822 goto fail; 1864 goto fail;
1823 1865 if (!__get_dumpable(cprm.mm_flags))
1824 cred = prepare_creds();
1825 if (!cred) {
1826 retval = -ENOMEM;
1827 goto fail; 1866 goto fail;
1828 }
1829 1867
1830 down_write(&mm->mmap_sem); 1868 cred = prepare_creds();
1831 /* 1869 if (!cred)
1832 * If another thread got here first, or we are not dumpable, bail out.
1833 */
1834 if (mm->core_state || !__get_dumpable(cprm.mm_flags)) {
1835 up_write(&mm->mmap_sem);
1836 put_cred(cred);
1837 goto fail; 1870 goto fail;
1838 }
1839
1840 /* 1871 /*
1841 * We cannot trust fsuid as being the "true" uid of the 1872 * We cannot trust fsuid as being the "true" uid of the
1842 * process nor do we know its entire history. We only know it 1873 * process nor do we know its entire history. We only know it
@@ -1849,10 +1880,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1849 } 1880 }
1850 1881
1851 retval = coredump_wait(exit_code, &core_state); 1882 retval = coredump_wait(exit_code, &core_state);
1852 if (retval < 0) { 1883 if (retval < 0)
1853 put_cred(cred); 1884 goto fail_creds;
1854 goto fail;
1855 }
1856 1885
1857 old_cred = override_creds(cred); 1886 old_cred = override_creds(cred);
1858 1887
@@ -1870,19 +1899,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1870 ispipe = format_corename(corename, signr); 1899 ispipe = format_corename(corename, signr);
1871 unlock_kernel(); 1900 unlock_kernel();
1872 1901
1873 if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
1874 goto fail_unlock;
1875
1876 if (ispipe) { 1902 if (ispipe) {
1877 if (cprm.limit == 0) { 1903 int dump_count;
1904 char **helper_argv;
1905
1906 if (cprm.limit == 1) {
1878 /* 1907 /*
1879 * Normally core limits are irrelevant to pipes, since 1908 * Normally core limits are irrelevant to pipes, since
1880 * we're not writing to the file system, but we use 1909 * we're not writing to the file system, but we use
1881 * cprm.limit of 0 here as a special value. Any 1910 * cprm.limit of 1 here as a special value. Any
1882 * non-zero limit gets set to RLIM_INFINITY below, but 1911 * non-1 limit gets set to RLIM_INFINITY below, but
1883 * a limit of 0 skips the dump. This is a consistent 1912 * a limit of 0 skips the dump. This is a consistent
1884 * way to catch recursive crashes. We can still crash 1913 * way to catch recursive crashes. We can still crash
1885 * if the core_pattern binary sets RLIM_CORE = !0 1914 * if the core_pattern binary sets RLIM_CORE = !1
1886 * but it runs as root, and can do lots of stupid things 1915 * but it runs as root, and can do lots of stupid things
1887 * Note that we use task_tgid_vnr here to grab the pid 1916 * Note that we use task_tgid_vnr here to grab the pid
1888 * of the process group leader. That way we get the 1917 * of the process group leader. That way we get the
@@ -1890,11 +1919,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1890 * core_pattern process dies. 1919 * core_pattern process dies.
1891 */ 1920 */
1892 printk(KERN_WARNING 1921 printk(KERN_WARNING
1893 "Process %d(%s) has RLIMIT_CORE set to 0\n", 1922 "Process %d(%s) has RLIMIT_CORE set to 1\n",
1894 task_tgid_vnr(current), current->comm); 1923 task_tgid_vnr(current), current->comm);
1895 printk(KERN_WARNING "Aborting core\n"); 1924 printk(KERN_WARNING "Aborting core\n");
1896 goto fail_unlock; 1925 goto fail_unlock;
1897 } 1926 }
1927 cprm.limit = RLIM_INFINITY;
1898 1928
1899 dump_count = atomic_inc_return(&core_dump_count); 1929 dump_count = atomic_inc_return(&core_dump_count);
1900 if (core_pipe_limit && (core_pipe_limit < dump_count)) { 1930 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
@@ -1904,71 +1934,74 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1904 goto fail_dropcount; 1934 goto fail_dropcount;
1905 } 1935 }
1906 1936
1907 helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc); 1937 helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
1908 if (!helper_argv) { 1938 if (!helper_argv) {
1909 printk(KERN_WARNING "%s failed to allocate memory\n", 1939 printk(KERN_WARNING "%s failed to allocate memory\n",
1910 __func__); 1940 __func__);
1911 goto fail_dropcount; 1941 goto fail_dropcount;
1912 } 1942 }
1913 1943
1914 cprm.limit = RLIM_INFINITY; 1944 retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
1915 1945 NULL, UMH_WAIT_EXEC, umh_pipe_setup,
1916 /* SIGPIPE can happen, but it's just never processed */ 1946 NULL, &cprm);
1917 if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL, 1947 argv_free(helper_argv);
1918 &cprm.file)) { 1948 if (retval) {
1919 printk(KERN_INFO "Core dump to %s pipe failed\n", 1949 printk(KERN_INFO "Core dump to %s pipe failed\n",
1920 corename); 1950 corename);
1921 goto fail_dropcount; 1951 goto close_fail;
1922 } 1952 }
1923 } else 1953 } else {
1954 struct inode *inode;
1955
1956 if (cprm.limit < binfmt->min_coredump)
1957 goto fail_unlock;
1958
1924 cprm.file = filp_open(corename, 1959 cprm.file = filp_open(corename,
1925 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 1960 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
1926 0600); 1961 0600);
1927 if (IS_ERR(cprm.file)) 1962 if (IS_ERR(cprm.file))
1928 goto fail_dropcount; 1963 goto fail_unlock;
1929 inode = cprm.file->f_path.dentry->d_inode;
1930 if (inode->i_nlink > 1)
1931 goto close_fail; /* multiple links - don't dump */
1932 if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
1933 goto close_fail;
1934
1935 /* AK: actually i see no reason to not allow this for named pipes etc.,
1936 but keep the previous behaviour for now. */
1937 if (!ispipe && !S_ISREG(inode->i_mode))
1938 goto close_fail;
1939 /*
1940 * Dont allow local users get cute and trick others to coredump
1941 * into their pre-created files:
1942 * Note, this is not relevant for pipes
1943 */
1944 if (!ispipe && (inode->i_uid != current_fsuid()))
1945 goto close_fail;
1946 if (!cprm.file->f_op)
1947 goto close_fail;
1948 if (!cprm.file->f_op->write)
1949 goto close_fail;
1950 if (!ispipe &&
1951 do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
1952 goto close_fail;
1953 1964
1954 retval = binfmt->core_dump(&cprm); 1965 inode = cprm.file->f_path.dentry->d_inode;
1966 if (inode->i_nlink > 1)
1967 goto close_fail;
1968 if (d_unhashed(cprm.file->f_path.dentry))
1969 goto close_fail;
1970 /*
1971 * AK: actually i see no reason to not allow this for named
1972 * pipes etc, but keep the previous behaviour for now.
1973 */
1974 if (!S_ISREG(inode->i_mode))
1975 goto close_fail;
1976 /*
1977 * Don't allow local users to get cute and trick others to coredump
1978 * into their pre-created files.
1979 */
1980 if (inode->i_uid != current_fsuid())
1981 goto close_fail;
1982 if (!cprm.file->f_op || !cprm.file->f_op->write)
1983 goto close_fail;
1984 if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
1985 goto close_fail;
1986 }
1955 1987
1988 retval = binfmt->core_dump(&cprm);
1956 if (retval) 1989 if (retval)
1957 current->signal->group_exit_code |= 0x80; 1990 current->signal->group_exit_code |= 0x80;
1958close_fail: 1991
1959 if (ispipe && core_pipe_limit) 1992 if (ispipe && core_pipe_limit)
1960 wait_for_dump_helpers(cprm.file); 1993 wait_for_dump_helpers(cprm.file);
1961 filp_close(cprm.file, NULL); 1994close_fail:
1995 if (cprm.file)
1996 filp_close(cprm.file, NULL);
1962fail_dropcount: 1997fail_dropcount:
1963 if (dump_count) 1998 if (ispipe)
1964 atomic_dec(&core_dump_count); 1999 atomic_dec(&core_dump_count);
1965fail_unlock: 2000fail_unlock:
1966 if (helper_argv) 2001 coredump_finish(mm);
1967 argv_free(helper_argv);
1968
1969 revert_creds(old_cred); 2002 revert_creds(old_cred);
2003fail_creds:
1970 put_cred(cred); 2004 put_cred(cred);
1971 coredump_finish(mm);
1972fail: 2005fail:
1973 return; 2006 return;
1974} 2007}
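A condensed reading of the reworked pipe case, error paths elided; this is my summary of the hunks above, not code from the patch:

/*
 * format_corename()              -> corename = "|helper args", ispipe = 1
 * cprm.limit == 1                -> recursion trap: abort the dump
 * call_usermodehelper_fns(..., UMH_WAIT_EXEC, umh_pipe_setup, NULL, &cprm)
 *     umh_pipe_setup(), in the helper's context before exec:
 *         create pipe; write end -> cprm.file (for the in-kernel dumper)
 *                      read end  -> helper's fd 0 (stdin)
 *         RLIMIT_CORE = {1, 1}   -> a crashing helper cannot recurse
 * binfmt->core_dump(&cprm)       -> core bytes flow into the pipe
 */

An RLIMIT_CORE of 1 byte is otherwise useless, so it can safely serve as the recursion marker that umh_pipe_setup() plants in the helper.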
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index aee049cb9f84..0ec7bb2c95c6 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -57,6 +57,8 @@ const struct inode_operations vxfs_dir_inode_ops = {
57}; 57};
58 58
59const struct file_operations vxfs_dir_operations = { 59const struct file_operations vxfs_dir_operations = {
60 .llseek = generic_file_llseek,
61 .read = generic_read_dir,
60 .readdir = vxfs_readdir, 62 .readdir = vxfs_readdir,
61}; 63};
62 64
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 1e1f286dd70e..4a8eb31c5338 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -103,7 +103,7 @@ static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
103 /* banners (can't represent line 0 by pos 0 as that would involve 103 /* banners (can't represent line 0 by pos 0 as that would involve
104 * returning a NULL pointer) */ 104 * returning a NULL pointer) */
105 if (pos == 0) 105 if (pos == 0)
106 return (struct fscache_object *) ++(*_pos); 106 return (struct fscache_object *)(long)++(*_pos);
107 if (pos < 3) 107 if (pos < 3)
108 return (struct fscache_object *)pos; 108 return (struct fscache_object *)pos;
109 109
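The extra (long) exists for 32-bit builds: *_pos is a loff_t, i.e. 64 bits, and casting that straight to a 32-bit pointer draws a cast-to-pointer-from-integer-of-different-size warning. Truncating through a pointer-sized long first makes the narrowing explicit, and it is safe here because the cookie is only ever 1 or 2:

long long pos64 = 1;			/* loff_t-sized line cookie */
void *cookie = (void *)(long)pos64;	/* explicit narrow, then cast */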
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index b9ab69b3a482..e0aca9a0ac68 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -272,6 +272,7 @@ static int isofs_readdir(struct file *filp,
272 272
273const struct file_operations isofs_dir_operations = 273const struct file_operations isofs_dir_operations =
274{ 274{
275 .llseek = generic_file_llseek,
275 .read = generic_read_dir, 276 .read = generic_read_dir,
276 .readdir = isofs_readdir, 277 .readdir = isofs_readdir,
277}; 278};
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 92dde6f8d893..9578cbe0cd58 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -49,6 +49,7 @@ extern int ncp_symlink(struct inode *, struct dentry *, const char *);
49 49
50const struct file_operations ncp_dir_operations = 50const struct file_operations ncp_dir_operations =
51{ 51{
52 .llseek = generic_file_llseek,
52 .read = generic_read_dir, 53 .read = generic_read_dir,
53 .readdir = ncp_readdir, 54 .readdir = ncp_readdir,
54 .unlocked_ioctl = ncp_ioctl, 55 .unlocked_ioctl = ncp_ioctl,
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 885ab5513ac5..9b58d38bc911 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -267,7 +267,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
267 shpending = p->signal->shared_pending.signal; 267 shpending = p->signal->shared_pending.signal;
268 blocked = p->blocked; 268 blocked = p->blocked;
269 collect_sigign_sigcatch(p, &ignored, &caught); 269 collect_sigign_sigcatch(p, &ignored, &caught);
270 num_threads = atomic_read(&p->signal->count); 270 num_threads = get_nr_threads(p);
271 rcu_read_lock(); /* FIXME: is this correct? */ 271 rcu_read_lock(); /* FIXME: is this correct? */
272 qsize = atomic_read(&__task_cred(p)->user->sigpending); 272 qsize = atomic_read(&__task_cred(p)->user->sigpending);
273 rcu_read_unlock(); 273 rcu_read_unlock();
@@ -410,7 +410,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
410 tty_nr = new_encode_dev(tty_devnum(sig->tty)); 410 tty_nr = new_encode_dev(tty_devnum(sig->tty));
411 } 411 }
412 412
413 num_threads = atomic_read(&sig->count); 413 num_threads = get_nr_threads(task);
414 collect_sigign_sigcatch(task, &sigign, &sigcatch); 414 collect_sigign_sigcatch(task, &sigign, &sigcatch);
415 415
416 cmin_flt = sig->cmin_flt; 416 cmin_flt = sig->cmin_flt;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c7f9f23449dc..acb7ef80ea4f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -166,18 +166,6 @@ static int get_fs_path(struct task_struct *task, struct path *path, bool root)
166 return result; 166 return result;
167} 167}
168 168
169static int get_nr_threads(struct task_struct *tsk)
170{
171 unsigned long flags;
172 int count = 0;
173
174 if (lock_task_sighand(tsk, &flags)) {
175 count = atomic_read(&tsk->signal->count);
176 unlock_task_sighand(tsk, &flags);
177 }
178 return count;
179}
180
181static int proc_cwd_link(struct inode *inode, struct path *path) 169static int proc_cwd_link(struct inode *inode, struct path *path)
182{ 170{
183 struct task_struct *task = get_proc_task(inode); 171 struct task_struct *task = get_proc_task(inode);
@@ -2444,7 +2432,7 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
2444 const struct pid_entry *p = ptr; 2432 const struct pid_entry *p = ptr;
2445 struct inode *inode; 2433 struct inode *inode;
2446 struct proc_inode *ei; 2434 struct proc_inode *ei;
2447 struct dentry *error = ERR_PTR(-EINVAL); 2435 struct dentry *error;
2448 2436
2449 /* Allocate the inode */ 2437 /* Allocate the inode */
2450 error = ERR_PTR(-ENOMEM); 2438 error = ERR_PTR(-ENOMEM);
@@ -2794,7 +2782,7 @@ out:
2794 2782
2795struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) 2783struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2796{ 2784{
2797 struct dentry *result = ERR_PTR(-ENOENT); 2785 struct dentry *result;
2798 struct task_struct *task; 2786 struct task_struct *task;
2799 unsigned tgid; 2787 unsigned tgid;
2800 struct pid_namespace *ns; 2788 struct pid_namespace *ns;
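The removed static helper has a tree-wide replacement that array.c above now calls. Loudly an assumption: the sketch below is one plausible minimal form, and the actual home, locking and field used by get_nr_threads() in this series may differ.

int get_nr_threads(struct task_struct *tsk)
{
	return atomic_read(&tsk->signal->count);	/* assumed body */
}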
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 43c127490606..2791907744ed 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -343,21 +343,6 @@ static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
343/* 343/*
344 * Return an inode number between PROC_DYNAMIC_FIRST and 344 * Return an inode number between PROC_DYNAMIC_FIRST and
345 * 0xffffffff, or zero on failure. 345 * 0xffffffff, or zero on failure.
346 *
347 * Current inode allocations in the proc-fs (hex-numbers):
348 *
349 * 00000000 reserved
350 * 00000001-00000fff static entries (goners)
351 * 001 root-ino
352 *
353 * 00001000-00001fff unused
354 * 0001xxxx-7fffxxxx pid-dir entries for pid 1-7fff
355 * 80000000-efffffff unused
356 * f0000000-ffffffff dynamic entries
357 *
358 * Goal:
359 * Once we split the thing into several virtual filesystems,
360 * we will get rid of magical ranges (and this comment, BTW).
361 */ 346 */
362static unsigned int get_inode_number(void) 347static unsigned int get_inode_number(void)
363{ 348{
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index c837a77351be..6f37c391468d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -588,7 +588,7 @@ static struct kcore_list kcore_text;
588 */ 588 */
589static void __init proc_kcore_text_init(void) 589static void __init proc_kcore_text_init(void)
590{ 590{
591 kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT); 591 kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
592} 592}
593#else 593#else
594static void __init proc_kcore_text_init(void) 594static void __init proc_kcore_text_init(void)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 757c069f2a65..4258384ed22d 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -110,7 +110,6 @@ void __init proc_root_init(void)
110 if (err) 110 if (err)
111 return; 111 return;
112 proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); 112 proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
113 err = PTR_ERR(proc_mnt);
114 if (IS_ERR(proc_mnt)) { 113 if (IS_ERR(proc_mnt)) {
115 unregister_filesystem(&proc_fs_type); 114 unregister_filesystem(&proc_fs_type);
116 return; 115 return;
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 6f30c3d5bcbf..3d3fd4692133 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -77,6 +77,7 @@ out:
77 77
78const struct file_operations qnx4_dir_operations = 78const struct file_operations qnx4_dir_operations =
79{ 79{
80 .llseek = generic_file_llseek,
80 .read = generic_read_dir, 81 .read = generic_read_dir,
81 .readdir = qnx4_readdir, 82 .readdir = qnx4_readdir,
82 .fsync = simple_fsync, 83 .fsync = simple_fsync,
diff --git a/fs/read_write.c b/fs/read_write.c
index 113386d6fd2d..9c0485236e68 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -97,6 +97,23 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
97} 97}
98EXPORT_SYMBOL(generic_file_llseek); 98EXPORT_SYMBOL(generic_file_llseek);
99 99
100/**
101 * noop_llseek - No Operation Performed llseek implementation
102 * @file: file structure to seek on
103 * @offset: file offset to seek to
104 * @origin: type of seek
105 *
 106 * This is an implementation of ->llseek usable for the rare special case when
107 * userspace expects the seek to succeed but the (device) file is actually not
108 * able to perform the seek. In this case you use noop_llseek() instead of
109 * falling back to the default implementation of ->llseek.
110 */
111loff_t noop_llseek(struct file *file, loff_t offset, int origin)
112{
113 return file->f_pos;
114}
115EXPORT_SYMBOL(noop_llseek);
116
100loff_t no_llseek(struct file *file, loff_t offset, int origin) 117loff_t no_llseek(struct file *file, loff_t offset, int origin)
101{ 118{
102 return -ESPIPE; 119 return -ESPIPE;
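A typical consumer of the new helper, with every name below hypothetical: a device that cannot seek, but whose userspace expects lseek() to succeed, reports the current position back unchanged instead of failing with -ESPIPE:

static const struct file_operations fugue_fops = {
	.owner  = THIS_MODULE,
	.llseek = noop_llseek,	/* "succeeds", f_pos never moves */
	.read   = fugue_read,	/* assumed */
	.write  = fugue_write,	/* assumed */
};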
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 07930449a958..4455fbe269a3 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -18,6 +18,7 @@ static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry,
18 int datasync); 18 int datasync);
19 19
20const struct file_operations reiserfs_dir_operations = { 20const struct file_operations reiserfs_dir_operations = {
21 .llseek = generic_file_llseek,
21 .read = generic_read_dir, 22 .read = generic_read_dir,
22 .readdir = reiserfs_readdir, 23 .readdir = reiserfs_readdir,
23 .fsync = reiserfs_dir_fsync, 24 .fsync = reiserfs_dir_fsync,
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 6c978428892d..00a70cab1f36 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -37,6 +37,7 @@ static int smb_link(struct dentry *, struct inode *, struct dentry *);
37 37
38const struct file_operations smb_dir_operations = 38const struct file_operations smb_dir_operations =
39{ 39{
40 .llseek = generic_file_llseek,
40 .read = generic_read_dir, 41 .read = generic_read_dir,
41 .readdir = smb_readdir, 42 .readdir = smb_readdir,
42 .unlocked_ioctl = smb_ioctl, 43 .unlocked_ioctl = smb_ioctl,
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index 25a00d19d686..cc6ce8a84c21 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -26,6 +26,17 @@ config SQUASHFS
26 26
27 If unsure, say N. 27 If unsure, say N.
28 28
29config SQUASHFS_XATTRS
30 bool "Squashfs XATTR support"
31 depends on SQUASHFS
32 default n
33 help
34 Saying Y here includes support for extended attributes (xattrs).
35 Xattrs are name:value pairs associated with inodes by
36 the kernel or by users (see the attr(5) manual page).
37
38 If unsure, say N.
39
29config SQUASHFS_EMBEDDED 40config SQUASHFS_EMBEDDED
30 41
31 bool "Additional option for memory-constrained systems" 42 bool "Additional option for memory-constrained systems"
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index df8a19ef870d..2cee3e9fa452 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,3 +5,5 @@
5obj-$(CONFIG_SQUASHFS) += squashfs.o 5obj-$(CONFIG_SQUASHFS) += squashfs.o
6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o 6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
7squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o 7squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o
8squashfs-$(CONFIG_SQUASHFS_XATTRS) += xattr.o xattr_id.o
9
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 49daaf669e41..62e63ad25075 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -40,11 +40,13 @@
40 40
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/vfs.h> 42#include <linux/vfs.h>
43#include <linux/xattr.h>
43 44
44#include "squashfs_fs.h" 45#include "squashfs_fs.h"
45#include "squashfs_fs_sb.h" 46#include "squashfs_fs_sb.h"
46#include "squashfs_fs_i.h" 47#include "squashfs_fs_i.h"
47#include "squashfs.h" 48#include "squashfs.h"
49#include "xattr.h"
48 50
49/* 51/*
50 * Initialise VFS inode with the base inode information common to all 52 * Initialise VFS inode with the base inode information common to all
@@ -111,6 +113,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
111 int err, type, offset = SQUASHFS_INODE_OFFSET(ino); 113 int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
112 union squashfs_inode squashfs_ino; 114 union squashfs_inode squashfs_ino;
113 struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base; 115 struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
116 int xattr_id = SQUASHFS_INVALID_XATTR;
114 117
115 TRACE("Entered squashfs_read_inode\n"); 118 TRACE("Entered squashfs_read_inode\n");
116 119
@@ -199,8 +202,10 @@ int squashfs_read_inode(struct inode *inode, long long ino)
199 frag_offset = 0; 202 frag_offset = 0;
200 } 203 }
201 204
205 xattr_id = le32_to_cpu(sqsh_ino->xattr);
202 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); 206 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
203 inode->i_size = le64_to_cpu(sqsh_ino->file_size); 207 inode->i_size = le64_to_cpu(sqsh_ino->file_size);
208 inode->i_op = &squashfs_inode_ops;
204 inode->i_fop = &generic_ro_fops; 209 inode->i_fop = &generic_ro_fops;
205 inode->i_mode |= S_IFREG; 210 inode->i_mode |= S_IFREG;
206 inode->i_blocks = ((inode->i_size - 211 inode->i_blocks = ((inode->i_size -
@@ -251,6 +256,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
251 if (err < 0) 256 if (err < 0)
252 goto failed_read; 257 goto failed_read;
253 258
259 xattr_id = le32_to_cpu(sqsh_ino->xattr);
254 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); 260 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
255 inode->i_size = le32_to_cpu(sqsh_ino->file_size); 261 inode->i_size = le32_to_cpu(sqsh_ino->file_size);
256 inode->i_op = &squashfs_dir_inode_ops; 262 inode->i_op = &squashfs_dir_inode_ops;
@@ -280,21 +286,33 @@ int squashfs_read_inode(struct inode *inode, long long ino)
280 286
281 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); 287 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
282 inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); 288 inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
283 inode->i_op = &page_symlink_inode_operations; 289 inode->i_op = &squashfs_symlink_inode_ops;
284 inode->i_data.a_ops = &squashfs_symlink_aops; 290 inode->i_data.a_ops = &squashfs_symlink_aops;
285 inode->i_mode |= S_IFLNK; 291 inode->i_mode |= S_IFLNK;
286 squashfs_i(inode)->start = block; 292 squashfs_i(inode)->start = block;
287 squashfs_i(inode)->offset = offset; 293 squashfs_i(inode)->offset = offset;
288 294
295 if (type == SQUASHFS_LSYMLINK_TYPE) {
296 __le32 xattr;
297
298 err = squashfs_read_metadata(sb, NULL, &block,
299 &offset, inode->i_size);
300 if (err < 0)
301 goto failed_read;
302 err = squashfs_read_metadata(sb, &xattr, &block,
303 &offset, sizeof(xattr));
304 if (err < 0)
305 goto failed_read;
306 xattr_id = le32_to_cpu(xattr);
307 }
308
289 TRACE("Symbolic link inode %x:%x, start_block %llx, offset " 309 TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
290 "%x\n", SQUASHFS_INODE_BLK(ino), offset, 310 "%x\n", SQUASHFS_INODE_BLK(ino), offset,
291 block, offset); 311 block, offset);
292 break; 312 break;
293 } 313 }
294 case SQUASHFS_BLKDEV_TYPE: 314 case SQUASHFS_BLKDEV_TYPE:
295 case SQUASHFS_CHRDEV_TYPE: 315 case SQUASHFS_CHRDEV_TYPE: {
296 case SQUASHFS_LBLKDEV_TYPE:
297 case SQUASHFS_LCHRDEV_TYPE: {
298 struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev; 316 struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
299 unsigned int rdev; 317 unsigned int rdev;
300 318
@@ -315,10 +333,32 @@ int squashfs_read_inode(struct inode *inode, long long ino)
315 SQUASHFS_INODE_BLK(ino), offset, rdev); 333 SQUASHFS_INODE_BLK(ino), offset, rdev);
316 break; 334 break;
317 } 335 }
336 case SQUASHFS_LBLKDEV_TYPE:
337 case SQUASHFS_LCHRDEV_TYPE: {
338 struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev;
339 unsigned int rdev;
340
341 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
342 sizeof(*sqsh_ino));
343 if (err < 0)
344 goto failed_read;
345
346 if (type == SQUASHFS_LCHRDEV_TYPE)
347 inode->i_mode |= S_IFCHR;
348 else
349 inode->i_mode |= S_IFBLK;
350 xattr_id = le32_to_cpu(sqsh_ino->xattr);
351 inode->i_op = &squashfs_inode_ops;
352 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
353 rdev = le32_to_cpu(sqsh_ino->rdev);
354 init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
355
356 TRACE("Device inode %x:%x, rdev %x\n",
357 SQUASHFS_INODE_BLK(ino), offset, rdev);
358 break;
359 }
318 case SQUASHFS_FIFO_TYPE: 360 case SQUASHFS_FIFO_TYPE:
319 case SQUASHFS_SOCKET_TYPE: 361 case SQUASHFS_SOCKET_TYPE: {
320 case SQUASHFS_LFIFO_TYPE:
321 case SQUASHFS_LSOCKET_TYPE: {
322 struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc; 362 struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;
323 363
324 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, 364 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
@@ -334,14 +374,52 @@ int squashfs_read_inode(struct inode *inode, long long ino)
334 init_special_inode(inode, inode->i_mode, 0); 374 init_special_inode(inode, inode->i_mode, 0);
335 break; 375 break;
336 } 376 }
377 case SQUASHFS_LFIFO_TYPE:
378 case SQUASHFS_LSOCKET_TYPE: {
379 struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc;
380
381 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
382 sizeof(*sqsh_ino));
383 if (err < 0)
384 goto failed_read;
385
386 if (type == SQUASHFS_LFIFO_TYPE)
387 inode->i_mode |= S_IFIFO;
388 else
389 inode->i_mode |= S_IFSOCK;
390 xattr_id = le32_to_cpu(sqsh_ino->xattr);
391 inode->i_op = &squashfs_inode_ops;
392 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
393 init_special_inode(inode, inode->i_mode, 0);
394 break;
395 }
337 default: 396 default:
338 ERROR("Unknown inode type %d in squashfs_iget!\n", type); 397 ERROR("Unknown inode type %d in squashfs_iget!\n", type);
339 return -EINVAL; 398 return -EINVAL;
340 } 399 }
341 400
401 if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) {
402 err = squashfs_xattr_lookup(sb, xattr_id,
403 &squashfs_i(inode)->xattr_count,
404 &squashfs_i(inode)->xattr_size,
405 &squashfs_i(inode)->xattr);
406 if (err < 0)
407 goto failed_read;
408 inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9)
409 + 1;
410 } else
411 squashfs_i(inode)->xattr_count = 0;
412
342 return 0; 413 return 0;
343 414
344failed_read: 415failed_read:
345 ERROR("Unable to read inode 0x%llx\n", ino); 416 ERROR("Unable to read inode 0x%llx\n", ino);
346 return err; 417 return err;
347} 418}
419
420
421const struct inode_operations squashfs_inode_ops = {
422 .getxattr = generic_getxattr,
423 .listxattr = squashfs_listxattr
424};
425
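The i_blocks adjustment in the xattr branch, isolated: i_blocks counts 512-byte units, and ((size - 1) >> 9) + 1 is ceil(size / 512). A sketch with a helper name of my own; note it is only valid for size >= 1, which holds because the branch requires an xattr to exist:

static inline unsigned long xattr_sectors(unsigned int xattr_size)
{
	return ((xattr_size - 1) >> 9) + 1;	/* ceil(xattr_size / 512) */
}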
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 5266bd8ad932..7a9464d08cf6 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -57,11 +57,13 @@
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/string.h> 58#include <linux/string.h>
59#include <linux/dcache.h> 59#include <linux/dcache.h>
60#include <linux/xattr.h>
60 61
61#include "squashfs_fs.h" 62#include "squashfs_fs.h"
62#include "squashfs_fs_sb.h" 63#include "squashfs_fs_sb.h"
63#include "squashfs_fs_i.h" 64#include "squashfs_fs_i.h"
64#include "squashfs.h" 65#include "squashfs.h"
66#include "xattr.h"
65 67
66/* 68/*
67 * Lookup name in the directory index, returning the location of the metadata 69 * Lookup name in the directory index, returning the location of the metadata
@@ -237,5 +239,7 @@ failed:
237 239
238 240
239const struct inode_operations squashfs_dir_inode_ops = { 241const struct inode_operations squashfs_dir_inode_ops = {
240 .lookup = squashfs_lookup 242 .lookup = squashfs_lookup,
243 .getxattr = generic_getxattr,
244 .listxattr = squashfs_listxattr
241}; 245};
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index fe2587af5512..733a17c42945 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -73,8 +73,11 @@ extern struct inode *squashfs_iget(struct super_block *, long long,
73 unsigned int); 73 unsigned int);
74extern int squashfs_read_inode(struct inode *, long long); 74extern int squashfs_read_inode(struct inode *, long long);
75 75
76/* xattr.c */
77extern ssize_t squashfs_listxattr(struct dentry *, char *, size_t);
78
76/* 79/*
77 * Inodes, files and decompressor operations 80 * Inodes, files, decompressor and xattr operations
78 */ 81 */
79 82
80/* dir.c */ 83/* dir.c */
@@ -86,11 +89,18 @@ extern const struct export_operations squashfs_export_ops;
86/* file.c */ 89/* file.c */
87extern const struct address_space_operations squashfs_aops; 90extern const struct address_space_operations squashfs_aops;
88 91
92/* inode.c */
93extern const struct inode_operations squashfs_inode_ops;
94
89/* namei.c */ 95/* namei.c */
90extern const struct inode_operations squashfs_dir_inode_ops; 96extern const struct inode_operations squashfs_dir_inode_ops;
91 97
92/* symlink.c */ 98/* symlink.c */
93extern const struct address_space_operations squashfs_symlink_aops; 99extern const struct address_space_operations squashfs_symlink_aops;
100extern const struct inode_operations squashfs_symlink_inode_ops;
101
102/* xattr.c */
103extern const struct xattr_handler *squashfs_xattr_handlers[];
94 104
95/* zlib_wrapper.c */ 105/* zlib_wrapper.c */
96extern const struct squashfs_decompressor squashfs_zlib_comp_ops; 106extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 79024245ea00..8eabb808b78d 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -46,6 +46,7 @@
46#define SQUASHFS_NAME_LEN 256 46#define SQUASHFS_NAME_LEN 256
47 47
48#define SQUASHFS_INVALID_FRAG (0xffffffffU) 48#define SQUASHFS_INVALID_FRAG (0xffffffffU)
49#define SQUASHFS_INVALID_XATTR (0xffffffffU)
49#define SQUASHFS_INVALID_BLK (-1LL) 50#define SQUASHFS_INVALID_BLK (-1LL)
50 51
51/* Filesystem flags */ 52/* Filesystem flags */
@@ -96,6 +97,13 @@
96#define SQUASHFS_LFIFO_TYPE 13 97#define SQUASHFS_LFIFO_TYPE 13
97#define SQUASHFS_LSOCKET_TYPE 14 98#define SQUASHFS_LSOCKET_TYPE 14
98 99
100/* Xattr types */
101#define SQUASHFS_XATTR_USER 0
102#define SQUASHFS_XATTR_TRUSTED 1
103#define SQUASHFS_XATTR_SECURITY 2
104#define SQUASHFS_XATTR_VALUE_OOL 256
105#define SQUASHFS_XATTR_PREFIX_MASK 0xff
106
99/* Flag whether block is compressed or uncompressed, bit is set if block is 107/* Flag whether block is compressed or uncompressed, bit is set if block is
100 * uncompressed */ 108 * uncompressed */
101#define SQUASHFS_COMPRESSED_BIT (1 << 15) 109#define SQUASHFS_COMPRESSED_BIT (1 << 15)
@@ -174,6 +182,24 @@
174 182
175#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\ 183#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\
176 sizeof(u64)) 184 sizeof(u64))
185/* xattr id lookup table defines */
186#define SQUASHFS_XATTR_BYTES(A) ((A) * sizeof(struct squashfs_xattr_id))
187
188#define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \
189 SQUASHFS_METADATA_SIZE)
190
191#define SQUASHFS_XATTR_BLOCK_OFFSET(A) (SQUASHFS_XATTR_BYTES(A) % \
192 SQUASHFS_METADATA_SIZE)
193
194#define SQUASHFS_XATTR_BLOCKS(A) ((SQUASHFS_XATTR_BYTES(A) + \
195 SQUASHFS_METADATA_SIZE - 1) / \
196 SQUASHFS_METADATA_SIZE)
197
198#define SQUASHFS_XATTR_BLOCK_BYTES(A) (SQUASHFS_XATTR_BLOCKS(A) *\
199 sizeof(u64))
200#define SQUASHFS_XATTR_BLK(A) ((unsigned int) ((A) >> 16))
201
202#define SQUASHFS_XATTR_OFFSET(A) ((unsigned int) ((A) & 0xffff))
177 203
178/* cached data constants for filesystem */ 204/* cached data constants for filesystem */
179#define SQUASHFS_CACHED_BLKS 8 205#define SQUASHFS_CACHED_BLKS 8
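The SQUASHFS_XATTR_BLK/SQUASHFS_XATTR_OFFSET pair above splits a packed 64-bit xattr reference: the upper 48 bits give the start of the metadata block holding the entry, the low 16 bits the byte offset inside that uncompressed block. A minimal sketch of the decoding (the helper name below is hypothetical, not part of this patch):

	/* Hypothetical helper: decode a packed xattr reference into the
	 * block start + offset pair that seeds a metadata walk. */
	static void squashfs_xattr_ref_decode(u64 ref, u64 *block, int *offset)
	{
		*block = SQUASHFS_XATTR_BLK(ref);	/* ref >> 16 */
		*offset = SQUASHFS_XATTR_OFFSET(ref);	/* ref & 0xffff */
	}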
@@ -228,7 +254,7 @@ struct squashfs_super_block {
228 __le64 root_inode; 254 __le64 root_inode;
229 __le64 bytes_used; 255 __le64 bytes_used;
230 __le64 id_table_start; 256 __le64 id_table_start;
231 __le64 xattr_table_start; 257 __le64 xattr_id_table_start;
232 __le64 inode_table_start; 258 __le64 inode_table_start;
233 __le64 directory_table_start; 259 __le64 directory_table_start;
234 __le64 fragment_table_start; 260 __le64 fragment_table_start;
@@ -261,6 +287,17 @@ struct squashfs_ipc_inode {
261 __le32 nlink; 287 __le32 nlink;
262}; 288};
263 289
290struct squashfs_lipc_inode {
291 __le16 inode_type;
292 __le16 mode;
293 __le16 uid;
294 __le16 guid;
295 __le32 mtime;
296 __le32 inode_number;
297 __le32 nlink;
298 __le32 xattr;
299};
300
264struct squashfs_dev_inode { 301struct squashfs_dev_inode {
265 __le16 inode_type; 302 __le16 inode_type;
266 __le16 mode; 303 __le16 mode;
@@ -272,6 +309,18 @@ struct squashfs_dev_inode {
272 __le32 rdev; 309 __le32 rdev;
273}; 310};
274 311
312struct squashfs_ldev_inode {
313 __le16 inode_type;
314 __le16 mode;
315 __le16 uid;
316 __le16 guid;
317 __le32 mtime;
318 __le32 inode_number;
319 __le32 nlink;
320 __le32 rdev;
321 __le32 xattr;
322};
323
275struct squashfs_symlink_inode { 324struct squashfs_symlink_inode {
276 __le16 inode_type; 325 __le16 inode_type;
277 __le16 mode; 326 __le16 mode;
@@ -349,12 +398,14 @@ struct squashfs_ldir_inode {
349union squashfs_inode { 398union squashfs_inode {
350 struct squashfs_base_inode base; 399 struct squashfs_base_inode base;
351 struct squashfs_dev_inode dev; 400 struct squashfs_dev_inode dev;
401 struct squashfs_ldev_inode ldev;
352 struct squashfs_symlink_inode symlink; 402 struct squashfs_symlink_inode symlink;
353 struct squashfs_reg_inode reg; 403 struct squashfs_reg_inode reg;
354 struct squashfs_lreg_inode lreg; 404 struct squashfs_lreg_inode lreg;
355 struct squashfs_dir_inode dir; 405 struct squashfs_dir_inode dir;
356 struct squashfs_ldir_inode ldir; 406 struct squashfs_ldir_inode ldir;
357 struct squashfs_ipc_inode ipc; 407 struct squashfs_ipc_inode ipc;
408 struct squashfs_lipc_inode lipc;
358}; 409};
359 410
360struct squashfs_dir_entry { 411struct squashfs_dir_entry {
@@ -377,4 +428,27 @@ struct squashfs_fragment_entry {
377 unsigned int unused; 428 unsigned int unused;
378}; 429};
379 430
431struct squashfs_xattr_entry {
432 __le16 type;
433 __le16 size;
434 char data[0];
435};
436
437struct squashfs_xattr_val {
438 __le32 vsize;
439 char value[0];
440};
441
442struct squashfs_xattr_id {
443 __le64 xattr;
444 __le32 count;
445 __le32 size;
446};
447
448struct squashfs_xattr_id_table {
449 __le64 xattr_table_start;
450 __le32 xattr_ids;
451 __le32 unused;
452};
453
380#endif 454#endif
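Taken together, the new structures fix the on-disk layout: each xattr is a squashfs_xattr_entry (type plus name length) followed by the raw name bytes, then a squashfs_xattr_val (value length) followed by the value bytes, or by a packed 8-byte reference when SQUASHFS_XATTR_VALUE_OOL is set in the type. Illustrative helper (the name is made up) for the size of one fully inline entry:

	/* Illustration only, not from this patch: bytes consumed by one
	 * inline xattr, i.e. entry header + name + value header + value. */
	static size_t squashfs_xattr_entry_bytes(const struct squashfs_xattr_entry *e,
						 const struct squashfs_xattr_val *v)
	{
		return sizeof(*e) + le16_to_cpu(e->size) +
		       sizeof(*v) + le32_to_cpu(v->vsize);
	}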
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index fbfca30c0c68..d3e3a37f28a1 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -26,6 +26,9 @@
26struct squashfs_inode_info { 26struct squashfs_inode_info {
27 u64 start; 27 u64 start;
28 int offset; 28 int offset;
29 u64 xattr;
30 unsigned int xattr_size;
31 int xattr_count;
29 union { 32 union {
30 struct { 33 struct {
31 u64 fragment_block; 34 u64 fragment_block;
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 2e77dc547e25..d9037a5215f0 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -61,6 +61,7 @@ struct squashfs_sb_info {
61 int next_meta_index; 61 int next_meta_index;
62 __le64 *id_table; 62 __le64 *id_table;
63 __le64 *fragment_index; 63 __le64 *fragment_index;
64 __le64 *xattr_id_table;
64 struct mutex read_data_mutex; 65 struct mutex read_data_mutex;
65 struct mutex meta_index_mutex; 66 struct mutex meta_index_mutex;
66 struct meta_index *meta_index; 67 struct meta_index *meta_index;
@@ -68,9 +69,11 @@ struct squashfs_sb_info {
68 __le64 *inode_lookup_table; 69 __le64 *inode_lookup_table;
69 u64 inode_table; 70 u64 inode_table;
70 u64 directory_table; 71 u64 directory_table;
72 u64 xattr_table;
71 unsigned int block_size; 73 unsigned int block_size;
72 unsigned short block_log; 74 unsigned short block_log;
73 long long bytes_used; 75 long long bytes_used;
74 unsigned int inodes; 76 unsigned int inodes;
77 int xattr_ids;
75}; 78};
76#endif 79#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 48b6f4a385a6..88b4f8606652 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -36,12 +36,14 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/magic.h> 38#include <linux/magic.h>
39#include <linux/xattr.h>
39 40
40#include "squashfs_fs.h" 41#include "squashfs_fs.h"
41#include "squashfs_fs_sb.h" 42#include "squashfs_fs_sb.h"
42#include "squashfs_fs_i.h" 43#include "squashfs_fs_i.h"
43#include "squashfs.h" 44#include "squashfs.h"
44#include "decompressor.h" 45#include "decompressor.h"
46#include "xattr.h"
45 47
46static struct file_system_type squashfs_fs_type; 48static struct file_system_type squashfs_fs_type;
47static const struct super_operations squashfs_super_ops; 49static const struct super_operations squashfs_super_ops;
@@ -82,7 +84,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
82 long long root_inode; 84 long long root_inode;
83 unsigned short flags; 85 unsigned short flags;
84 unsigned int fragments; 86 unsigned int fragments;
85 u64 lookup_table_start; 87 u64 lookup_table_start, xattr_id_table_start;
86 int err; 88 int err;
87 89
88 TRACE("Entered squashfs_fill_superblock\n"); 90 TRACE("Entered squashfs_fill_superblock\n");
@@ -139,13 +141,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
139 if (msblk->decompressor == NULL) 141 if (msblk->decompressor == NULL)
140 goto failed_mount; 142 goto failed_mount;
141 143
142 /*
143 * Check if there's xattrs in the filesystem. These are not
144 * supported in this version, so warn that they will be ignored.
145 */
146 if (le64_to_cpu(sblk->xattr_table_start) != SQUASHFS_INVALID_BLK)
147 ERROR("Xattrs in filesystem, these will be ignored\n");
148
149 /* Check the filesystem does not extend beyond the end of the 144 /* Check the filesystem does not extend beyond the end of the
150 block device */ 145 block device */
151 msblk->bytes_used = le64_to_cpu(sblk->bytes_used); 146 msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
@@ -253,7 +248,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
253allocate_lookup_table: 248allocate_lookup_table:
254 lookup_table_start = le64_to_cpu(sblk->lookup_table_start); 249 lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
255 if (lookup_table_start == SQUASHFS_INVALID_BLK) 250 if (lookup_table_start == SQUASHFS_INVALID_BLK)
256 goto allocate_root; 251 goto allocate_xattr_table;
257 252
258 /* Allocate and read inode lookup table */ 253 /* Allocate and read inode lookup table */
259 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb, 254 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
@@ -266,6 +261,21 @@ allocate_lookup_table:
266 261
267 sb->s_export_op = &squashfs_export_ops; 262 sb->s_export_op = &squashfs_export_ops;
268 263
264allocate_xattr_table:
265 sb->s_xattr = squashfs_xattr_handlers;
266 xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
267 if (xattr_id_table_start == SQUASHFS_INVALID_BLK)
268 goto allocate_root;
269
270 /* Allocate and read xattr id lookup table */
271 msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
272 xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
273 if (IS_ERR(msblk->xattr_id_table)) {
274 err = PTR_ERR(msblk->xattr_id_table);
275 msblk->xattr_id_table = NULL;
276 if (err != -ENOTSUPP)
277 goto failed_mount;
278 }
269allocate_root: 279allocate_root:
270 root = new_inode(sb); 280 root = new_inode(sb);
271 if (!root) { 281 if (!root) {
@@ -301,6 +311,7 @@ failed_mount:
301 kfree(msblk->inode_lookup_table); 311 kfree(msblk->inode_lookup_table);
302 kfree(msblk->fragment_index); 312 kfree(msblk->fragment_index);
303 kfree(msblk->id_table); 313 kfree(msblk->id_table);
314 kfree(msblk->xattr_id_table);
304 kfree(sb->s_fs_info); 315 kfree(sb->s_fs_info);
305 sb->s_fs_info = NULL; 316 sb->s_fs_info = NULL;
306 kfree(sblk); 317 kfree(sblk);
@@ -355,6 +366,7 @@ static void squashfs_put_super(struct super_block *sb)
355 kfree(sbi->fragment_index); 366 kfree(sbi->fragment_index);
356 kfree(sbi->meta_index); 367 kfree(sbi->meta_index);
357 kfree(sbi->inode_lookup_table); 368 kfree(sbi->inode_lookup_table);
369 kfree(sbi->xattr_id_table);
358 kfree(sb->s_fs_info); 370 kfree(sb->s_fs_info);
359 sb->s_fs_info = NULL; 371 sb->s_fs_info = NULL;
360 } 372 }
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 32b911f4ee39..ec86434921e1 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -35,11 +35,13 @@
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/pagemap.h> 37#include <linux/pagemap.h>
38#include <linux/xattr.h>
38 39
39#include "squashfs_fs.h" 40#include "squashfs_fs.h"
40#include "squashfs_fs_sb.h" 41#include "squashfs_fs_sb.h"
41#include "squashfs_fs_i.h" 42#include "squashfs_fs_i.h"
42#include "squashfs.h" 43#include "squashfs.h"
44#include "xattr.h"
43 45
44static int squashfs_symlink_readpage(struct file *file, struct page *page) 46static int squashfs_symlink_readpage(struct file *file, struct page *page)
45{ 47{
@@ -114,3 +116,12 @@ error_out:
114const struct address_space_operations squashfs_symlink_aops = { 116const struct address_space_operations squashfs_symlink_aops = {
115 .readpage = squashfs_symlink_readpage 117 .readpage = squashfs_symlink_readpage
116}; 118};
119
120const struct inode_operations squashfs_symlink_inode_ops = {
121 .readlink = generic_readlink,
122 .follow_link = page_follow_link_light,
123 .put_link = page_put_link,
124 .getxattr = generic_getxattr,
125 .listxattr = squashfs_listxattr
126};
127
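With getxattr/listxattr wired into the symlink operations, attributes stored on a link itself become visible through the usual syscalls. Userspace illustration (mount point, link name and attribute are invented):

	#include <sys/xattr.h>
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		/* lgetxattr() queries the link itself, not its target;
		 * path and attribute name are illustrative */
		ssize_t n = lgetxattr("/mnt/squash/link", "user.comment",
				      buf, sizeof(buf));
		if (n >= 0)
			printf("%.*s\n", (int)n, buf);
		return 0;
	}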
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
new file mode 100644
index 000000000000..c7655e8b31cd
--- /dev/null
+++ b/fs/squashfs/xattr.c
@@ -0,0 +1,323 @@
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 *
21 * xattr.c
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/string.h>
27#include <linux/fs.h>
28#include <linux/vfs.h>
29#include <linux/xattr.h>
30#include <linux/slab.h>
31
32#include "squashfs_fs.h"
33#include "squashfs_fs_sb.h"
34#include "squashfs_fs_i.h"
35#include "squashfs.h"
36
37static const struct xattr_handler *squashfs_xattr_handler(int);
38
39ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
40 size_t buffer_size)
41{
42 struct inode *inode = d->d_inode;
43 struct super_block *sb = inode->i_sb;
44 struct squashfs_sb_info *msblk = sb->s_fs_info;
45 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
46 + msblk->xattr_table;
47 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
48 int count = squashfs_i(inode)->xattr_count;
49 size_t rest = buffer_size;
50 int err;
51
52 /* check that the file system has xattrs */
53 if (msblk->xattr_id_table == NULL)
54 return -EOPNOTSUPP;
55
56 /* loop reading each xattr name */
57 while (count--) {
58 struct squashfs_xattr_entry entry;
59 struct squashfs_xattr_val val;
60 const struct xattr_handler *handler;
61 int name_size, prefix_size = 0;
62
63 err = squashfs_read_metadata(sb, &entry, &start, &offset,
64 sizeof(entry));
65 if (err < 0)
66 goto failed;
67
68 name_size = le16_to_cpu(entry.size);
69 handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
70 if (handler)
71 prefix_size = handler->list(d, buffer, rest, NULL,
72 name_size, handler->flags);
73 if (prefix_size) {
74 if (buffer) {
75 if (prefix_size + name_size + 1 > rest) {
76 err = -ERANGE;
77 goto failed;
78 }
79 buffer += prefix_size;
80 }
81 err = squashfs_read_metadata(sb, buffer, &start,
82 &offset, name_size);
83 if (err < 0)
84 goto failed;
85 if (buffer) {
86 buffer[name_size] = '\0';
87 buffer += name_size + 1;
88 }
89 rest -= prefix_size + name_size + 1;
90 } else {
91 /* no handler or insufficient privileges, so skip */
92 err = squashfs_read_metadata(sb, NULL, &start,
93 &offset, name_size);
94 if (err < 0)
95 goto failed;
96 }
97
98
99 /* skip remaining xattr entry */
100 err = squashfs_read_metadata(sb, &val, &start, &offset,
101 sizeof(val));
102 if (err < 0)
103 goto failed;
104
105 err = squashfs_read_metadata(sb, NULL, &start, &offset,
106 le32_to_cpu(val.vsize));
107 if (err < 0)
108 goto failed;
109 }
110 err = buffer_size - rest;
111
112failed:
113 return err;
114}
115
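squashfs_listxattr() follows the standard VFS contract: called with a NULL buffer it only totals the space the names need; called with a real buffer it emits NUL-terminated, prefix-qualified names and returns -ERANGE if they do not fit. Caller-side sketch (illustrative, error handling trimmed):

	/* illustrative caller, not code from this patch */
	ssize_t len = squashfs_listxattr(dentry, NULL, 0);	/* size query */
	char *names = kmalloc(len, GFP_KERNEL);
	len = squashfs_listxattr(dentry, names, len);		/* fill */
	/* names now holds "user.foo\0trusted.bar\0..." */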
116
117static int squashfs_xattr_get(struct inode *inode, int name_index,
118 const char *name, void *buffer, size_t buffer_size)
119{
120 struct super_block *sb = inode->i_sb;
121 struct squashfs_sb_info *msblk = sb->s_fs_info;
122 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
123 + msblk->xattr_table;
124 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
125 int count = squashfs_i(inode)->xattr_count;
126 int name_len = strlen(name);
127 int err, vsize;
128 char *target = kmalloc(name_len, GFP_KERNEL);
129
130 if (target == NULL)
131 return -ENOMEM;
132
133 /* loop reading each xattr name */
134 for (; count; count--) {
135 struct squashfs_xattr_entry entry;
136 struct squashfs_xattr_val val;
137 int type, prefix, name_size;
138
139 err = squashfs_read_metadata(sb, &entry, &start, &offset,
140 sizeof(entry));
141 if (err < 0)
142 goto failed;
143
144 name_size = le16_to_cpu(entry.size);
145 type = le16_to_cpu(entry.type);
146 prefix = type & SQUASHFS_XATTR_PREFIX_MASK;
147
148 if (prefix == name_index && name_size == name_len)
149 err = squashfs_read_metadata(sb, target, &start,
150 &offset, name_size);
151 else
152 err = squashfs_read_metadata(sb, NULL, &start,
153 &offset, name_size);
154 if (err < 0)
155 goto failed;
156
157 if (prefix == name_index && name_size == name_len &&
158 strncmp(target, name, name_size) == 0) {
159 /* found xattr */
160 if (type & SQUASHFS_XATTR_VALUE_OOL) {
161 __le64 xattr;
162 /* val is a reference to the real location */
163 err = squashfs_read_metadata(sb, &val, &start,
164 &offset, sizeof(val));
165 if (err < 0)
166 goto failed;
167 err = squashfs_read_metadata(sb, &xattr, &start,
168 &offset, sizeof(xattr));
169 if (err < 0)
170 goto failed;
171 start = SQUASHFS_XATTR_BLK(le64_to_cpu(xattr)) +
172 msblk->xattr_table;
173 offset =
174 SQUASHFS_XATTR_OFFSET(le64_to_cpu(xattr));
175 }
176 /* read xattr value */
177 err = squashfs_read_metadata(sb, &val, &start, &offset,
178 sizeof(val));
179 if (err < 0)
180 goto failed;
181
182 vsize = le32_to_cpu(val.vsize);
183 if (buffer) {
184 if (vsize > buffer_size) {
185 err = -ERANGE;
186 goto failed;
187 }
188 err = squashfs_read_metadata(sb, buffer, &start,
189 &offset, vsize);
190 if (err < 0)
191 goto failed;
192 }
193 break;
194 }
195
196 /* no match, skip remaining xattr entry */
197 err = squashfs_read_metadata(sb, &val, &start, &offset,
198 sizeof(val));
199 if (err < 0)
200 goto failed;
201 err = squashfs_read_metadata(sb, NULL, &start, &offset,
202 le32_to_cpu(val.vsize));
203 if (err < 0)
204 goto failed;
205 }
206 err = count ? vsize : -ENODATA;
207
208failed:
209 kfree(target);
210 return err;
211}
212
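The SQUASHFS_XATTR_VALUE_OOL branch lets a value shared by many entries be stored once: the inline slot carries only a packed 8-byte reference, which is re-split with the same BLK/OFFSET macros before the real squashfs_xattr_val is read. Condensed sketch of that indirection (variables as in squashfs_xattr_get() above; illustrative, error handling trimmed):

	__le64 ref;

	squashfs_read_metadata(sb, &ref, &start, &offset, sizeof(ref));
	start = SQUASHFS_XATTR_BLK(le64_to_cpu(ref)) + msblk->xattr_table;
	offset = SQUASHFS_XATTR_OFFSET(le64_to_cpu(ref));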
213
214/*
215 * User namespace support
216 */
217static size_t squashfs_user_list(struct dentry *d, char *list, size_t list_size,
218 const char *name, size_t name_len, int type)
219{
220 if (list && XATTR_USER_PREFIX_LEN <= list_size)
221 memcpy(list, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
222 return XATTR_USER_PREFIX_LEN;
223}
224
225static int squashfs_user_get(struct dentry *d, const char *name, void *buffer,
226 size_t size, int type)
227{
228 if (name[0] == '\0')
229 return -EINVAL;
230
231 return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_USER, name,
232 buffer, size);
233}
234
235static const struct xattr_handler squashfs_xattr_user_handler = {
236 .prefix = XATTR_USER_PREFIX,
237 .list = squashfs_user_list,
238 .get = squashfs_user_get
239};
240
241/*
242 * Trusted namespace support
243 */
244static size_t squashfs_trusted_list(struct dentry *d, char *list,
245 size_t list_size, const char *name, size_t name_len, int type)
246{
247 if (!capable(CAP_SYS_ADMIN))
248 return 0;
249
250 if (list && XATTR_TRUSTED_PREFIX_LEN <= list_size)
251 memcpy(list, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
252 return XATTR_TRUSTED_PREFIX_LEN;
253}
254
255static int squashfs_trusted_get(struct dentry *d, const char *name,
256 void *buffer, size_t size, int type)
257{
258 if (name[0] == '\0')
259 return -EINVAL;
260
261 return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_TRUSTED, name,
262 buffer, size);
263}
264
265static const struct xattr_handler squashfs_xattr_trusted_handler = {
266 .prefix = XATTR_TRUSTED_PREFIX,
267 .list = squashfs_trusted_list,
268 .get = squashfs_trusted_get
269};
270
271/*
272 * Security namespace support
273 */
274static size_t squashfs_security_list(struct dentry *d, char *list,
275 size_t list_size, const char *name, size_t name_len, int type)
276{
277 if (list && XATTR_SECURITY_PREFIX_LEN <= list_size)
278 memcpy(list, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
279 return XATTR_SECURITY_PREFIX_LEN;
280}
281
282static int squashfs_security_get(struct dentry *d, const char *name,
283 void *buffer, size_t size, int type)
284{
285 if (name[0] == '\0')
286 return -EINVAL;
287
288 return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_SECURITY, name,
289 buffer, size);
290}
291
292static const struct xattr_handler squashfs_xattr_security_handler = {
293 .prefix = XATTR_SECURITY_PREFIX,
294 .list = squashfs_security_list,
295 .get = squashfs_security_get
296};
297
298static inline const struct xattr_handler *squashfs_xattr_handler(int type)
299{
300 if (type & ~(SQUASHFS_XATTR_PREFIX_MASK | SQUASHFS_XATTR_VALUE_OOL))
301 /* ignore unrecognised type */
302 return NULL;
303
304 switch (type & SQUASHFS_XATTR_PREFIX_MASK) {
305 case SQUASHFS_XATTR_USER:
306 return &squashfs_xattr_user_handler;
307 case SQUASHFS_XATTR_TRUSTED:
308 return &squashfs_xattr_trusted_handler;
309 case SQUASHFS_XATTR_SECURITY:
310 return &squashfs_xattr_security_handler;
311 default:
312 /* ignore unrecognised type */
313 return NULL;
314 }
315}
316
317const struct xattr_handler *squashfs_xattr_handlers[] = {
318 &squashfs_xattr_user_handler,
319 &squashfs_xattr_trusted_handler,
320 &squashfs_xattr_security_handler,
321 NULL
322};
323
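The table registered through sb->s_xattr is consumed generically: generic_getxattr() in fs/xattr.c matches the requested name against each handler's prefix and calls ->get with the prefix stripped. Simplified sketch of that dispatch (not squashfs code, details trimmed):

	/* simplified sketch of the fs/xattr.c dispatch, for illustration */
	static int sketch_getxattr(struct dentry *d, const char *name,
				   void *buffer, size_t size)
	{
		const struct xattr_handler **p;

		for (p = d->d_sb->s_xattr; *p; p++) {
			size_t len = strlen((*p)->prefix);

			if (!strncmp(name, (*p)->prefix, len))
				/* handler sees the unprefixed name */
				return (*p)->get(d, name + len, buffer,
						 size, (*p)->flags);
		}
		return -EOPNOTSUPP;
	}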
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
new file mode 100644
index 000000000000..9da071ae181c
--- /dev/null
+++ b/fs/squashfs/xattr.h
@@ -0,0 +1,46 @@
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 *
21 * xattr.h
22 */
23
24#ifdef CONFIG_SQUASHFS_XATTRS
25extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
26 u64 *, int *);
27extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
28 unsigned int *, unsigned long long *);
29#else
30static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
31 u64 start, u64 *xattr_table_start, int *xattr_ids)
32{
33 ERROR("Xattrs in filesystem, these will be ignored\n");
34 return ERR_PTR(-ENOTSUPP);
35}
36
37static inline int squashfs_xattr_lookup(struct super_block *sb,
38 unsigned int index, int *count, unsigned int *size,
39 unsigned long long *xattr)
40{
41 return 0;
42}
43#define squashfs_listxattr NULL
44#define generic_getxattr NULL
45#define squashfs_xattr_handlers NULL
46#endif
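Defining the three names to NULL in the !CONFIG_SQUASHFS_XATTRS branch is what keeps the use sites #ifdef-free: the same struct initializers compile in either configuration. Illustration (the ops name is invented):

	/* Illustrative ops table: with xattr support configured out both
	 * members become NULL and the VFS simply reports no xattr support. */
	static const struct inode_operations example_ops = {
		.getxattr  = generic_getxattr,
		.listxattr = squashfs_listxattr,
	};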
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
new file mode 100644
index 000000000000..cfb41106098f
--- /dev/null
+++ b/fs/squashfs/xattr_id.c
@@ -0,0 +1,100 @@
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 *
21 * xattr_id.c
22 */
23
24/*
25 * This file implements code to map the 32-bit xattr id stored in the inode
26 * into the on disk location of the xattr data.
27 */
28
29#include <linux/fs.h>
30#include <linux/vfs.h>
31#include <linux/slab.h>
32
33#include "squashfs_fs.h"
34#include "squashfs_fs_sb.h"
35#include "squashfs_fs_i.h"
36#include "squashfs.h"
37
38/*
39 * Map xattr id using the xattr id look up table
40 */
41int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
42 int *count, unsigned int *size, unsigned long long *xattr)
43{
44 struct squashfs_sb_info *msblk = sb->s_fs_info;
45 int block = SQUASHFS_XATTR_BLOCK(index);
46 int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
47 u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
48 struct squashfs_xattr_id id;
49 int err;
50
51 err = squashfs_read_metadata(sb, &id, &start_block, &offset,
52 sizeof(id));
53 if (err < 0)
54 return err;
55
56 *xattr = le64_to_cpu(id.xattr);
57 *size = le32_to_cpu(id.size);
58 *count = le32_to_cpu(id.count);
59 return 0;
60}
61
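The lookup is plain array indexing: id N sits N * sizeof(struct squashfs_xattr_id) bytes into the table, and that byte position is split into a metadata block plus an offset within it. Worked example, assuming the usual 8192-byte SQUASHFS_METADATA_SIZE and the 16-byte squashfs_xattr_id above:

	unsigned int index = 600;			 /* byte 9600, illustrative */
	int block = SQUASHFS_XATTR_BLOCK(index);	 /* 9600 / 8192 = 1 */
	int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index); /* 9600 % 8192 = 1408 */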
62
63/*
64 * Read uncompressed xattr id lookup table indexes from disk into memory
65 */
66__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
67 u64 *xattr_table_start, int *xattr_ids)
68{
69 unsigned int len;
70 __le64 *xid_table;
71 struct squashfs_xattr_id_table id_table;
72 int err;
73
74 err = squashfs_read_table(sb, &id_table, start, sizeof(id_table));
75 if (err < 0) {
76 ERROR("unable to read xattr id table\n");
77 return ERR_PTR(err);
78 }
79 *xattr_table_start = le64_to_cpu(id_table.xattr_table_start);
80 *xattr_ids = le32_to_cpu(id_table.xattr_ids);
81 len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
82
83 TRACE("In read_xattr_index_table, length %d\n", len);
84
85 /* Allocate xattr id lookup table indexes */
86 xid_table = kmalloc(len, GFP_KERNEL);
87 if (xid_table == NULL) {
88 ERROR("Failed to allocate xattr id index table\n");
89 return ERR_PTR(-ENOMEM);
90 }
91
92 err = squashfs_read_table(sb, xid_table, start + sizeof(id_table), len);
93 if (err < 0) {
94 ERROR("unable to read xattr id index table\n");
95 kfree(xid_table);
96 return ERR_PTR(err);
97 }
98
99 return xid_table;
100}
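Only this small per-block index lives in memory; the id table proper stays on disk and is read through the metadata cache. Continuing the example above (numbers illustrative), 600 ids occupy 9600 bytes, i.e. two metadata blocks, so the kmalloc() holds just two __le64 block addresses:

	unsigned int ids = 600;				/* illustrative */
	unsigned int blocks = SQUASHFS_XATTR_BLOCKS(ids);	/* 2 */
	unsigned int len = SQUASHFS_XATTR_BLOCK_BYTES(ids);	/* 16 bytes */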
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 3a84455c2a77..1660c81ffa3d 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -207,6 +207,7 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
207 207
208/* readdir and lookup functions */ 208/* readdir and lookup functions */
209const struct file_operations udf_dir_operations = { 209const struct file_operations udf_dir_operations = {
210 .llseek = generic_file_llseek,
210 .read = generic_read_dir, 211 .read = generic_read_dir,
211 .readdir = udf_readdir, 212 .readdir = udf_readdir,
212 .unlocked_ioctl = udf_ioctl, 213 .unlocked_ioctl = udf_ioctl,
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 14743d935a93..ad9bc1ebd3a6 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -918,6 +918,7 @@ again:
918 sbi->s_bytesex = BYTESEX_LE; 918 sbi->s_bytesex = BYTESEX_LE;
919 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { 919 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
920 case UFS_MAGIC: 920 case UFS_MAGIC:
921 case UFS_MAGIC_BW:
921 case UFS2_MAGIC: 922 case UFS2_MAGIC:
922 case UFS_MAGIC_LFN: 923 case UFS_MAGIC_LFN:
923 case UFS_MAGIC_FEA: 924 case UFS_MAGIC_FEA:
@@ -927,6 +928,7 @@ again:
927 sbi->s_bytesex = BYTESEX_BE; 928 sbi->s_bytesex = BYTESEX_BE;
928 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { 929 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
929 case UFS_MAGIC: 930 case UFS_MAGIC:
931 case UFS_MAGIC_BW:
930 case UFS2_MAGIC: 932 case UFS2_MAGIC:
931 case UFS_MAGIC_LFN: 933 case UFS_MAGIC_LFN:
932 case UFS_MAGIC_FEA: 934 case UFS_MAGIC_FEA:
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 6943ec677c0b..8aba544f9fad 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -48,6 +48,7 @@ typedef __u16 __bitwise __fs16;
48#define UFS_SECTOR_SIZE 512 48#define UFS_SECTOR_SIZE 512
49#define UFS_SECTOR_BITS 9 49#define UFS_SECTOR_BITS 9
50#define UFS_MAGIC 0x00011954 50#define UFS_MAGIC 0x00011954
51#define UFS_MAGIC_BW 0x0f242697
51#define UFS2_MAGIC 0x19540119 52#define UFS2_MAGIC 0x19540119
52#define UFS_CIGAM 0x54190100 /* byteswapped MAGIC */ 53#define UFS_CIGAM 0x54190100 /* byteswapped MAGIC */
53 54
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 69206957b72c..0c80bb38773f 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -123,15 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
123 size_t size, 123 size_t size,
124 enum dma_data_direction dir) 124 enum dma_data_direction dir)
125{ 125{
126 struct dma_map_ops *ops = get_dma_ops(dev); 126 dma_sync_single_for_cpu(dev, addr + offset, size, dir);
127
128 BUG_ON(!valid_dma_direction(dir));
129 if (ops->sync_single_range_for_cpu) {
130 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
131 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
132
133 } else
134 dma_sync_single_for_cpu(dev, addr + offset, size, dir);
135} 127}
136 128
137static inline void dma_sync_single_range_for_device(struct device *dev, 129static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -140,15 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
140 size_t size, 132 size_t size,
141 enum dma_data_direction dir) 133 enum dma_data_direction dir)
142{ 134{
143 struct dma_map_ops *ops = get_dma_ops(dev); 135 dma_sync_single_for_device(dev, addr + offset, size, dir);
144
145 BUG_ON(!valid_dma_direction(dir));
146 if (ops->sync_single_range_for_device) {
147 ops->sync_single_range_for_device(dev, addr, offset, size, dir);
148 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
149
150 } else
151 dma_sync_single_for_device(dev, addr + offset, size, dir);
152} 136}
153 137
154static inline void 138static inline void
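With the range-specific ops removed, the *_range helpers become thin conveniences over the plain single-buffer sync, and driver usage is unchanged. Hedged sketch (dev, handle, offset and len are assumed to come from the driver's own mapping setup):

	/* illustrative: sync only the region the CPU is about to inspect,
	 * then hand it back to the device */
	dma_sync_single_range_for_cpu(dev, handle, offset, len,
				      DMA_FROM_DEVICE);
	/* ... CPU may read the freshly DMA'd bytes here ... */
	dma_sync_single_range_for_device(dev, handle, offset, len,
					 DMA_FROM_DEVICE);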
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 979c6a57f2f1..4f3d75e1ad39 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -60,7 +60,9 @@ struct module;
60 * @names: if set, must be an array of strings to use as alternative 60 * @names: if set, must be an array of strings to use as alternative
61 * names for the GPIOs in this chip. Any entry in the array 61 * names for the GPIOs in this chip. Any entry in the array
62 * may be NULL if there is no alias for the GPIO, however the 62 * may be NULL if there is no alias for the GPIO, however the
63 * array must be @ngpio entries long. 63 * array must be @ngpio entries long. A name can include a single printk
64 * format specifier for an unsigned int. It is substituted by the actual
65 * number of the gpio.
64 * 66 *
65 * A gpio_chip can help platforms abstract various sources of GPIOs so 67 * A gpio_chip can help platforms abstract various sources of GPIOs so
66 * they can all be accessed through a common programming interface. 68
@@ -88,6 +90,9 @@ struct gpio_chip {
88 unsigned offset); 90 unsigned offset);
89 int (*direction_output)(struct gpio_chip *chip, 91 int (*direction_output)(struct gpio_chip *chip,
90 unsigned offset, int value); 92 unsigned offset, int value);
93 int (*set_debounce)(struct gpio_chip *chip,
94 unsigned offset, unsigned debounce);
95
91 void (*set)(struct gpio_chip *chip, 96 void (*set)(struct gpio_chip *chip,
92 unsigned offset, int value); 97 unsigned offset, int value);
93 98
@@ -98,7 +103,7 @@ struct gpio_chip {
98 struct gpio_chip *chip); 103 struct gpio_chip *chip);
99 int base; 104 int base;
100 u16 ngpio; 105 u16 ngpio;
101 char **names; 106 const char *const *names;
102 unsigned can_sleep:1; 107 unsigned can_sleep:1;
103 unsigned exported:1; 108 unsigned exported:1;
104}; 109};
@@ -121,6 +126,8 @@ extern void gpio_free(unsigned gpio);
121extern int gpio_direction_input(unsigned gpio); 126extern int gpio_direction_input(unsigned gpio);
122extern int gpio_direction_output(unsigned gpio, int value); 127extern int gpio_direction_output(unsigned gpio, int value);
123 128
129extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
130
124extern int gpio_get_value_cansleep(unsigned gpio); 131extern int gpio_get_value_cansleep(unsigned gpio);
125extern void gpio_set_value_cansleep(unsigned gpio, int value); 132extern void gpio_set_value_cansleep(unsigned gpio, int value);
126 133
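gpio_set_debounce() gives chips with hardware debounce a generic entry point; gpiolib reports an error when the underlying chip has no set_debounce hook. Illustrative use (GPIO number and period are invented):

	if (!gpio_request(42, "wakeup-button")) {	/* illustrative GPIO */
		gpio_direction_input(42);
		/* debounce period in microseconds; fails harmlessly on
		 * chips without a set_debounce() implementation */
		gpio_set_debounce(42, 5000);
	}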
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
index 8b9454496a7c..5de07355fad4 100644
--- a/include/asm-generic/scatterlist.h
+++ b/include/asm-generic/scatterlist.h
@@ -11,7 +11,9 @@ struct scatterlist {
11 unsigned int offset; 11 unsigned int offset;
12 unsigned int length; 12 unsigned int length;
13 dma_addr_t dma_address; 13 dma_addr_t dma_address;
14#ifdef CONFIG_NEED_SG_DMA_LENGTH
14 unsigned int dma_length; 15 unsigned int dma_length;
16#endif
15}; 17};
16 18
17/* 19/*
@@ -22,22 +24,11 @@ struct scatterlist {
22 * is 0. 24 * is 0.
23 */ 25 */
24#define sg_dma_address(sg) ((sg)->dma_address) 26#define sg_dma_address(sg) ((sg)->dma_address)
25#ifndef sg_dma_len 27
26/* 28#ifdef CONFIG_NEED_SG_DMA_LENGTH
27 * Normally, you have an iommu on 64 bit machines, but not on 32 bit
28 * machines. Architectures that are different should override this.
29 */
30#if __BITS_PER_LONG == 64
31#define sg_dma_len(sg) ((sg)->dma_length) 29#define sg_dma_len(sg) ((sg)->dma_length)
32#else 30#else
33#define sg_dma_len(sg) ((sg)->length) 31#define sg_dma_len(sg) ((sg)->length)
34#endif /* 64 bit */
35#endif /* sg_dma_len */
36
37#ifndef ISA_DMA_THRESHOLD
38#define ISA_DMA_THRESHOLD (~0UL)
39#endif 32#endif
40 33
41#define ARCH_HAS_SG_CHAIN
42
43#endif /* __ASM_GENERIC_SCATTERLIST_H */ 34#endif /* __ASM_GENERIC_SCATTERLIST_H */
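sg_dma_len() now keys off an explicit Kconfig symbol instead of guessing from word size: architectures whose IOMMU can merge or shorten entries select NEED_SG_DMA_LENGTH and get the separate dma_length field; everyone else reuses length. Sketch of the visible effect (illustrative helper, not from this patch):

	static void show_lengths(struct scatterlist *sg)
	{
		/* after dma_map_sg() an IOMMU may have merged entries,
		 * so the two lengths can legitimately differ */
		pr_info("cpu len %u, dma len %u\n",
			sg->length, sg_dma_len(sg));
	}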
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 510df36dd5d4..fd60700503c8 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -34,6 +34,9 @@
34#ifndef cpu_to_node 34#ifndef cpu_to_node
35#define cpu_to_node(cpu) ((void)(cpu),0) 35#define cpu_to_node(cpu) ((void)(cpu),0)
36#endif 36#endif
37#ifndef cpu_to_mem
38#define cpu_to_mem(cpu) ((void)(cpu),0)
39#endif
37#ifndef parent_node 40#ifndef parent_node
38#define parent_node(node) ((void)(node),0) 41#define parent_node(node) ((void)(node),0)
39#endif 42#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0e..ef779c6fc3d7 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -247,10 +247,10 @@
247 } \ 247 } \
248 \ 248 \
249 /* RapidIO route ops */ \ 249 /* RapidIO route ops */ \
250 .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ 250 .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
251 VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ 251 VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
252 *(.rio_route_ops) \ 252 *(.rio_switch_ops) \
253 VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ 253 VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
254 } \ 254 } \
255 \ 255 \
256 TRACEDATA \ 256 TRACEDATA \
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 811dbb369379..7a8db4155281 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -212,6 +212,8 @@ extern void kick_iocb(struct kiocb *iocb);
212extern int aio_complete(struct kiocb *iocb, long res, long res2); 212extern int aio_complete(struct kiocb *iocb, long res, long res2);
213struct mm_struct; 213struct mm_struct;
214extern void exit_aio(struct mm_struct *mm); 214extern void exit_aio(struct mm_struct *mm);
215extern long do_io_submit(aio_context_t ctx_id, long nr,
216 struct iocb __user *__user *iocbpp, bool compat);
215#else 217#else
216static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } 218static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
217static inline int aio_put_req(struct kiocb *iocb) { return 0; } 219static inline int aio_put_req(struct kiocb *iocb) { return 0; }
@@ -219,6 +221,9 @@ static inline void kick_iocb(struct kiocb *iocb) { }
219static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; } 221static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
220struct mm_struct; 222struct mm_struct;
221static inline void exit_aio(struct mm_struct *mm) { } 223static inline void exit_aio(struct mm_struct *mm) { }
224static inline long do_io_submit(aio_context_t ctx_id, long nr,
225 struct iocb __user * __user *iocbpp,
226 bool compat) { return 0; }
222#endif /* CONFIG_AIO */ 227#endif /* CONFIG_AIO */
223 228
224static inline struct kiocb *list_kiocb(struct list_head *h) 229static inline struct kiocb *list_kiocb(struct list_head *h)
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index daf8c480c786..6fb2720882fc 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -141,6 +141,7 @@ extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
141extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); 141extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
142extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); 142extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
143extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); 143extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
144extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
144 145
145#define BITMAP_LAST_WORD_MASK(nbits) \ 146#define BITMAP_LAST_WORD_MASK(nbits) \
146( \ 147( \
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index d53a67dff018..3c80fd7e8b56 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -7,9 +7,6 @@
7#ifndef __BIG_ENDIAN_BITFIELD 7#ifndef __BIG_ENDIAN_BITFIELD
8#define __BIG_ENDIAN_BITFIELD 8#define __BIG_ENDIAN_BITFIELD
9#endif 9#endif
10#ifndef __BYTE_ORDER
11#define __BYTE_ORDER __BIG_ENDIAN
12#endif
13 10
14#include <linux/types.h> 11#include <linux/types.h>
15#include <linux/swab.h> 12#include <linux/swab.h>
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index f7f8ad13adb6..83195fb82962 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -7,9 +7,6 @@
7#ifndef __LITTLE_ENDIAN_BITFIELD 7#ifndef __LITTLE_ENDIAN_BITFIELD
8#define __LITTLE_ENDIAN_BITFIELD 8#define __LITTLE_ENDIAN_BITFIELD
9#endif 9#endif
10#ifndef __BYTE_ORDER
11#define __BYTE_ORDER __LITTLE_ENDIAN
12#endif
13 10
14#include <linux/types.h> 11#include <linux/types.h>
15#include <linux/swab.h> 12#include <linux/swab.h>
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8f78073d7caa..0c621604baa1 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -397,7 +397,7 @@ struct cftype {
397 * This callback must be implemented, if you want provide 397 * This callback must be implemented, if you want provide
398 * notification functionality. 398 * notification functionality.
399 */ 399 */
400 int (*unregister_event)(struct cgroup *cgrp, struct cftype *cft, 400 void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
401 struct eventfd_ctx *eventfd); 401 struct eventfd_ctx *eventfd);
402}; 402};
403 403
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 717c691ecd8e..168f7daa7bde 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -356,5 +356,9 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
356asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, 356asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
357 int flags, int mode); 357 int flags, int mode);
358 358
359extern ssize_t compat_rw_copy_check_uvector(int type,
360 const struct compat_iovec __user *uvector, unsigned long nr_segs,
361 unsigned long fast_segs, struct iovec *fast_pointer,
362 struct iovec **ret_pointer);
359#endif /* CONFIG_COMPAT */ 363#endif /* CONFIG_COMPAT */
360#endif /* _LINUX_COMPAT_H */ 364#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 20b51cab6593..457ed765a116 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -69,6 +69,7 @@ extern void cpuset_task_status_allowed(struct seq_file *m,
69 struct task_struct *task); 69 struct task_struct *task);
70 70
71extern int cpuset_mem_spread_node(void); 71extern int cpuset_mem_spread_node(void);
72extern int cpuset_slab_spread_node(void);
72 73
73static inline int cpuset_do_page_mem_spread(void) 74static inline int cpuset_do_page_mem_spread(void)
74{ 75{
@@ -194,6 +195,11 @@ static inline int cpuset_mem_spread_node(void)
194 return 0; 195 return 0;
195} 196}
196 197
198static inline int cpuset_slab_spread_node(void)
199{
200 return 0;
201}
202
197static inline int cpuset_do_page_mem_spread(void) 203static inline int cpuset_do_page_mem_spread(void)
198{ 204{
199 return 0; 205 return 0;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 52507c3e1387..75c0fa881308 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -156,7 +156,6 @@ extern int copy_creds(struct task_struct *, unsigned long);
156extern struct cred *cred_alloc_blank(void); 156extern struct cred *cred_alloc_blank(void);
157extern struct cred *prepare_creds(void); 157extern struct cred *prepare_creds(void);
158extern struct cred *prepare_exec_creds(void); 158extern struct cred *prepare_exec_creds(void);
159extern struct cred *prepare_usermodehelper_creds(void);
160extern int commit_creds(struct cred *); 159extern int commit_creds(struct cred *);
161extern void abort_creds(struct cred *); 160extern void abort_creds(struct cred *);
162extern const struct cred *override_creds(const struct cred *); 161extern const struct cred *override_creds(const struct cred *);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ca32ed78b057..89b7e1a605b8 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -40,16 +40,6 @@ struct dma_map_ops {
40 void (*sync_single_for_device)(struct device *dev, 40 void (*sync_single_for_device)(struct device *dev,
41 dma_addr_t dma_handle, size_t size, 41 dma_addr_t dma_handle, size_t size,
42 enum dma_data_direction dir); 42 enum dma_data_direction dir);
43 void (*sync_single_range_for_cpu)(struct device *dev,
44 dma_addr_t dma_handle,
45 unsigned long offset,
46 size_t size,
47 enum dma_data_direction dir);
48 void (*sync_single_range_for_device)(struct device *dev,
49 dma_addr_t dma_handle,
50 unsigned long offset,
51 size_t size,
52 enum dma_data_direction dir);
53 void (*sync_sg_for_cpu)(struct device *dev, 43 void (*sync_sg_for_cpu)(struct device *dev,
54 struct scatterlist *sg, int nents, 44 struct scatterlist *sg, int nents,
55 enum dma_data_direction dir); 45 enum dma_data_direction dir);
@@ -105,21 +95,6 @@ static inline int is_device_dma_capable(struct device *dev)
105#include <asm-generic/dma-mapping-broken.h> 95#include <asm-generic/dma-mapping-broken.h>
106#endif 96#endif
107 97
108/* for backwards compatibility, removed soon */
109static inline void __deprecated dma_sync_single(struct device *dev,
110 dma_addr_t addr, size_t size,
111 enum dma_data_direction dir)
112{
113 dma_sync_single_for_cpu(dev, addr, size, dir);
114}
115
116static inline void __deprecated dma_sync_sg(struct device *dev,
117 struct scatterlist *sg, int nelems,
118 enum dma_data_direction dir)
119{
120 dma_sync_sg_for_cpu(dev, sg, nelems, dir);
121}
122
123static inline u64 dma_get_mask(struct device *dev) 98static inline u64 dma_get_mask(struct device *dev)
124{ 99{
125 if (dev && dev->dma_mask && *dev->dma_mask) 100 if (dev && dev->dma_mask && *dev->dma_mask)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b336cb9ca9a0..9682d52d1507 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2228,6 +2228,7 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
2228 2228
2229extern void 2229extern void
2230file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 2230file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
2231extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
2231extern loff_t no_llseek(struct file *file, loff_t offset, int origin); 2232extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
2232extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); 2233extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
2233extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset, 2234extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
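noop_llseek() complements no_llseek(): rather than failing, it "succeeds" while leaving the file position untouched, which suits devices whose read/write ignore f_pos but whose userspace still calls lseek(). Hedged illustration (the fops and read handler names are invented):

	static const struct file_operations my_fops = {
		.owner	= THIS_MODULE,
		.read	= my_read,	/* hypothetical; ignores *ppos */
		.llseek	= noop_llseek,	/* returns current position */
	};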
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 4e949a5b5b85..03f616b78cfa 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -51,6 +51,11 @@ static inline int gpio_direction_output(unsigned gpio, int value)
51 return -ENOSYS; 51 return -ENOSYS;
52} 52}
53 53
54static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
55{
56 return -ENOSYS;
57}
58
54static inline int gpio_get_value(unsigned gpio) 59static inline int gpio_get_value(unsigned gpio)
55{ 60{
56 /* GPIO can never have been requested or set as {in,out}put */ 61 /* GPIO can never have been requested or set as {in,out}put */
diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h
index e10336631c62..c04bac8bf2fe 100644
--- a/include/linux/i2c/max732x.h
+++ b/include/linux/i2c/max732x.h
@@ -7,6 +7,9 @@ struct max732x_platform_data {
7 /* number of the first GPIO */ 7 /* number of the first GPIO */
8 unsigned gpio_base; 8 unsigned gpio_base;
9 9
10 /* interrupt base */
11 int irq_base;
12
10 void *context; /* param to setup/teardown */ 13 void *context; /* param to setup/teardown */
11 14
12 int (*setup)(struct i2c_client *client, 15 int (*setup)(struct i2c_client *client,
diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h
index d5c5a60c8a0b..139ba52667c8 100644
--- a/include/linux/i2c/pca953x.h
+++ b/include/linux/i2c/pca953x.h
@@ -24,7 +24,7 @@ struct pca953x_platform_data {
24 int (*teardown)(struct i2c_client *client, 24 int (*teardown)(struct i2c_client *client,
25 unsigned gpio, unsigned ngpio, 25 unsigned gpio, unsigned ngpio,
26 void *context); 26 void *context);
27 char **names; 27 const char *const *names;
28}; 28};
29 29
30#endif /* _LINUX_PCA953X_H */ 30#endif /* _LINUX_PCA953X_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7996fc2c9ba9..2beaa13492be 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -16,7 +16,7 @@ extern struct files_struct init_files;
16extern struct fs_struct init_fs; 16extern struct fs_struct init_fs;
17 17
18#define INIT_SIGNALS(sig) { \ 18#define INIT_SIGNALS(sig) { \
19 .count = ATOMIC_INIT(1), \ 19 .nr_threads = 1, \
20 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ 20 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
21 .shared_pending = { \ 21 .shared_pending = { \
22 .list = LIST_HEAD_INIT(sig.shared_pending.list), \ 22 .list = LIST_HEAD_INIT(sig.shared_pending.list), \
@@ -35,7 +35,7 @@ extern struct nsproxy init_nsproxy;
35 35
36#define INIT_SIGHAND(sighand) { \ 36#define INIT_SIGHAND(sighand) { \
37 .count = ATOMIC_INIT(1), \ 37 .count = ATOMIC_INIT(1), \
38 .action = { { { .sa_handler = NULL, } }, }, \ 38 .action = { { { .sa_handler = SIG_DFL, } }, }, \
39 .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ 39 .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
40 .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \ 40 .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
41} 41}
@@ -45,9 +45,9 @@ extern struct group_info init_groups;
45#define INIT_STRUCT_PID { \ 45#define INIT_STRUCT_PID { \
46 .count = ATOMIC_INIT(1), \ 46 .count = ATOMIC_INIT(1), \
47 .tasks = { \ 47 .tasks = { \
48 { .first = &init_task.pids[PIDTYPE_PID].node }, \ 48 { .first = NULL }, \
49 { .first = &init_task.pids[PIDTYPE_PGID].node }, \ 49 { .first = NULL }, \
50 { .first = &init_task.pids[PIDTYPE_SID].node }, \ 50 { .first = NULL }, \
51 }, \ 51 }, \
52 .level = 0, \ 52 .level = 0, \
53 .numbers = { { \ 53 .numbers = { { \
@@ -61,7 +61,7 @@ extern struct group_info init_groups;
61{ \ 61{ \
62 .node = { \ 62 .node = { \
63 .next = NULL, \ 63 .next = NULL, \
64 .pprev = &init_struct_pid.tasks[type].first, \ 64 .pprev = NULL, \
65 }, \ 65 }, \
66 .pid = &init_struct_pid, \ 66 .pid = &init_struct_pid, \
67} 67}
@@ -163,6 +163,7 @@ extern struct cred init_cred;
163 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ 163 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
164 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ 164 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
165 }, \ 165 }, \
166 .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
166 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ 167 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
167 INIT_IDS \ 168 INIT_IDS \
168 INIT_PERF_EVENTS(tsk) \ 169 INIT_PERF_EVENTS(tsk) \
diff --git a/include/linux/input.h b/include/linux/input.h
index 83524e4f3290..6fcc9101beeb 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1155,7 +1155,7 @@ struct input_dev {
1155 1155
1156 int sync; 1156 int sync;
1157 1157
1158 int abs[ABS_MAX + 1]; 1158 int abs[ABS_CNT];
1159 int rep[REP_MAX + 1]; 1159 int rep[REP_MAX + 1];
1160 1160
1161 unsigned long key[BITS_TO_LONGS(KEY_CNT)]; 1161 unsigned long key[BITS_TO_LONGS(KEY_CNT)];
@@ -1163,11 +1163,11 @@ struct input_dev {
1163 unsigned long snd[BITS_TO_LONGS(SND_CNT)]; 1163 unsigned long snd[BITS_TO_LONGS(SND_CNT)];
1164 unsigned long sw[BITS_TO_LONGS(SW_CNT)]; 1164 unsigned long sw[BITS_TO_LONGS(SW_CNT)];
1165 1165
1166 int absmax[ABS_MAX + 1]; 1166 int absmax[ABS_CNT];
1167 int absmin[ABS_MAX + 1]; 1167 int absmin[ABS_CNT];
1168 int absfuzz[ABS_MAX + 1]; 1168 int absfuzz[ABS_CNT];
1169 int absflat[ABS_MAX + 1]; 1169 int absflat[ABS_CNT];
1170 int absres[ABS_MAX + 1]; 1170 int absres[ABS_CNT];
1171 1171
1172 int (*open)(struct input_dev *dev); 1172 int (*open)(struct input_dev *dev);
1173 void (*close)(struct input_dev *dev); 1173 void (*close)(struct input_dev *dev);
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index 9e20c29c1e14..47199b13e0eb 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -64,8 +64,8 @@ struct js_event {
64#define JSIOCSCORR _IOW('j', 0x21, struct js_corr) /* set correction values */ 64#define JSIOCSCORR _IOW('j', 0x21, struct js_corr) /* set correction values */
65#define JSIOCGCORR _IOR('j', 0x22, struct js_corr) /* get correction values */ 65#define JSIOCGCORR _IOR('j', 0x22, struct js_corr) /* get correction values */
66 66
67#define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_MAX + 1]) /* set axis mapping */ 67#define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_CNT]) /* set axis mapping */
68#define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_MAX + 1]) /* get axis mapping */ 68#define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_CNT]) /* get axis mapping */
69#define JSIOCSBTNMAP _IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1]) /* set button mapping */ 69#define JSIOCSBTNMAP _IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1]) /* set button mapping */
70#define JSIOCGBTNMAP _IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1]) /* get button mapping */ 70#define JSIOCGBTNMAP _IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1]) /* get button mapping */
71 71
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index facb27fe7de0..6efd7a78de6a 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -23,6 +23,7 @@
23#include <linux/stddef.h> 23#include <linux/stddef.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/compiler.h> 25#include <linux/compiler.h>
26#include <linux/workqueue.h>
26 27
27#define KMOD_PATH_LEN 256 28#define KMOD_PATH_LEN 256
28 29
@@ -45,19 +46,6 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
45 46
46struct key; 47struct key;
47struct file; 48struct file;
48struct subprocess_info;
49
50/* Allocate a subprocess_info structure */
51struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
52 char **envp, gfp_t gfp_mask);
53
54/* Set various pieces of state into the subprocess_info structure */
55void call_usermodehelper_setkeys(struct subprocess_info *info,
56 struct key *session_keyring);
57int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
58 struct file **filp);
59void call_usermodehelper_setcleanup(struct subprocess_info *info,
60 void (*cleanup)(char **argv, char **envp));
61 49
62enum umh_wait { 50enum umh_wait {
63 UMH_NO_WAIT = -1, /* don't wait at all */ 51 UMH_NO_WAIT = -1, /* don't wait at all */
@@ -65,6 +53,29 @@ enum umh_wait {
65 UMH_WAIT_PROC = 1, /* wait for the process to complete */ 53 UMH_WAIT_PROC = 1, /* wait for the process to complete */
66}; 54};
67 55
56struct subprocess_info {
57 struct work_struct work;
58 struct completion *complete;
59 char *path;
60 char **argv;
61 char **envp;
62 enum umh_wait wait;
63 int retval;
64 int (*init)(struct subprocess_info *info);
65 void (*cleanup)(struct subprocess_info *info);
66 void *data;
67};
68
69/* Allocate a subprocess_info structure */
70struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
71 char **envp, gfp_t gfp_mask);
72
73/* Set various pieces of state into the subprocess_info structure */
74void call_usermodehelper_setfns(struct subprocess_info *info,
75 int (*init)(struct subprocess_info *info),
76 void (*cleanup)(struct subprocess_info *info),
77 void *data);
78
68/* Actually execute the sub-process */ 79/* Actually execute the sub-process */
69int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); 80int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
70 81
@@ -73,38 +84,33 @@ int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
73void call_usermodehelper_freeinfo(struct subprocess_info *info); 84void call_usermodehelper_freeinfo(struct subprocess_info *info);
74 85
75static inline int 86static inline int
76call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) 87call_usermodehelper_fns(char *path, char **argv, char **envp,
88 enum umh_wait wait,
89 int (*init)(struct subprocess_info *info),
90 void (*cleanup)(struct subprocess_info *), void *data)
77{ 91{
78 struct subprocess_info *info; 92 struct subprocess_info *info;
79 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; 93 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
80 94
81 info = call_usermodehelper_setup(path, argv, envp, gfp_mask); 95 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
96
82 if (info == NULL) 97 if (info == NULL)
83 return -ENOMEM; 98 return -ENOMEM;
99
100 call_usermodehelper_setfns(info, init, cleanup, data);
101
84 return call_usermodehelper_exec(info, wait); 102 return call_usermodehelper_exec(info, wait);
85} 103}
86 104
87static inline int 105static inline int
88call_usermodehelper_keys(char *path, char **argv, char **envp, 106call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
89 struct key *session_keyring, enum umh_wait wait)
90{ 107{
91 struct subprocess_info *info; 108 return call_usermodehelper_fns(path, argv, envp, wait,
92 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; 109 NULL, NULL, NULL);
93
94 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
95 if (info == NULL)
96 return -ENOMEM;
97
98 call_usermodehelper_setkeys(info, session_keyring);
99 return call_usermodehelper_exec(info, wait);
100} 110}
101 111
102extern void usermodehelper_init(void); 112extern void usermodehelper_init(void);
103 113
104struct file;
105extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
106 struct file **filp);
107
108extern int usermodehelper_disable(void); 114extern int usermodehelper_disable(void);
109extern void usermodehelper_enable(void); 115extern void usermodehelper_enable(void);
110 116
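call_usermodehelper_fns() replaces the removed setkeys/stdinpipe/setcleanup trio with two callbacks plus an opaque data pointer: init() runs in the freshly forked helper before exec (a non-zero return aborts it), cleanup() runs when the subprocess_info is freed. Hedged sketch (callback names and the cookie payload are invented):

	/* illustrative callbacks, not from this patch */
	static int my_umh_init(struct subprocess_info *info)
	{
		long *cookie = info->data;	/* stashed by the caller */

		return *cookie == 42 ? 0 : -EINVAL;	/* non-zero aborts exec */
	}

	static void my_umh_cleanup(struct subprocess_info *info)
	{
		kfree(info->data);
	}

	/* caller side */
	long *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);

	*cookie = 42;
	call_usermodehelper_fns(path, argv, envp, UMH_WAIT_PROC,
				my_umh_init, my_umh_cleanup, cookie);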
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 05894795fdc1..9411d32840b0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -90,7 +90,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
90extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem); 90extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
91 91
92extern int 92extern int
93mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); 93mem_cgroup_prepare_migration(struct page *page,
94 struct page *newpage, struct mem_cgroup **ptr);
94extern void mem_cgroup_end_migration(struct mem_cgroup *mem, 95extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
95 struct page *oldpage, struct page *newpage); 96 struct page *oldpage, struct page *newpage);
96 97
@@ -227,7 +228,8 @@ static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
227} 228}
228 229
229static inline int 230static inline int
230mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) 231mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
232 struct mem_cgroup **ptr)
231{ 233{
232 return 0; 234 return 0;
233} 235}
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3196c84cc630..f65913c9f5a4 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -230,7 +230,7 @@ static inline void *mmc_priv(struct mmc_host *host)
230#define mmc_classdev(x) (&(x)->class_dev) 230#define mmc_classdev(x) (&(x)->class_dev)
231#define mmc_hostname(x) (dev_name(&(x)->class_dev)) 231#define mmc_hostname(x) (dev_name(&(x)->class_dev))
232 232
233extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 233extern int mmc_suspend_host(struct mmc_host *);
234extern int mmc_resume_host(struct mmc_host *); 234extern int mmc_resume_host(struct mmc_host *);
235 235
236extern void mmc_power_save_host(struct mmc_host *host); 236extern void mmc_power_save_host(struct mmc_host *host);
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h
new file mode 100644
index 000000000000..9188c973f3e1
--- /dev/null
+++ b/include/linux/mmc/sdhci-spear.h
@@ -0,0 +1,42 @@
1/*
2 * include/linux/mmc/sdhci-spear.h
3 *
4 * SDHCI declarations specific to ST SPEAr platform
5 *
6 * Copyright (C) 2010 ST Microelectronics
 7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef MMC_SDHCI_SPEAR_H
15#define MMC_SDHCI_SPEAR_H
16
17#include <linux/platform_device.h>
18/*
19 * struct sdhci_plat_data: spear sdhci platform data structure
20 *
21 * @card_power_gpio: gpio pin for enabling/disabling power to sdhci socket
22 * @power_active_high: if set, enable power to sdhci socket by setting
23 * card_power_gpio
24 * @power_always_enb: If set, then enable power on probe, otherwise enable only
25 * on card insertion and disable on card removal.
 26 * @card_int_gpio: gpio pin used for card detection
27 */
28struct sdhci_plat_data {
29 int card_power_gpio;
30 int power_active_high;
31 int power_always_enb;
32 int card_int_gpio;
33};
34
35/* This function is used to set platform_data field of pdev->dev */
36static inline void
37sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data)
38{
39 pdev->dev.platform_data = data;
40}
41
42#endif /* MMC_SDHCI_SPEAR_H */
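A board-file sketch for the new header; the GPIO number is hypothetical:

static struct sdhci_plat_data spear_sdhci_pdata = {
	.card_power_gpio   = -1,	/* no power GPIO on this board */
	.power_active_high = 0,
	.power_always_enb  = 0,
	.card_int_gpio     = 34,	/* hypothetical card-detect pin */
};

static void __init board_init_sdhci(struct platform_device *sdhci_pdev)
{
	sdhci_set_plat_data(sdhci_pdev, &spear_sdhci_pdata);
}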
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index c6c0cceba5fe..31baaf82f458 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -145,6 +145,9 @@ extern void sdio_writew(struct sdio_func *func, u16 b,
145extern void sdio_writel(struct sdio_func *func, u32 b, 145extern void sdio_writel(struct sdio_func *func, u32 b,
146 unsigned int addr, int *err_ret); 146 unsigned int addr, int *err_ret);
147 147
148extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
149 unsigned int addr, int *err_ret);
150
148extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, 151extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
149 void *src, int count); 152 void *src, int count);
150extern int sdio_writesb(struct sdio_func *func, unsigned int addr, 153extern int sdio_writesb(struct sdio_func *func, unsigned int addr,
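sdio_writeb_readb() issues a write and returns the value read back in the same transaction. A hedged usage sketch (the register offset is the caller's; the host must be claimed around the access):

static int sdio_update_reg(struct sdio_func *func, unsigned int addr, u8 val)
{
	int err;
	u8 readback;

	sdio_claim_host(func);
	readback = sdio_writeb_readb(func, val, addr, &err);
	sdio_release_host(func);

	return err ? err : readback;	/* value read back after the write */
}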
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
new file mode 100644
index 000000000000..aafe832f18aa
--- /dev/null
+++ b/include/linux/mmc/sh_mmcif.h
@@ -0,0 +1,39 @@
1/*
2 * include/linux/mmc/sh_mmcif.h
3 *
4 * platform data for eMMC driver
5 *
6 * Copyright (C) 2010 Renesas Solutions Corp.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License.
11 *
12 */
13
14#ifndef __SH_MMCIF_H__
15#define __SH_MMCIF_H__
16
17/*
18 * MMCIF : CE_CLK_CTRL [19:16]
19 * 1000 : Peripheral clock / 512
20 * 0111 : Peripheral clock / 256
21 * 0110 : Peripheral clock / 128
22 * 0101 : Peripheral clock / 64
23 * 0100 : Peripheral clock / 32
24 * 0011 : Peripheral clock / 16
25 * 0010 : Peripheral clock / 8
26 * 0001 : Peripheral clock / 4
27 * 0000 : Peripheral clock / 2
28 * 1111 : Peripheral clock (sup_pclk set '1')
29 */
30
31struct sh_mmcif_plat_data {
32 void (*set_pwr)(struct platform_device *pdev, int state);
33 void (*down_pwr)(struct platform_device *pdev);
 34 u8 sup_pclk; /* 1: SH7757, 0: SH7724/SH7372 */
35 unsigned long caps;
36 u32 ocr;
37};
38
39#endif /* __SH_MMCIF_H__ */
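A platform-data sketch for a hypothetical board using this structure; the capability and OCR values are standard MMC constants chosen only as examples:

static struct sh_mmcif_plat_data mmcif_pdata = {
	.sup_pclk = 0,				/* SH7724/SH7372-class SoC */
	.caps     = MMC_CAP_4_BIT_DATA,
	.ocr      = MMC_VDD_32_33 | MMC_VDD_33_34,
};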
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0fa491326c4a..b4d109e389b8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -671,6 +671,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
671static inline void memory_present(int nid, unsigned long start, unsigned long end) {} 671static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
672#endif 672#endif
673 673
674#ifdef CONFIG_HAVE_MEMORYLESS_NODES
675int local_memory_node(int node_id);
676#else
677static inline int local_memory_node(int node_id) { return node_id; }
678#endif
679
674#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE 680#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
675unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); 681unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
676#endif 682#endif
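local_memory_node() resolves a possibly memoryless node to the nearest node with memory; a hedged allocation sketch:

static struct page *alloc_page_near_here(void)
{
	/* falls back to the nearest node with memory when the current
	 * node is memoryless (otherwise it is the identity mapping) */
	return alloc_pages_node(local_memory_node(numa_node_id()),
				GFP_KERNEL, 0);
}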
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index dba35e413371..8a8f1d09c133 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -66,6 +66,8 @@
66 * int num_online_nodes() Number of online Nodes 66 * int num_online_nodes() Number of online Nodes
67 * int num_possible_nodes() Number of all possible Nodes 67 * int num_possible_nodes() Number of all possible Nodes
68 * 68 *
69 * int node_random(mask) Random node with set bit in mask
70 *
69 * int node_online(node) Is some node online? 71 * int node_online(node) Is some node online?
70 * int node_possible(node) Is some node possible? 72 * int node_possible(node) Is some node possible?
71 * 73 *
@@ -430,6 +432,10 @@ static inline void node_set_offline(int nid)
430 node_clear_state(nid, N_ONLINE); 432 node_clear_state(nid, N_ONLINE);
431 nr_online_nodes = num_node_state(N_ONLINE); 433 nr_online_nodes = num_node_state(N_ONLINE);
432} 434}
435
436#define node_random(mask) __node_random(&(mask))
437extern int __node_random(const nodemask_t *maskp);
438
433#else 439#else
434 440
435static inline int node_state(int node, enum node_states state) 441static inline int node_state(int node, enum node_states state)
@@ -460,6 +466,8 @@ static inline int num_node_state(enum node_states state)
460 466
461#define node_set_online(node) node_set_state((node), N_ONLINE) 467#define node_set_online(node) node_set_state((node), N_ONLINE)
462#define node_set_offline(node) node_clear_state((node), N_ONLINE) 468#define node_set_offline(node) node_clear_state((node), N_ONLINE)
469
470static inline int node_random(const nodemask_t mask) { return 0; }
463#endif 471#endif
464 472
465#define node_online_map node_states[N_ONLINE] 473#define node_online_map node_states[N_ONLINE]
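node_random() returns a randomly chosen set bit of the mask; a sketch of the intended kind of use (assumes the mask is non-empty):

static int pick_interleave_node(struct task_struct *tsk)
{
	/* assumes at least one node is set in the mask */
	return node_random(tsk->mems_allowed);
}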
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 7c3609622334..540703b555cb 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -164,7 +164,10 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
164/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */ 164/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
165static inline int notifier_from_errno(int err) 165static inline int notifier_from_errno(int err)
166{ 166{
167 return NOTIFY_STOP_MASK | (NOTIFY_OK - err); 167 if (err)
168 return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
169
170 return NOTIFY_OK;
168} 171}
169 172
170/* Restore (negative) errno value from notify return value. */ 173/* Restore (negative) errno value from notify return value. */
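This matters for callbacks that convert an errno into a notifier return value: success no longer sets NOTIFY_STOP_MASK, so the chain keeps running. A sketch (prepare_resources() is a hypothetical helper returning 0 or -errno):

static int my_notifier_cb(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	int err = prepare_resources(data);	/* hypothetical, 0 or -errno */

	/* err == 0 now maps to plain NOTIFY_OK; err < 0 still maps to
	 * NOTIFY_STOP_MASK | (NOTIFY_OK - err), recoverable via
	 * notifier_to_errno() */
	return notifier_from_errno(err);
}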
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index aef22ae2af47..5bb13b3db84d 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -40,6 +40,7 @@ enum {
40 PCG_USED, /* this object is in use. */ 40 PCG_USED, /* this object is in use. */
41 PCG_ACCT_LRU, /* page has been accounted for */ 41 PCG_ACCT_LRU, /* page has been accounted for */
42 PCG_FILE_MAPPED, /* page is accounted as "mapped" */ 42 PCG_FILE_MAPPED, /* page is accounted as "mapped" */
43 PCG_MIGRATION, /* under page migration */
43}; 44};
44 45
45#define TESTPCGFLAG(uname, lname) \ 46#define TESTPCGFLAG(uname, lname) \
@@ -79,6 +80,10 @@ SETPCGFLAG(FileMapped, FILE_MAPPED)
79CLEARPCGFLAG(FileMapped, FILE_MAPPED) 80CLEARPCGFLAG(FileMapped, FILE_MAPPED)
80TESTPCGFLAG(FileMapped, FILE_MAPPED) 81TESTPCGFLAG(FileMapped, FILE_MAPPED)
81 82
83SETPCGFLAG(Migration, MIGRATION)
84CLEARPCGFLAG(Migration, MIGRATION)
85TESTPCGFLAG(Migration, MIGRATION)
86
82static inline int page_cgroup_nid(struct page_cgroup *pc) 87static inline int page_cgroup_nid(struct page_cgroup *pc)
83{ 88{
84 return page_to_nid(pc->page); 89 return page_to_nid(pc->page);
diff --git a/include/linux/random.h b/include/linux/random.h
index 25d02fe5c9b5..fb7ab9de5f36 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -40,6 +40,10 @@ struct rand_pool_info {
40 __u32 buf[0]; 40 __u32 buf[0];
41}; 41};
42 42
43struct rnd_state {
44 __u32 s1, s2, s3;
45};
46
43/* Exported functions */ 47/* Exported functions */
44 48
45#ifdef __KERNEL__ 49#ifdef __KERNEL__
@@ -74,6 +78,30 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
74u32 random32(void); 78u32 random32(void);
75void srandom32(u32 seed); 79void srandom32(u32 seed);
76 80
81u32 prandom32(struct rnd_state *);
82
83/*
84 * Handle minimum values for seeds
85 */
86static inline u32 __seed(u32 x, u32 m)
87{
88 return (x < m) ? x + m : x;
89}
90
91/**
92 * prandom32_seed - set seed for prandom32().
93 * @state: pointer to state structure to receive the seed.
94 * @seed: arbitrary 64-bit value to use as a seed.
95 */
96static inline void prandom32_seed(struct rnd_state *state, u64 seed)
97{
98 u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
99
100 state->s1 = __seed(i, 1);
101 state->s2 = __seed(i, 7);
102 state->s3 = __seed(i, 15);
103}
104
 77#endif /* __KERNEL__ */ 105#endif /* __KERNEL__ */
78 106
79#endif /* _LINUX_RANDOM_H */ 107#endif /* _LINUX_RANDOM_H */
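Unlike random32(), prandom32() operates on caller-owned state, so each user gets an independent, reproducible stream. A usage sketch:

static void fill_test_pattern(u32 *buf, int n, u64 seed)
{
	struct rnd_state state;
	int i;

	prandom32_seed(&state, seed);	/* same seed => same sequence */
	for (i = 0; i < n; i++)
		buf[i] = prandom32(&state);
}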
diff --git a/include/linux/rio.h b/include/linux/rio.h
index dc0c75556c63..19b5f227096e 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -64,10 +64,13 @@
64#define RIO_INB_MBOX_RESOURCE 1 64#define RIO_INB_MBOX_RESOURCE 1
65#define RIO_OUTB_MBOX_RESOURCE 2 65#define RIO_OUTB_MBOX_RESOURCE 2
66 66
67#define RIO_PW_MSG_SIZE 64
68
67extern struct bus_type rio_bus_type; 69extern struct bus_type rio_bus_type;
68extern struct list_head rio_devices; /* list of all devices */ 70extern struct list_head rio_devices; /* list of all devices */
69 71
70struct rio_mport; 72struct rio_mport;
73union rio_pw_msg;
71 74
72/** 75/**
73 * struct rio_dev - RIO device info 76 * struct rio_dev - RIO device info
@@ -107,11 +110,15 @@ struct rio_dev {
107 u32 swpinfo; /* Only used for switches */ 110 u32 swpinfo; /* Only used for switches */
108 u32 src_ops; 111 u32 src_ops;
109 u32 dst_ops; 112 u32 dst_ops;
113 u32 comp_tag;
114 u32 phys_efptr;
115 u32 em_efptr;
110 u64 dma_mask; 116 u64 dma_mask;
111 struct rio_switch *rswitch; /* RIO switch info */ 117 struct rio_switch *rswitch; /* RIO switch info */
112 struct rio_driver *driver; /* RIO driver claiming this device */ 118 struct rio_driver *driver; /* RIO driver claiming this device */
113 struct device dev; /* LDM device structure */ 119 struct device dev; /* LDM device structure */
114 struct resource riores[RIO_MAX_DEV_RESOURCES]; 120 struct resource riores[RIO_MAX_DEV_RESOURCES];
121 int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
115 u16 destid; 122 u16 destid;
116}; 123};
117 124
@@ -211,8 +218,12 @@ struct rio_net {
211 * @hopcount: Hopcount to this switch 218 * @hopcount: Hopcount to this switch
212 * @destid: Associated destid in the path 219 * @destid: Associated destid in the path
213 * @route_table: Copy of switch routing table 220 * @route_table: Copy of switch routing table
221 * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
214 * @add_entry: Callback for switch-specific route add function 222 * @add_entry: Callback for switch-specific route add function
215 * @get_entry: Callback for switch-specific route get function 223 * @get_entry: Callback for switch-specific route get function
224 * @clr_table: Callback for switch-specific clear route table function
225 * @em_init: Callback for switch-specific error management initialization function
226 * @em_handle: Callback for switch-specific error management handler function
216 */ 227 */
217struct rio_switch { 228struct rio_switch {
218 struct list_head node; 229 struct list_head node;
@@ -220,10 +231,19 @@ struct rio_switch {
220 u16 hopcount; 231 u16 hopcount;
221 u16 destid; 232 u16 destid;
222 u8 *route_table; 233 u8 *route_table;
234 u32 port_ok;
223 int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, 235 int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
224 u16 table, u16 route_destid, u8 route_port); 236 u16 table, u16 route_destid, u8 route_port);
225 int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, 237 int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
226 u16 table, u16 route_destid, u8 * route_port); 238 u16 table, u16 route_destid, u8 * route_port);
239 int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
240 u16 table);
241 int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
242 u8 sw_domain);
243 int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
244 u8 *sw_domain);
245 int (*em_init) (struct rio_dev *dev);
246 int (*em_handle) (struct rio_dev *dev, u8 swport);
227}; 247};
228 248
229/* Low-level architecture-dependent routines */ 249/* Low-level architecture-dependent routines */
@@ -235,6 +255,7 @@ struct rio_switch {
235 * @cread: Callback to perform network read of config space. 255 * @cread: Callback to perform network read of config space.
236 * @cwrite: Callback to perform network write of config space. 256 * @cwrite: Callback to perform network write of config space.
237 * @dsend: Callback to send a doorbell message. 257 * @dsend: Callback to send a doorbell message.
258 * @pwenable: Callback to enable/disable port-write message handling.
238 */ 259 */
239struct rio_ops { 260struct rio_ops {
240 int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, 261 int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
@@ -246,6 +267,7 @@ struct rio_ops {
246 int (*cwrite) (struct rio_mport *mport, int index, u16 destid, 267 int (*cwrite) (struct rio_mport *mport, int index, u16 destid,
247 u8 hopcount, u32 offset, int len, u32 data); 268 u8 hopcount, u32 offset, int len, u32 data);
248 int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data); 269 int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data);
270 int (*pwenable) (struct rio_mport *mport, int enable);
249}; 271};
250 272
251#define RIO_RESOURCE_MEM 0x00000100 273#define RIO_RESOURCE_MEM 0x00000100
@@ -302,21 +324,28 @@ struct rio_device_id {
302}; 324};
303 325
304/** 326/**
305 * struct rio_route_ops - Per-switch route operations 327 * struct rio_switch_ops - Per-switch operations
306 * @vid: RIO vendor ID 328 * @vid: RIO vendor ID
307 * @did: RIO device ID 329 * @did: RIO device ID
308 * @add_hook: Callback that adds a route entry 330 * @init_hook: Callback that performs switch device initialization
309 * @get_hook: Callback that gets a route entry
310 * 331 *
311 * Defines the operations that are necessary to manipulate the route 332 * Defines the operations that are necessary to initialize/control
312 * tables for a particular RIO switch device. 333 * a particular RIO switch device.
313 */ 334 */
314struct rio_route_ops { 335struct rio_switch_ops {
315 u16 vid, did; 336 u16 vid, did;
316 int (*add_hook) (struct rio_mport * mport, u16 destid, u8 hopcount, 337 int (*init_hook) (struct rio_dev *rdev, int do_enum);
317 u16 table, u16 route_destid, u8 route_port); 338};
318 int (*get_hook) (struct rio_mport * mport, u16 destid, u8 hopcount, 339
319 u16 table, u16 route_destid, u8 * route_port); 340union rio_pw_msg {
341 struct {
342 u32 comptag; /* Component Tag CSR */
343 u32 errdetect; /* Port N Error Detect CSR */
344 u32 is_port; /* Implementation specific + PortID */
345 u32 ltlerrdet; /* LTL Error Detect CSR */
346 u32 padding[12];
347 } em;
348 u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
320}; 349};
321 350
322/* Architecture and hardware-specific functions */ 351/* Architecture and hardware-specific functions */
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index c93a58a40033..edc55da717b3 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -413,6 +413,12 @@ void rio_release_regions(struct rio_dev *);
413int rio_request_region(struct rio_dev *, int, char *); 413int rio_request_region(struct rio_dev *, int, char *);
414void rio_release_region(struct rio_dev *, int); 414void rio_release_region(struct rio_dev *, int);
415 415
416/* Port-Write management */
417extern int rio_request_inb_pwrite(struct rio_dev *,
418 int (*)(struct rio_dev *, union rio_pw_msg*, int));
419extern int rio_release_inb_pwrite(struct rio_dev *);
420extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg);
421
416/* LDM support */ 422/* LDM support */
417int rio_register_driver(struct rio_driver *); 423int rio_register_driver(struct rio_driver *);
418void rio_unregister_driver(struct rio_driver *); 424void rio_unregister_driver(struct rio_driver *);
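A hedged sketch of a driver registering for inbound port-writes with the new interface; the handler body is illustrative:

static int my_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg, int step)
{
	pr_debug("port-write: comptag 0x%08x errdetect 0x%08x\n",
		 msg->em.comptag, msg->em.errdetect);
	return 0;
}

static int my_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	/* ask the mport driver to deliver inbound port-writes for rdev */
	return rio_request_inb_pwrite(rdev, my_pw_handler);
}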
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 919d4e07d54e..db50e1c288b7 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -20,5 +20,19 @@
20 20
21#define RIO_VID_TUNDRA 0x000d 21#define RIO_VID_TUNDRA 0x000d
22#define RIO_DID_TSI500 0x0500 22#define RIO_DID_TSI500 0x0500
23#define RIO_DID_TSI568 0x0568
24#define RIO_DID_TSI572 0x0572
25#define RIO_DID_TSI574 0x0574
26#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
27#define RIO_DID_TSI577 0x0577
28#define RIO_DID_TSI578 0x0578
29
30#define RIO_VID_IDT 0x0038
31#define RIO_DID_IDT70K200 0x0310
32#define RIO_DID_IDTCPS8 0x035c
33#define RIO_DID_IDTCPS12 0x035d
34#define RIO_DID_IDTCPS16 0x035b
35#define RIO_DID_IDTCPS6Q 0x035f
36#define RIO_DID_IDTCPS10Q 0x035e
23 37
24#endif /* LINUX_RIO_IDS_H */ 38#endif /* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 326540f9b54e..aedee0489fb4 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -39,6 +39,8 @@
39#define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */ 39#define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */
40#define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */ 40#define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */
41#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */ 41#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */
42#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */
43#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */
42#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ 44#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */
43#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ 45#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */
44#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ 46#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */
@@ -91,7 +93,10 @@
91#define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ 93#define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */
92#define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ 94#define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */
93 95
94 /* 0x20-0x3c *//* Reserved */ 96 /* 0x20-0x30 *//* Reserved */
97
98#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */
99#define RIO_RT_MAX_DESTID 0x0000ffff
95 100
96#define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */ 101#define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */
97#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ 102#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */
@@ -153,7 +158,11 @@
153#define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */ 158#define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */
154#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */ 159#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */
155 160
156 /* 0x70-0xf8 *//* Reserved */ 161#define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70
162#define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74
163#define RIO_STD_RTE_DEFAULT_PORT 0x78
164
165 /* 0x7c-0xf8 *//* Reserved */
157 /* 0x100-0xfff8 *//* [I] Extended Features Space */ 166 /* 0x100-0xfff8 *//* [I] Extended Features Space */
158 /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */ 167 /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */
159 168
@@ -183,9 +192,14 @@
183#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */ 192#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */
184#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */ 193#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */
185#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */ 194#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */
195#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */
196#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */
197#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */
186#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */ 198#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */
187#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */ 199#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */
188#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */ 200#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */
201#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */
202#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */
189 203
190/* 204/*
191 * Physical 8/16 LP-LVDS 205 * Physical 8/16 LP-LVDS
@@ -201,15 +215,71 @@
201#define RIO_PORT_MNT_HEADER 0x0000 215#define RIO_PORT_MNT_HEADER 0x0000
202#define RIO_PORT_REQ_CTL_CSR 0x0020 216#define RIO_PORT_REQ_CTL_CSR 0x0020
203#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */ 217#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */
218#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */
219#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */
204#define RIO_PORT_GEN_CTL_CSR 0x003c 220#define RIO_PORT_GEN_CTL_CSR 0x003c
205#define RIO_PORT_GEN_HOST 0x80000000 221#define RIO_PORT_GEN_HOST 0x80000000
206#define RIO_PORT_GEN_MASTER 0x40000000 222#define RIO_PORT_GEN_MASTER 0x40000000
207#define RIO_PORT_GEN_DISCOVERED 0x20000000 223#define RIO_PORT_GEN_DISCOVERED 0x20000000
208#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */ 224#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */
209#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */ 225#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */
226#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */
227#define RIO_PORT_N_MNT_RSP_ASTAT 0x000003e0 /* ackID Status */
228#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */
210#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */ 229#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */
211#define RIO_PORT_N_ERR_STS_CSR(x) (0x58 + x*0x20) 230#define RIO_PORT_N_ACK_CLEAR 0x80000000
212#define PORT_N_ERR_STS_PORT_OK 0x00000002 231#define RIO_PORT_N_ACK_INBOUND 0x1f000000
213#define RIO_PORT_N_CTL_CSR(x) (0x5c + x*0x20) 232#define RIO_PORT_N_ACK_OUTSTAND 0x00001f00
233#define RIO_PORT_N_ACK_OUTBOUND 0x0000001f
234#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
235#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
236#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
237#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */
238#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004
239#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002
240#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001
241#define RIO_PORT_N_ERR_STS_CLR_MASK 0x07120204
242#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20)
243#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
244#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
245#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
246#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
247#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
248#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
249#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000
250#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000
251#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000
252
253/*
254 * Error Management Extensions (RapidIO 1.3+, Part 8)
255 *
256 * Extended Features Block ID=0x0007
257 */
258
259/* General EM Registers (Common for all Ports) */
260
261#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */
262#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */
263#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */
264#define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */
265#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */
266#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */
267#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */
268#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */
269#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */
270
271/* Per-Port EM Registers */
272
273#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */
274#define REM_PED_IMPL_SPEC 0x80000000
275#define REM_PED_LINK_TO 0x00000001
276#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */
277#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */
278#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */
279#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */
280#define RIO_EM_PN_PKT_CAP_2(x) (0x054 + x*0x40) /* Port N Packet Capture 2 CSR */
281#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */
282#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */
283#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */
214 284
215#endif /* LINUX_RIO_REGS_H */ 285#endif /* LINUX_RIO_REGS_H */
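A sketch using the renamed/added status masks to check a switch port over maintenance reads (rio_mport_read_config_32() is the existing accessor from rio_drv.h; phys_efptr is the device's physical-layer extended-features pointer):

static int rio_port_is_ok(struct rio_mport *mport, u16 destid, u8 hopcount,
			  u32 phys_efptr, int port)
{
	u32 err_sts;

	rio_mport_read_config_32(mport, destid, hopcount,
				 phys_efptr + RIO_PORT_N_ERR_STS_CSR(port),
				 &err_sts);
	return !!(err_sts & RIO_PORT_N_ERR_STS_PORT_OK);
}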
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c0151ffd3541..f118809c953f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -268,7 +268,6 @@ extern void init_idle(struct task_struct *idle, int cpu);
268extern void init_idle_bootup_task(struct task_struct *idle); 268extern void init_idle_bootup_task(struct task_struct *idle);
269 269
270extern int runqueue_is_locked(int cpu); 270extern int runqueue_is_locked(int cpu);
271extern void task_rq_unlock_wait(struct task_struct *p);
272 271
273extern cpumask_var_t nohz_cpu_mask; 272extern cpumask_var_t nohz_cpu_mask;
274#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 273#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -527,8 +526,9 @@ struct thread_group_cputimer {
527 * the locking of signal_struct. 526 * the locking of signal_struct.
528 */ 527 */
529struct signal_struct { 528struct signal_struct {
530 atomic_t count; 529 atomic_t sigcnt;
531 atomic_t live; 530 atomic_t live;
531 int nr_threads;
532 532
533 wait_queue_head_t wait_chldexit; /* for wait4() */ 533 wait_queue_head_t wait_chldexit; /* for wait4() */
534 534
@@ -1423,6 +1423,7 @@ struct task_struct {
1423 nodemask_t mems_allowed; /* Protected by alloc_lock */ 1423 nodemask_t mems_allowed; /* Protected by alloc_lock */
1424 int mems_allowed_change_disable; 1424 int mems_allowed_change_disable;
1425 int cpuset_mem_spread_rotor; 1425 int cpuset_mem_spread_rotor;
1426 int cpuset_slab_spread_rotor;
1426#endif 1427#endif
1427#ifdef CONFIG_CGROUPS 1428#ifdef CONFIG_CGROUPS
1428 /* Control Group info protected by css_set_lock */ 1429 /* Control Group info protected by css_set_lock */
@@ -2035,7 +2036,7 @@ extern int do_notify_parent(struct task_struct *, int);
2035extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2036extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2036extern void force_sig(int, struct task_struct *); 2037extern void force_sig(int, struct task_struct *);
2037extern int send_sig(int, struct task_struct *, int); 2038extern int send_sig(int, struct task_struct *, int);
2038extern void zap_other_threads(struct task_struct *p); 2039extern int zap_other_threads(struct task_struct *p);
2039extern struct sigqueue *sigqueue_alloc(void); 2040extern struct sigqueue *sigqueue_alloc(void);
2040extern void sigqueue_free(struct sigqueue *); 2041extern void sigqueue_free(struct sigqueue *);
2041extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); 2042extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -2100,7 +2101,6 @@ extern void flush_thread(void);
2100extern void exit_thread(void); 2101extern void exit_thread(void);
2101 2102
2102extern void exit_files(struct task_struct *); 2103extern void exit_files(struct task_struct *);
2103extern void __cleanup_signal(struct signal_struct *);
2104extern void __cleanup_sighand(struct sighand_struct *); 2104extern void __cleanup_sighand(struct sighand_struct *);
2105 2105
2106extern void exit_itimers(struct signal_struct *); 2106extern void exit_itimers(struct signal_struct *);
@@ -2147,6 +2147,11 @@ extern bool current_is_single_threaded(void);
2147#define while_each_thread(g, t) \ 2147#define while_each_thread(g, t) \
2148 while ((t = next_thread(t)) != g) 2148 while ((t = next_thread(t)) != g)
2149 2149
2150static inline int get_nr_threads(struct task_struct *tsk)
2151{
2152 return tsk->signal->nr_threads;
2153}
2154
2150/* de_thread depends on thread_group_leader not being a pid based check */ 2155/* de_thread depends on thread_group_leader not being a pid based check */
2151#define thread_group_leader(p) (p == p->group_leader) 2156#define thread_group_leader(p) (p == p->group_leader)
2152 2157
@@ -2393,10 +2398,6 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
2393 spin_lock_init(&sig->cputimer.lock); 2398 spin_lock_init(&sig->cputimer.lock);
2394} 2399}
2395 2400
2396static inline void thread_group_cputime_free(struct signal_struct *sig)
2397{
2398}
2399
2400/* 2401/*
2401 * Reevaluate whether the task has signals pending delivery. 2402 * Reevaluate whether the task has signals pending delivery.
2402 * Wake the task if so. 2403 * Wake the task if so.
diff --git a/include/linux/sdhci-pltfm.h b/include/linux/sdhci-pltfm.h
new file mode 100644
index 000000000000..0239bd70241e
--- /dev/null
+++ b/include/linux/sdhci-pltfm.h
@@ -0,0 +1,35 @@
1/*
2 * Platform data declarations for the sdhci-pltfm driver.
3 *
4 * Copyright (c) 2010 MontaVista Software, LLC.
5 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 */
13
14#ifndef _SDHCI_PLTFM_H
15#define _SDHCI_PLTFM_H
16
17struct sdhci_ops;
18struct sdhci_host;
19
20/**
21 * struct sdhci_pltfm_data - SDHCI platform-specific information & hooks
22 * @ops: optional pointer to the platform-provided SDHCI ops
23 * @quirks: optional SDHCI quirks
24 * @init: optional hook that is called during device probe, before the
25 * driver tries to access any SDHCI registers
26 * @exit: optional hook that is called during device removal
27 */
28struct sdhci_pltfm_data {
29 struct sdhci_ops *ops;
30 unsigned int quirks;
31 int (*init)(struct sdhci_host *host);
32 void (*exit)(struct sdhci_host *host);
33};
34
35#endif /* _SDHCI_PLTFM_H */
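A usage sketch: board code hands hooks and quirks to the generic sdhci-pltfm driver (my_sdhci_init() is a stand-in for board-specific setup):

static int my_sdhci_init(struct sdhci_host *host)
{
	/* board-specific setup before the core touches any registers */
	return 0;
}

static struct sdhci_pltfm_data my_sdhci_pdata = {
	.init   = my_sdhci_init,
	.quirks = 0,		/* board-specific SDHCI_QUIRK_* flags, if any */
};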
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 8a4adbef8a0f..f2961afa2f66 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -79,6 +79,7 @@ struct seminfo {
79#ifdef __KERNEL__ 79#ifdef __KERNEL__
80#include <asm/atomic.h> 80#include <asm/atomic.h>
81#include <linux/rcupdate.h> 81#include <linux/rcupdate.h>
82#include <linux/cache.h>
82 83
83struct task_struct; 84struct task_struct;
84 85
@@ -91,7 +92,8 @@ struct sem {
91 92
92/* One sem_array data structure for each set of semaphores in the system. */ 93/* One sem_array data structure for each set of semaphores in the system. */
93struct sem_array { 94struct sem_array {
94 struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ 95 struct kern_ipc_perm ____cacheline_aligned_in_smp
96 sem_perm; /* permissions .. see ipc.h */
95 time_t sem_otime; /* last semop time */ 97 time_t sem_otime; /* last semop time */
96 time_t sem_ctime; /* last change time */ 98 time_t sem_ctime; /* last change time */
97 struct sem *sem_base; /* ptr to first semaphore in array */ 99 struct sem *sem_base; /* ptr to first semaphore in array */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b6b614364dd8..ff4acea9bbdb 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -282,6 +282,11 @@ extern void kswapd_stop(int nid);
282extern int shmem_unuse(swp_entry_t entry, struct page *page); 282extern int shmem_unuse(swp_entry_t entry, struct page *page);
283#endif /* CONFIG_MMU */ 283#endif /* CONFIG_MMU */
284 284
285#ifdef CONFIG_CGROUP_MEM_RES_CTLR
286extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
287 struct page **pagep, swp_entry_t *ent);
288#endif
289
285extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); 290extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
286 291
287#ifdef CONFIG_SWAP 292#ifdef CONFIG_SWAP
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index febedcf67c7e..81a4e213c6cf 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -73,16 +73,6 @@ extern void
73swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 73swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
74 int nelems, enum dma_data_direction dir); 74 int nelems, enum dma_data_direction dir);
75 75
76extern void
77swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
78 unsigned long offset, size_t size,
79 enum dma_data_direction dir);
80
81extern void
82swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
83 unsigned long offset, size_t size,
84 enum dma_data_direction dir);
85
86extern int 76extern int
87swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); 77swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
88 78
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 052b12bec8bd..383ab9592bec 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -33,4 +33,13 @@
33#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ 33#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
34 (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) 34 (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
35 35
36/*
37 * Define a minimum number of pids per cpu. Heuristically based
38 * on original pid max of 32k for 32 cpus. Also, increase the
39 * minimum settable value for pid_max on the running system based
40 * on similar defaults. See kernel/pid.c:pidmap_init() for details.
41 */
42#define PIDS_PER_CPU_DEFAULT 1024
43#define PIDS_PER_CPU_MIN 8
44
36#endif 45#endif
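Roughly how pidmap_init() is expected to apply these constants; a sketch only, kernel/pid.c is authoritative:

static void __init scale_pid_limits(void)
{
	pid_max = max_t(int, pid_max,
			PIDS_PER_CPU_DEFAULT * num_possible_cpus());
	pid_max_min = max_t(int, pid_max_min,
			    PIDS_PER_CPU_MIN * num_possible_cpus());
	/* e.g. 128 possible CPUs: pid_max >= 131072, pid_max_min >= 1024 */
}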
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 5b81156780b1..c44df50a05ab 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -31,6 +31,7 @@
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <linux/mmzone.h> 32#include <linux/mmzone.h>
33#include <linux/smp.h> 33#include <linux/smp.h>
34#include <linux/percpu.h>
34#include <asm/topology.h> 35#include <asm/topology.h>
35 36
36#ifndef node_has_online_mem 37#ifndef node_has_online_mem
@@ -203,8 +204,114 @@ int arch_update_cpu_topology(void);
203#ifndef SD_NODE_INIT 204#ifndef SD_NODE_INIT
204#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! 205#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
205#endif 206#endif
207
206#endif /* CONFIG_NUMA */ 208#endif /* CONFIG_NUMA */
207 209
210#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
211DECLARE_PER_CPU(int, numa_node);
212
213#ifndef numa_node_id
214/* Returns the number of the current Node. */
215static inline int numa_node_id(void)
216{
217 return __this_cpu_read(numa_node);
218}
219#endif
220
221#ifndef cpu_to_node
222static inline int cpu_to_node(int cpu)
223{
224 return per_cpu(numa_node, cpu);
225}
226#endif
227
228#ifndef set_numa_node
229static inline void set_numa_node(int node)
230{
231 percpu_write(numa_node, node);
232}
233#endif
234
235#ifndef set_cpu_numa_node
236static inline void set_cpu_numa_node(int cpu, int node)
237{
238 per_cpu(numa_node, cpu) = node;
239}
240#endif
241
242#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
243
244/* Returns the number of the current Node. */
245#ifndef numa_node_id
246static inline int numa_node_id(void)
247{
248 return cpu_to_node(raw_smp_processor_id());
249}
250#endif
251
252#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
253
254#ifdef CONFIG_HAVE_MEMORYLESS_NODES
255
256/*
257 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
258 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
259 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
260 */
261DECLARE_PER_CPU(int, _numa_mem_);
262
263#ifndef set_numa_mem
264static inline void set_numa_mem(int node)
265{
266 percpu_write(_numa_mem_, node);
267}
268#endif
269
270#ifndef numa_mem_id
271/* Returns the number of the nearest Node with memory */
272static inline int numa_mem_id(void)
273{
274 return __this_cpu_read(_numa_mem_);
275}
276#endif
277
278#ifndef cpu_to_mem
279static inline int cpu_to_mem(int cpu)
280{
281 return per_cpu(_numa_mem_, cpu);
282}
283#endif
284
285#ifndef set_cpu_numa_mem
286static inline void set_cpu_numa_mem(int cpu, int node)
287{
288 per_cpu(_numa_mem_, cpu) = node;
289}
290#endif
291
292#else /* !CONFIG_HAVE_MEMORYLESS_NODES */
293
294static inline void set_numa_mem(int node) {}
295
296static inline void set_cpu_numa_mem(int cpu, int node) {}
297
298#ifndef numa_mem_id
299/* Returns the number of the nearest Node with memory */
300static inline int numa_mem_id(void)
301{
302 return numa_node_id();
303}
304#endif
305
306#ifndef cpu_to_mem
307static inline int cpu_to_mem(int cpu)
308{
309 return cpu_to_node(cpu);
310}
311#endif
312
313#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
314
208#ifndef topology_physical_package_id 315#ifndef topology_physical_package_id
209#define topology_physical_package_id(cpu) ((void)(cpu), -1) 316#define topology_physical_package_id(cpu) ((void)(cpu), -1)
210#endif 317#endif
@@ -218,9 +325,4 @@ int arch_update_cpu_topology(void);
218#define topology_core_cpumask(cpu) cpumask_of(cpu) 325#define topology_core_cpumask(cpu) cpumask_of(cpu)
219#endif 326#endif
220 327
221/* Returns the number of the current Node. */
222#ifndef numa_node_id
223#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
224#endif
225
226#endif /* _LINUX_TOPOLOGY_H */ 328#endif /* _LINUX_TOPOLOGY_H */
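The numa_mem_id()/cpu_to_mem() accessors let callers allocate from the nearest node that actually has memory; a hedged sketch:

static void *alloc_near_cpu(size_t size)
{
	/* on a memoryless-node platform this may differ from
	 * kmalloc_node(size, GFP_KERNEL, numa_node_id()) */
	return kmalloc_node(size, GFP_KERNEL, numa_mem_id());
}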
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 15ddd4483b09..60c81da77f0f 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -166,11 +166,11 @@ struct uinput_ff_erase {
166struct uinput_user_dev { 166struct uinput_user_dev {
167 char name[UINPUT_MAX_NAME_SIZE]; 167 char name[UINPUT_MAX_NAME_SIZE];
168 struct input_id id; 168 struct input_id id;
169 int ff_effects_max; 169 int ff_effects_max;
170 int absmax[ABS_MAX + 1]; 170 int absmax[ABS_CNT];
171 int absmin[ABS_MAX + 1]; 171 int absmin[ABS_CNT];
172 int absfuzz[ABS_MAX + 1]; 172 int absfuzz[ABS_CNT];
173 int absflat[ABS_MAX + 1]; 173 int absflat[ABS_CNT];
174}; 174};
175#endif /* __UINPUT_H_ */ 175#endif /* __UINPUT_H_ */
176 176
diff --git a/ipc/sem.c b/ipc/sem.c
index dbef95b15941..506c8491a8d1 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -3,56 +3,6 @@
3 * Copyright (C) 1992 Krishna Balasubramanian 3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible 4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
5 * 5 *
6 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
7 * This code underwent a massive rewrite in order to solve some problems
8 * with the original code. In particular the original code failed to
9 * wake up processes that were waiting for semval to go to 0 if the
10 * value went to 0 and was then incremented rapidly enough. In solving
11 * this problem I have also modified the implementation so that it
12 * processes pending operations in a FIFO manner, thus give a guarantee
13 * that processes waiting for a lock on the semaphore won't starve
14 * unless another locking process fails to unlock.
15 * In addition the following two changes in behavior have been introduced:
16 * - The original implementation of semop returned the value
17 * last semaphore element examined on success. This does not
18 * match the manual page specifications, and effectively
19 * allows the user to read the semaphore even if they do not
20 * have read permissions. The implementation now returns 0
21 * on success as stated in the manual page.
22 * - There is some confusion over whether the set of undo adjustments
23 * to be performed at exit should be done in an atomic manner.
24 * That is, if we are attempting to decrement the semval should we queue
25 * up and wait until we can do so legally?
26 * The original implementation attempted to do this.
27 * The current implementation does not do so. This is because I don't
28 * think it is the right thing (TM) to do, and because I couldn't
29 * see a clean way to get the old behavior with the new design.
30 * The POSIX standard and SVID should be consulted to determine
31 * what behavior is mandated.
32 *
33 * Further notes on refinement (Christoph Rohland, December 1998):
34 * - The POSIX standard says, that the undo adjustments simply should
35 * redo. So the current implementation is o.K.
36 * - The previous code had two flaws:
37 * 1) It actively gave the semaphore to the next waiting process
38 * sleeping on the semaphore. Since this process did not have the
39 * cpu this led to many unnecessary context switches and bad
40 * performance. Now we only check which process should be able to
41 * get the semaphore and if this process wants to reduce some
42 * semaphore value we simply wake it up without doing the
43 * operation. So it has to try to get it later. Thus e.g. the
44 * running process may reacquire the semaphore during the current
45 * time slice. If it only waits for zero or increases the semaphore,
46 * we do the operation in advance and wake it up.
47 * 2) It did not wake up all zero waiting processes. We try to do
48 * better but only get the semops right which only wait for zero or
49 * increase. If there are decrement operations in the operations
50 * array we do the same as before.
51 *
52 * With the incarnation of O(1) scheduler, it becomes unnecessary to perform
53 * check/retry algorithm for waking up blocked processes as the new scheduler
54 * is better at handling thread switch than the old one.
55 *
56 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com> 6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
57 * 7 *
58 * SMP-threaded, sysctl's added 8 * SMP-threaded, sysctl's added
@@ -61,6 +11,8 @@
61 * (c) 2001 Red Hat Inc 11 * (c) 2001 Red Hat Inc
62 * Lockless wakeup 12 * Lockless wakeup
63 * (c) 2003 Manfred Spraul <manfred@colorfullife.com> 13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
14 * Further wakeup optimizations, documentation
15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
64 * 16 *
65 * support for audit of ipc object properties and permission changes 17 * support for audit of ipc object properties and permission changes
66 * Dustin Kirkland <dustin.kirkland@us.ibm.com> 18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
@@ -68,6 +20,57 @@
68 * namespaces support 20 * namespaces support
69 * OpenVZ, SWsoft Inc. 21 * OpenVZ, SWsoft Inc.
70 * Pavel Emelianov <xemul@openvz.org> 22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Implementation notes: (May 2010)
25 * This file implements System V semaphores.
26 *
27 * User space visible behavior:
28 * - FIFO ordering for semop() operations (just FIFO, not starvation
29 * protection)
30 * - multiple semaphore operations that alter the same semaphore in
31 * one semop() are handled.
32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
33 * SETALL calls.
34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
35 * - undo adjustments at process exit are limited to 0..SEMVMX.
 36 * - namespaces are supported.
 37 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
38 * to /proc/sys/kernel/sem.
39 * - statistics about the usage are reported in /proc/sysvipc/sem.
40 *
41 * Internals:
42 * - scalability:
43 * - all global variables are read-mostly.
44 * - semop() calls and semctl(RMID) are synchronized by RCU.
45 * - most operations do write operations (actually: spin_lock calls) to
46 * the per-semaphore array structure.
47 * Thus: Perfect SMP scaling between independent semaphore arrays.
48 * If multiple semaphores in one array are used, then cache line
 49 * thrashing on the semaphore array spinlock will limit the scaling.
50 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
51 * count_semzcnt()
52 * - the task that performs a successful semop() scans the list of all
53 * sleeping tasks and completes any pending operations that can be fulfilled.
54 * Semaphores are actively given to waiting tasks (necessary for FIFO).
55 * (see update_queue())
56 * - To improve the scalability, the actual wake-up calls are performed after
57 * dropping all locks. (see wake_up_sem_queue_prepare(),
58 * wake_up_sem_queue_do())
59 * - All work is done by the waker, the woken up task does not have to do
60 * anything - not even acquiring a lock or dropping a refcount.
61 * - A woken up task may not even touch the semaphore array anymore, it may
62 * have been destroyed already by a semctl(RMID).
 63 * - The synchronization between wake-ups due to a timeout/signal and a
64 * wake-up due to a completed semaphore operation is achieved by using an
65 * intermediate state (IN_WAKEUP).
66 * - UNDO values are stored in an array (one per process and per
67 * semaphore array, lazily allocated). For backwards compatibility, multiple
68 * modes for the UNDO variables are supported (per process, per thread)
69 * (see copy_semundo, CLONE_SYSVSEM)
70 * - There are two lists of the pending operations: a per-array list
 71 * and per-semaphore list (stored in the array). This allows achieving FIFO
72 * ordering without always scanning all pending operations.
73 * The worst-case behavior is nevertheless O(N^2) for N wakeups.
71 */ 74 */
72 75
73#include <linux/slab.h> 76#include <linux/slab.h>
@@ -381,7 +384,6 @@ static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
381 sop--; 384 sop--;
382 } 385 }
383 386
384 sma->sem_otime = get_seconds();
385 return 0; 387 return 0;
386 388
387out_of_range: 389out_of_range:
@@ -404,25 +406,51 @@ undo:
404 return result; 406 return result;
405} 407}
406 408
407/* 409/** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
408 * Wake up a process waiting on the sem queue with a given error. 410 * @q: queue entry that must be signaled
409 * The queue is invalid (may not be accessed) after the function returns. 411 * @error: Error value for the signal
 412 * @pt: list head to collect the tasks for a later wake_up_sem_queue_do()
413 * Prepare the wake-up of the queue entry q.
410 */ 414 */
411static void wake_up_sem_queue(struct sem_queue *q, int error) 415static void wake_up_sem_queue_prepare(struct list_head *pt,
416 struct sem_queue *q, int error)
412{ 417{
413 /* 418 if (list_empty(pt)) {
414 * Hold preempt off so that we don't get preempted and have the 419 /*
415 * wakee busy-wait until we're scheduled back on. We're holding 420 * Hold preempt off so that we don't get preempted and have the
416 * locks here so it may not strictly be needed, however if the 421 * wakee busy-wait until we're scheduled back on.
417 * locks become preemptible then this prevents such a problem. 422 */
418 */ 423 preempt_disable();
419 preempt_disable(); 424 }
420 q->status = IN_WAKEUP; 425 q->status = IN_WAKEUP;
421 wake_up_process(q->sleeper); 426 q->pid = error;
422 /* hands-off: q can disappear immediately after writing q->status. */ 427
423 smp_wmb(); 428 list_add_tail(&q->simple_list, pt);
424 q->status = error; 429}
425 preempt_enable(); 430
431/**
432 * wake_up_sem_queue_do(pt) - do the actual wake-up
433 * @pt: list of tasks to be woken up
434 *
435 * Do the actual wake-up.
436 * The function is called without any locks held, thus the semaphore array
437 * could be destroyed already and the tasks can disappear as soon as the
438 * status is set to the actual return code.
439 */
440static void wake_up_sem_queue_do(struct list_head *pt)
441{
442 struct sem_queue *q, *t;
443 int did_something;
444
445 did_something = !list_empty(pt);
446 list_for_each_entry_safe(q, t, pt, simple_list) {
447 wake_up_process(q->sleeper);
448 /* q can disappear immediately after writing q->status. */
449 smp_wmb();
450 q->status = q->pid;
451 }
452 if (did_something)
453 preempt_enable();
426} 454}
427 455
428static void unlink_queue(struct sem_array *sma, struct sem_queue *q) 456static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
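The prepare/do split is used as in the sketch below (the same pattern appears verbatim in the later hunks of this file): wakeups are collected under the array lock and performed only after it is dropped.

	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);
	/* ... with sma->sem_perm.lock held ... */
	wake_up_sem_queue_prepare(&tasks, q, error);
	sem_unlock(sma);
	/* safe without the lock: the woken tasks spin on IN_WAKEUP */
	wake_up_sem_queue_do(&tasks);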
@@ -434,22 +462,90 @@ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
434 sma->complex_count--; 462 sma->complex_count--;
435} 463}
436 464
465/** check_restart(sma, q)
466 * @sma: semaphore array
467 * @q: the operation that just completed
468 *
469 * update_queue is O(N^2) when it restarts scanning the whole queue of
470 * waiting operations. Therefore this function checks if the restart is
471 * really necessary. It is called after a previously waiting operation
472 * was completed.
473 */
474static int check_restart(struct sem_array *sma, struct sem_queue *q)
475{
476 struct sem *curr;
477 struct sem_queue *h;
478
479 /* if the operation didn't modify the array, then no restart */
480 if (q->alter == 0)
481 return 0;
482
483 /* pending complex operations are too difficult to analyse */
484 if (sma->complex_count)
485 return 1;
486
487 /* we were a sleeping complex operation. Too difficult */
488 if (q->nsops > 1)
489 return 1;
490
491 curr = sma->sem_base + q->sops[0].sem_num;
492
493 /* No-one waits on this queue */
494 if (list_empty(&curr->sem_pending))
495 return 0;
496
497 /* the new semaphore value */
498 if (curr->semval) {
499 /* It is impossible that someone waits for the new value:
500 * - q is a previously sleeping simple operation that
501 * altered the array. It must be a decrement, because
502 * simple increments never sleep.
503 * - The value is not 0, thus wait-for-zero won't proceed.
504 * - If there are older (higher priority) decrements
505 * in the queue, then they have observed the original
506 * semval value and couldn't proceed. The operation
 507 * decremented the value - thus they won't proceed either.
508 */
509 BUG_ON(q->sops[0].sem_op >= 0);
510 return 0;
511 }
512 /*
513 * semval is 0. Check if there are wait-for-zero semops.
514 * They must be the first entries in the per-semaphore simple queue
515 */
516 h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
517 BUG_ON(h->nsops != 1);
518 BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
519
520 /* Yes, there is a wait-for-zero semop. Restart */
521 if (h->sops[0].sem_op == 0)
522 return 1;
523
524 /* Again - no-one is waiting for the new value. */
525 return 0;
526}
527
437 528
438/** 529/**
439 * update_queue(sma, semnum): Look for tasks that can be completed. 530 * update_queue(sma, semnum): Look for tasks that can be completed.
440 * @sma: semaphore array. 531 * @sma: semaphore array.
441 * @semnum: semaphore that was modified. 532 * @semnum: semaphore that was modified.
533 * @pt: list head for the tasks that must be woken up.
442 * 534 *
443 * update_queue must be called after a semaphore in a semaphore array 535 * update_queue must be called after a semaphore in a semaphore array
 444 * was modified. If multiple semaphores were modified, then @semnum 536
445 * must be set to -1. 537 * must be set to -1.
538 * The tasks that must be woken up are added to @pt. The return code
539 * is stored in q->pid.
 540 * The function returns 1 if at least one semop was completed successfully.
446 */ 541 */
447static void update_queue(struct sem_array *sma, int semnum) 542static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
448{ 543{
449 struct sem_queue *q; 544 struct sem_queue *q;
450 struct list_head *walk; 545 struct list_head *walk;
451 struct list_head *pending_list; 546 struct list_head *pending_list;
452 int offset; 547 int offset;
548 int semop_completed = 0;
453 549
454 /* if there are complex operations around, then knowing the semaphore 550 /* if there are complex operations around, then knowing the semaphore
455 * that was modified doesn't help us. Assume that multiple semaphores 551 * that was modified doesn't help us. Assume that multiple semaphores
@@ -469,7 +565,7 @@ static void update_queue(struct sem_array *sma, int semnum)
469again: 565again:
470 walk = pending_list->next; 566 walk = pending_list->next;
471 while (walk != pending_list) { 567 while (walk != pending_list) {
472 int error, alter; 568 int error, restart;
473 569
474 q = (struct sem_queue *)((char *)walk - offset); 570 q = (struct sem_queue *)((char *)walk - offset);
475 walk = walk->next; 571 walk = walk->next;
@@ -494,22 +590,58 @@ again:
494 590
495 unlink_queue(sma, q); 591 unlink_queue(sma, q);
496 592
497 /* 593 if (error) {
498 * The next operation that must be checked depends on the type 594 restart = 0;
499 * of the completed operation: 595 } else {
500 * - if the operation modified the array, then restart from the 596 semop_completed = 1;
501 * head of the queue and check for threads that might be 597 restart = check_restart(sma, q);
502 * waiting for the new semaphore values. 598 }
503 * - if the operation didn't modify the array, then just 599
504 * continue. 600 wake_up_sem_queue_prepare(pt, q, error);
505 */ 601 if (restart)
506 alter = q->alter;
507 wake_up_sem_queue(q, error);
508 if (alter && !error)
509 goto again; 602 goto again;
510 } 603 }
604 return semop_completed;
605}
606
607/**
608 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
609 * @sma: semaphore array
610 * @sops: operations that were performed
611 * @nsops: number of operations
612 * @otime: force setting otime
613 * @pt: list head of the tasks that must be woken up.
614 *
 615 * do_smart_update() does the required calls to update_queue(), based on the
616 * actual changes that were performed on the semaphore array.
617 * Note that the function does not do the actual wake-up: the caller is
618 * responsible for calling wake_up_sem_queue_do(@pt).
619 * It is safe to perform this call after dropping all locks.
620 */
621static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
622 int otime, struct list_head *pt)
623{
624 int i;
625
626 if (sma->complex_count || sops == NULL) {
627 if (update_queue(sma, -1, pt))
628 otime = 1;
629 goto done;
630 }
631
632 for (i = 0; i < nsops; i++) {
633 if (sops[i].sem_op > 0 ||
634 (sops[i].sem_op < 0 &&
635 sma->sem_base[sops[i].sem_num].semval == 0))
636 if (update_queue(sma, sops[i].sem_num, pt))
637 otime = 1;
638 }
639done:
640 if (otime)
641 sma->sem_otime = get_seconds();
511} 642}
512 643
644
513/* The following counts are associated to each semaphore: 645/* The following counts are associated to each semaphore:
514 * semncnt number of tasks waiting on semval being nonzero 646 * semncnt number of tasks waiting on semval being nonzero
515 * semzcnt number of tasks waiting on semval being zero 647 * semzcnt number of tasks waiting on semval being zero
@@ -572,6 +704,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
572 struct sem_undo *un, *tu; 704 struct sem_undo *un, *tu;
573 struct sem_queue *q, *tq; 705 struct sem_queue *q, *tq;
574 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); 706 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
707 struct list_head tasks;
575 708
576 /* Free the existing undo structures for this semaphore set. */ 709 /* Free the existing undo structures for this semaphore set. */
577 assert_spin_locked(&sma->sem_perm.lock); 710 assert_spin_locked(&sma->sem_perm.lock);
@@ -585,15 +718,17 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
585 } 718 }
586 719
587 /* Wake up all pending processes and let them fail with EIDRM. */ 720 /* Wake up all pending processes and let them fail with EIDRM. */
721 INIT_LIST_HEAD(&tasks);
588 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { 722 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
589 unlink_queue(sma, q); 723 unlink_queue(sma, q);
590 wake_up_sem_queue(q, -EIDRM); 724 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
591 } 725 }
592 726
593 /* Remove the semaphore set from the IDR */ 727 /* Remove the semaphore set from the IDR */
594 sem_rmid(ns, sma); 728 sem_rmid(ns, sma);
595 sem_unlock(sma); 729 sem_unlock(sma);
596 730
731 wake_up_sem_queue_do(&tasks);
597 ns->used_sems -= sma->sem_nsems; 732 ns->used_sems -= sma->sem_nsems;
598 security_sem_free(sma); 733 security_sem_free(sma);
599 ipc_rcu_putref(sma); 734 ipc_rcu_putref(sma);
@@ -715,11 +850,13 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
715 ushort fast_sem_io[SEMMSL_FAST]; 850 ushort fast_sem_io[SEMMSL_FAST];
716 ushort* sem_io = fast_sem_io; 851 ushort* sem_io = fast_sem_io;
717 int nsems; 852 int nsems;
853 struct list_head tasks;
718 854
719 sma = sem_lock_check(ns, semid); 855 sma = sem_lock_check(ns, semid);
720 if (IS_ERR(sma)) 856 if (IS_ERR(sma))
721 return PTR_ERR(sma); 857 return PTR_ERR(sma);
722 858
859 INIT_LIST_HEAD(&tasks);
723 nsems = sma->sem_nsems; 860 nsems = sma->sem_nsems;
724 861
725 err = -EACCES; 862 err = -EACCES;
@@ -807,7 +944,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
807 } 944 }
808 sma->sem_ctime = get_seconds(); 945 sma->sem_ctime = get_seconds();
809 /* maybe some queued-up processes were waiting for this */ 946 /* maybe some queued-up processes were waiting for this */
810 update_queue(sma, -1); 947 do_smart_update(sma, NULL, 0, 0, &tasks);
811 err = 0; 948 err = 0;
812 goto out_unlock; 949 goto out_unlock;
813 } 950 }
@@ -849,13 +986,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
849 curr->sempid = task_tgid_vnr(current); 986 curr->sempid = task_tgid_vnr(current);
850 sma->sem_ctime = get_seconds(); 987 sma->sem_ctime = get_seconds();
851 /* maybe some queued-up processes were waiting for this */ 988 /* maybe some queued-up processes were waiting for this */
852 update_queue(sma, semnum); 989 do_smart_update(sma, NULL, 0, 0, &tasks);
853 err = 0; 990 err = 0;
854 goto out_unlock; 991 goto out_unlock;
855 } 992 }
856 } 993 }
857out_unlock: 994out_unlock:
858 sem_unlock(sma); 995 sem_unlock(sma);
996 wake_up_sem_queue_do(&tasks);
997
859out_free: 998out_free:
860 if(sem_io != fast_sem_io) 999 if(sem_io != fast_sem_io)
861 ipc_free(sem_io, sizeof(ushort)*nsems); 1000 ipc_free(sem_io, sizeof(ushort)*nsems);
@@ -1069,7 +1208,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1069 /* step 1: figure out the size of the semaphore array */ 1208 /* step 1: figure out the size of the semaphore array */
1070 sma = sem_lock_check(ns, semid); 1209 sma = sem_lock_check(ns, semid);
1071 if (IS_ERR(sma)) 1210 if (IS_ERR(sma))
1072 return ERR_PTR(PTR_ERR(sma)); 1211 return ERR_CAST(sma);
1073 1212
1074 nsems = sma->sem_nsems; 1213 nsems = sma->sem_nsems;
1075 sem_getref_and_unlock(sma); 1214 sem_getref_and_unlock(sma);
@@ -1129,6 +1268,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1129 struct sem_queue queue; 1268 struct sem_queue queue;
1130 unsigned long jiffies_left = 0; 1269 unsigned long jiffies_left = 0;
1131 struct ipc_namespace *ns; 1270 struct ipc_namespace *ns;
1271 struct list_head tasks;
1132 1272
1133 ns = current->nsproxy->ipc_ns; 1273 ns = current->nsproxy->ipc_ns;
1134 1274
@@ -1177,6 +1317,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1177 } else 1317 } else
1178 un = NULL; 1318 un = NULL;
1179 1319
1320 INIT_LIST_HEAD(&tasks);
1321
1180 sma = sem_lock_check(ns, semid); 1322 sma = sem_lock_check(ns, semid);
1181 if (IS_ERR(sma)) { 1323 if (IS_ERR(sma)) {
1182 if (un) 1324 if (un)
@@ -1225,7 +1367,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1225 error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current)); 1367 error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
1226 if (error <= 0) { 1368 if (error <= 0) {
1227 if (alter && error == 0) 1369 if (alter && error == 0)
1228 update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1); 1370 do_smart_update(sma, sops, nsops, 1, &tasks);
1229 1371
1230 goto out_unlock_free; 1372 goto out_unlock_free;
1231 } 1373 }
@@ -1302,6 +1444,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1302 1444
1303out_unlock_free: 1445out_unlock_free:
1304 sem_unlock(sma); 1446 sem_unlock(sma);
1447
1448 wake_up_sem_queue_do(&tasks);
1305out_free: 1449out_free:
1306 if(sops != fast_sops) 1450 if(sops != fast_sops)
1307 kfree(sops); 1451 kfree(sops);
@@ -1362,6 +1506,7 @@ void exit_sem(struct task_struct *tsk)
1362 for (;;) { 1506 for (;;) {
1363 struct sem_array *sma; 1507 struct sem_array *sma;
1364 struct sem_undo *un; 1508 struct sem_undo *un;
1509 struct list_head tasks;
1365 int semid; 1510 int semid;
1366 int i; 1511 int i;
1367 1512
@@ -1425,10 +1570,11 @@ void exit_sem(struct task_struct *tsk)
1425 semaphore->sempid = task_tgid_vnr(current); 1570 semaphore->sempid = task_tgid_vnr(current);
1426 } 1571 }
1427 } 1572 }
1428 sma->sem_otime = get_seconds();
1429 /* maybe some queued-up processes were waiting for this */ 1573 /* maybe some queued-up processes were waiting for this */
1430 update_queue(sma, -1); 1574 INIT_LIST_HEAD(&tasks);
1575 do_smart_update(sma, NULL, 0, 1, &tasks);
1431 sem_unlock(sma); 1576 sem_unlock(sma);
1577 wake_up_sem_queue_do(&tasks);
1432 1578
1433 call_rcu(&un->rcu, free_un); 1579 call_rcu(&un->rcu, free_un);
1434 } 1580 }
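
The ipc/sem.c changes above split semaphore wake-ups into two phases: update_queue()/do_smart_update() only collect the wakeable waiters on a list while the semaphore spinlock is held, and wake_up_sem_queue_do() performs the actual wake-ups after every lock has been dropped. A minimal sketch of the resulting calling pattern, assuming only the helpers shown in this diff (sma, ns, semid, sops and nsops come from the surrounding caller):

	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	/* ... modify semaphore values while the lock is held ... */

	/* phase 1: queue wakeable waiters on @tasks, still locked */
	do_smart_update(sma, sops, nsops, 1, &tasks);
	sem_unlock(sma);

	/* phase 2: wake the collected tasks with no locks held */
	wake_up_sem_queue_do(&tasks);

Every call site converted above (freeary, semctl_main, semtimedop, exit_sem) follows this shape.
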
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 291775021b2e..422cb19f156e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2994,7 +2994,6 @@ static void cgroup_event_remove(struct work_struct *work)
2994 remove); 2994 remove);
2995 struct cgroup *cgrp = event->cgrp; 2995 struct cgroup *cgrp = event->cgrp;
2996 2996
2997 /* TODO: check return code */
2998 event->cft->unregister_event(cgrp, event->cft, event->eventfd); 2997 event->cft->unregister_event(cgrp, event->cft, event->eventfd);
2999 2998
3000 eventfd_ctx_put(event->eventfd); 2999 eventfd_ctx_put(event->eventfd);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 124ad9d6be16..63e8de13c948 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,20 @@
20/* Serializes the updates to cpu_online_mask, cpu_present_mask */ 20/* Serializes the updates to cpu_online_mask, cpu_present_mask */
21static DEFINE_MUTEX(cpu_add_remove_lock); 21static DEFINE_MUTEX(cpu_add_remove_lock);
22 22
23/*
24 * The following two APIs must be used when attempting
25 * to serialize the updates to cpu_online_mask, cpu_present_mask.
26 */
27void cpu_maps_update_begin(void)
28{
29 mutex_lock(&cpu_add_remove_lock);
30}
31
32void cpu_maps_update_done(void)
33{
34 mutex_unlock(&cpu_add_remove_lock);
35}
36
23static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); 37static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
24 38
25/* If set, cpu_up and cpu_down will return -EBUSY and do nothing. 39/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
@@ -27,6 +41,8 @@ static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
27 */ 41 */
28static int cpu_hotplug_disabled; 42static int cpu_hotplug_disabled;
29 43
44#ifdef CONFIG_HOTPLUG_CPU
45
30static struct { 46static struct {
31 struct task_struct *active_writer; 47 struct task_struct *active_writer;
32 struct mutex lock; /* Synchronizes accesses to refcount, */ 48 struct mutex lock; /* Synchronizes accesses to refcount, */
@@ -41,8 +57,6 @@ static struct {
41 .refcount = 0, 57 .refcount = 0,
42}; 58};
43 59
44#ifdef CONFIG_HOTPLUG_CPU
45
46void get_online_cpus(void) 60void get_online_cpus(void)
47{ 61{
48 might_sleep(); 62 might_sleep();
@@ -67,22 +81,6 @@ void put_online_cpus(void)
67} 81}
68EXPORT_SYMBOL_GPL(put_online_cpus); 82EXPORT_SYMBOL_GPL(put_online_cpus);
69 83
70#endif /* CONFIG_HOTPLUG_CPU */
71
72/*
73 * The following two API's must be used when attempting
74 * to serialize the updates to cpu_online_mask, cpu_present_mask.
75 */
76void cpu_maps_update_begin(void)
77{
78 mutex_lock(&cpu_add_remove_lock);
79}
80
81void cpu_maps_update_done(void)
82{
83 mutex_unlock(&cpu_add_remove_lock);
84}
85
86/* 84/*
87 * This ensures that the hotplug operation can begin only when the 85 * This ensures that the hotplug operation can begin only when the
88 * refcount goes to zero. 86 * refcount goes to zero.
@@ -124,6 +122,12 @@ static void cpu_hotplug_done(void)
124 cpu_hotplug.active_writer = NULL; 122 cpu_hotplug.active_writer = NULL;
125 mutex_unlock(&cpu_hotplug.lock); 123 mutex_unlock(&cpu_hotplug.lock);
126} 124}
125
126#else /* #if CONFIG_HOTPLUG_CPU */
127static void cpu_hotplug_begin(void) {}
128static void cpu_hotplug_done(void) {}
129#endif /* #else #if CONFIG_HOTPLUG_CPU */
130
127/* Need to know about CPUs going up/down? */ 131/* Need to know about CPUs going up/down? */
128int __ref register_cpu_notifier(struct notifier_block *nb) 132int __ref register_cpu_notifier(struct notifier_block *nb)
129{ 133{
@@ -134,6 +138,30 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
134 return ret; 138 return ret;
135} 139}
136 140
141static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
142 int *nr_calls)
143{
144 int ret;
145
146 ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
147 nr_calls);
148
149 return notifier_to_errno(ret);
150}
151
152static int cpu_notify(unsigned long val, void *v)
153{
154 return __cpu_notify(val, v, -1, NULL);
155}
156
157static void cpu_notify_nofail(unsigned long val, void *v)
158{
159 int err;
160
161 err = cpu_notify(val, v);
162 BUG_ON(err);
163}
164
137#ifdef CONFIG_HOTPLUG_CPU 165#ifdef CONFIG_HOTPLUG_CPU
138 166
139EXPORT_SYMBOL(register_cpu_notifier); 167EXPORT_SYMBOL(register_cpu_notifier);
@@ -181,8 +209,7 @@ static int __ref take_cpu_down(void *_param)
181 if (err < 0) 209 if (err < 0)
182 return err; 210 return err;
183 211
184 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, 212 cpu_notify(CPU_DYING | param->mod, param->hcpu);
185 param->hcpu);
186 213
187 if (task_cpu(param->caller) == cpu) 214 if (task_cpu(param->caller) == cpu)
188 move_task_off_dead_cpu(cpu, param->caller); 215 move_task_off_dead_cpu(cpu, param->caller);
@@ -212,17 +239,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
212 239
213 cpu_hotplug_begin(); 240 cpu_hotplug_begin();
214 set_cpu_active(cpu, false); 241 set_cpu_active(cpu, false);
215 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 242 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
216 hcpu, -1, &nr_calls); 243 if (err) {
217 if (err == NOTIFY_BAD) {
218 set_cpu_active(cpu, true); 244 set_cpu_active(cpu, true);
219 245
220 nr_calls--; 246 nr_calls--;
221 __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 247 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
222 hcpu, nr_calls, NULL);
223 printk("%s: attempt to take down CPU %u failed\n", 248 printk("%s: attempt to take down CPU %u failed\n",
224 __func__, cpu); 249 __func__, cpu);
225 err = -EINVAL;
226 goto out_release; 250 goto out_release;
227 } 251 }
228 252
@@ -230,9 +254,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
230 if (err) { 254 if (err) {
231 set_cpu_active(cpu, true); 255 set_cpu_active(cpu, true);
232 /* CPU didn't die: tell everyone. Can't complain. */ 256 /* CPU didn't die: tell everyone. Can't complain. */
233 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 257 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
234 hcpu) == NOTIFY_BAD)
235 BUG();
236 258
237 goto out_release; 259 goto out_release;
238 } 260 }
@@ -246,19 +268,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
246 __cpu_die(cpu); 268 __cpu_die(cpu);
247 269
248 /* CPU is completely dead: tell everyone. Too late to complain. */ 270 /* CPU is completely dead: tell everyone. Too late to complain. */
249 if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod, 271 cpu_notify_nofail(CPU_DEAD | mod, hcpu);
250 hcpu) == NOTIFY_BAD)
251 BUG();
252 272
253 check_for_tasks(cpu); 273 check_for_tasks(cpu);
254 274
255out_release: 275out_release:
256 cpu_hotplug_done(); 276 cpu_hotplug_done();
257 if (!err) { 277 if (!err)
258 if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod, 278 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
259 hcpu) == NOTIFY_BAD)
260 BUG();
261 }
262 return err; 279 return err;
263} 280}
264 281
@@ -293,13 +310,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
293 return -EINVAL; 310 return -EINVAL;
294 311
295 cpu_hotplug_begin(); 312 cpu_hotplug_begin();
296 ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu, 313 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
297 -1, &nr_calls); 314 if (ret) {
298 if (ret == NOTIFY_BAD) {
299 nr_calls--; 315 nr_calls--;
300 printk("%s: attempt to bring up CPU %u failed\n", 316 printk("%s: attempt to bring up CPU %u failed\n",
301 __func__, cpu); 317 __func__, cpu);
302 ret = -EINVAL;
303 goto out_notify; 318 goto out_notify;
304 } 319 }
305 320
@@ -312,12 +327,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
312 set_cpu_active(cpu, true); 327 set_cpu_active(cpu, true);
313 328
314 /* Now call notifier in preparation. */ 329 /* Now call notifier in preparation. */
315 raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); 330 cpu_notify(CPU_ONLINE | mod, hcpu);
316 331
317out_notify: 332out_notify:
318 if (ret != 0) 333 if (ret != 0)
319 __raw_notifier_call_chain(&cpu_chain, 334 __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
320 CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
321 cpu_hotplug_done(); 335 cpu_hotplug_done();
322 336
323 return ret; 337 return ret;
@@ -481,7 +495,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
481 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) 495 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
482 val = CPU_STARTING_FROZEN; 496 val = CPU_STARTING_FROZEN;
483#endif /* CONFIG_PM_SLEEP_SMP */ 497#endif /* CONFIG_PM_SLEEP_SMP */
484 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); 498 cpu_notify(val, (void *)(long)cpu);
485} 499}
486 500
487#endif /* CONFIG_SMP */ 501#endif /* CONFIG_SMP */
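
The kernel/cpu.c hunks above wrap raw_notifier_call_chain() in cpu_notify()/__cpu_notify()/cpu_notify_nofail() and translate chain results via notifier_to_errno(), so hotplug callbacks are now expected to encode failures with notifier_from_errno() instead of returning NOTIFY_BAD; the later hunks in padata, profile, relay, smp, softirq, timer and workqueue perform exactly that conversion. A hedged sketch of a callback written against the new convention — the callback name and the allocation helper are illustrative, not part of this patch:

	static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
						  unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_UP_PREPARE:
			if (alloc_example_state(cpu))	/* hypothetical helper */
				return notifier_from_errno(-ENOMEM);
			break;
		}
		return NOTIFY_OK;
	}
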
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 61d6af7fa676..02b9611eadde 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2469,7 +2469,8 @@ void cpuset_unlock(void)
2469} 2469}
2470 2470
2471/** 2471/**
2472 * cpuset_mem_spread_node() - On which node to begin search for a page 2472 * cpuset_mem_spread_node() - On which node to begin search for a file page
2473 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2473 * 2474 *
2474 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 2475 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2475 * tasks in a cpuset with is_spread_page or is_spread_slab set), 2476 * tasks in a cpuset with is_spread_page or is_spread_slab set),
@@ -2494,16 +2495,27 @@ void cpuset_unlock(void)
2494 * See kmem_cache_alloc_node(). 2495 * See kmem_cache_alloc_node().
2495 */ 2496 */
2496 2497
2497int cpuset_mem_spread_node(void) 2498static int cpuset_spread_node(int *rotor)
2498{ 2499{
2499 int node; 2500 int node;
2500 2501
2501 node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed); 2502 node = next_node(*rotor, current->mems_allowed);
2502 if (node == MAX_NUMNODES) 2503 if (node == MAX_NUMNODES)
2503 node = first_node(current->mems_allowed); 2504 node = first_node(current->mems_allowed);
2504 current->cpuset_mem_spread_rotor = node; 2505 *rotor = node;
2505 return node; 2506 return node;
2506} 2507}
2508
2509int cpuset_mem_spread_node(void)
2510{
2511 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2512}
2513
2514int cpuset_slab_spread_node(void)
2515{
2516 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2517}
2518
2507EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 2519EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2508 2520
2509/** 2521/**
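
cpuset_spread_node() above factors both spread policies into a single round-robin rotor: next_node() returns the first allowed node after the stored position, and MAX_NUMNODES signals that the walk ran off the end of the mask, at which point first_node() wraps back to the start of mems_allowed. The same rotor as a self-contained sketch (function name illustrative):

	static int rotor_next(int *rotor, nodemask_t allowed)
	{
		int node = next_node(*rotor, allowed);

		if (node == MAX_NUMNODES)	/* ran past the last set bit: wrap */
			node = first_node(allowed);
		*rotor = node;			/* remember where to resume */
		return node;
	}
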
diff --git a/kernel/cred.c b/kernel/cred.c
index 2c24870c55d1..a2d5504fbcc2 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -347,66 +347,6 @@ struct cred *prepare_exec_creds(void)
347} 347}
348 348
349/* 349/*
350 * prepare new credentials for the usermode helper dispatcher
351 */
352struct cred *prepare_usermodehelper_creds(void)
353{
354#ifdef CONFIG_KEYS
355 struct thread_group_cred *tgcred = NULL;
356#endif
357 struct cred *new;
358
359#ifdef CONFIG_KEYS
360 tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC);
361 if (!tgcred)
362 return NULL;
363#endif
364
365 new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
366 if (!new)
367 goto free_tgcred;
368
369 kdebug("prepare_usermodehelper_creds() alloc %p", new);
370
371 memcpy(new, &init_cred, sizeof(struct cred));
372
373 atomic_set(&new->usage, 1);
374 set_cred_subscribers(new, 0);
375 get_group_info(new->group_info);
376 get_uid(new->user);
377
378#ifdef CONFIG_KEYS
379 new->thread_keyring = NULL;
380 new->request_key_auth = NULL;
381 new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT;
382
383 atomic_set(&tgcred->usage, 1);
384 spin_lock_init(&tgcred->lock);
385 new->tgcred = tgcred;
386#endif
387
388#ifdef CONFIG_SECURITY
389 new->security = NULL;
390#endif
391 if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
392 goto error;
393 validate_creds(new);
394
395 BUG_ON(atomic_read(&new->usage) != 1);
396 return new;
397
398error:
399 put_cred(new);
400 return NULL;
401
402free_tgcred:
403#ifdef CONFIG_KEYS
404 kfree(tgcred);
405#endif
406 return NULL;
407}
408
409/*
410 * Copy credentials for the new process created by fork() 350 * Copy credentials for the new process created by fork()
411 * 351 *
412 * We share if we can, but under some circumstances we have to generate a new 352 * We share if we can, but under some circumstances we have to generate a new
diff --git a/kernel/exit.c b/kernel/exit.c
index 019a2843bf95..ceffc67b564a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -58,11 +58,11 @@
58 58
59static void exit_mm(struct task_struct * tsk); 59static void exit_mm(struct task_struct * tsk);
60 60
61static void __unhash_process(struct task_struct *p) 61static void __unhash_process(struct task_struct *p, bool group_dead)
62{ 62{
63 nr_threads--; 63 nr_threads--;
64 detach_pid(p, PIDTYPE_PID); 64 detach_pid(p, PIDTYPE_PID);
65 if (thread_group_leader(p)) { 65 if (group_dead) {
66 detach_pid(p, PIDTYPE_PGID); 66 detach_pid(p, PIDTYPE_PGID);
67 detach_pid(p, PIDTYPE_SID); 67 detach_pid(p, PIDTYPE_SID);
68 68
@@ -79,10 +79,9 @@ static void __unhash_process(struct task_struct *p)
79static void __exit_signal(struct task_struct *tsk) 79static void __exit_signal(struct task_struct *tsk)
80{ 80{
81 struct signal_struct *sig = tsk->signal; 81 struct signal_struct *sig = tsk->signal;
82 bool group_dead = thread_group_leader(tsk);
82 struct sighand_struct *sighand; 83 struct sighand_struct *sighand;
83 84 struct tty_struct *uninitialized_var(tty);
84 BUG_ON(!sig);
85 BUG_ON(!atomic_read(&sig->count));
86 85
87 sighand = rcu_dereference_check(tsk->sighand, 86 sighand = rcu_dereference_check(tsk->sighand,
88 rcu_read_lock_held() || 87 rcu_read_lock_held() ||
@@ -90,14 +89,16 @@ static void __exit_signal(struct task_struct *tsk)
90 spin_lock(&sighand->siglock); 89 spin_lock(&sighand->siglock);
91 90
92 posix_cpu_timers_exit(tsk); 91 posix_cpu_timers_exit(tsk);
93 if (atomic_dec_and_test(&sig->count)) 92 if (group_dead) {
94 posix_cpu_timers_exit_group(tsk); 93 posix_cpu_timers_exit_group(tsk);
95 else { 94 tty = sig->tty;
95 sig->tty = NULL;
96 } else {
96 /* 97 /*
97 * If there is any task waiting for the group exit 98 * If there is any task waiting for the group exit
98 * then notify it: 99 * then notify it:
99 */ 100 */
100 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) 101 if (sig->notify_count > 0 && !--sig->notify_count)
101 wake_up_process(sig->group_exit_task); 102 wake_up_process(sig->group_exit_task);
102 103
103 if (tsk == sig->curr_target) 104 if (tsk == sig->curr_target)
@@ -123,32 +124,24 @@ static void __exit_signal(struct task_struct *tsk)
123 sig->oublock += task_io_get_oublock(tsk); 124 sig->oublock += task_io_get_oublock(tsk);
124 task_io_accounting_add(&sig->ioac, &tsk->ioac); 125 task_io_accounting_add(&sig->ioac, &tsk->ioac);
125 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 126 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
126 sig = NULL; /* Marker for below. */
127 } 127 }
128 128
129 __unhash_process(tsk); 129 sig->nr_threads--;
130 __unhash_process(tsk, group_dead);
130 131
131 /* 132 /*
132 * Do this under ->siglock, we can race with another thread 133 * Do this under ->siglock, we can race with another thread
133 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 134 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
134 */ 135 */
135 flush_sigqueue(&tsk->pending); 136 flush_sigqueue(&tsk->pending);
136
137 tsk->signal = NULL;
138 tsk->sighand = NULL; 137 tsk->sighand = NULL;
139 spin_unlock(&sighand->siglock); 138 spin_unlock(&sighand->siglock);
140 139
141 __cleanup_sighand(sighand); 140 __cleanup_sighand(sighand);
142 clear_tsk_thread_flag(tsk,TIF_SIGPENDING); 141 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
143 if (sig) { 142 if (group_dead) {
144 flush_sigqueue(&sig->shared_pending); 143 flush_sigqueue(&sig->shared_pending);
145 taskstats_tgid_free(sig); 144 tty_kref_put(tty);
146 /*
147 * Make sure ->signal can't go away under rq->lock,
148 * see account_group_exec_runtime().
149 */
150 task_rq_unlock_wait(tsk);
151 __cleanup_signal(sig);
152 } 145 }
153} 146}
154 147
@@ -856,12 +849,9 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
856 849
857 tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE; 850 tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
858 851
859 /* mt-exec, de_thread() is waiting for us */ 852 /* mt-exec, de_thread() is waiting for group leader */
860 if (thread_group_leader(tsk) && 853 if (unlikely(tsk->signal->notify_count < 0))
861 tsk->signal->group_exit_task &&
862 tsk->signal->notify_count < 0)
863 wake_up_process(tsk->signal->group_exit_task); 854 wake_up_process(tsk->signal->group_exit_task);
864
865 write_unlock_irq(&tasklist_lock); 855 write_unlock_irq(&tasklist_lock);
866 856
867 tracehook_report_death(tsk, signal, cookie, group_dead); 857 tracehook_report_death(tsk, signal, cookie, group_dead);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4d57d9e3a6e9..bf9fef6d1bfe 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,6 +165,18 @@ void free_task(struct task_struct *tsk)
165} 165}
166EXPORT_SYMBOL(free_task); 166EXPORT_SYMBOL(free_task);
167 167
168static inline void free_signal_struct(struct signal_struct *sig)
169{
170 taskstats_tgid_free(sig);
171 kmem_cache_free(signal_cachep, sig);
172}
173
174static inline void put_signal_struct(struct signal_struct *sig)
175{
176 if (atomic_dec_and_test(&sig->sigcnt))
177 free_signal_struct(sig);
178}
179
168void __put_task_struct(struct task_struct *tsk) 180void __put_task_struct(struct task_struct *tsk)
169{ 181{
170 WARN_ON(!tsk->exit_state); 182 WARN_ON(!tsk->exit_state);
@@ -173,6 +185,7 @@ void __put_task_struct(struct task_struct *tsk)
173 185
174 exit_creds(tsk); 186 exit_creds(tsk);
175 delayacct_tsk_free(tsk); 187 delayacct_tsk_free(tsk);
188 put_signal_struct(tsk->signal);
176 189
177 if (!profile_handoff_task(tsk)) 190 if (!profile_handoff_task(tsk))
178 free_task(tsk); 191 free_task(tsk);
@@ -864,8 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
864 if (!sig) 877 if (!sig)
865 return -ENOMEM; 878 return -ENOMEM;
866 879
867 atomic_set(&sig->count, 1); 880 sig->nr_threads = 1;
868 atomic_set(&sig->live, 1); 881 atomic_set(&sig->live, 1);
882 atomic_set(&sig->sigcnt, 1);
869 init_waitqueue_head(&sig->wait_chldexit); 883 init_waitqueue_head(&sig->wait_chldexit);
870 if (clone_flags & CLONE_NEWPID) 884 if (clone_flags & CLONE_NEWPID)
871 sig->flags |= SIGNAL_UNKILLABLE; 885 sig->flags |= SIGNAL_UNKILLABLE;
@@ -889,13 +903,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
889 return 0; 903 return 0;
890} 904}
891 905
892void __cleanup_signal(struct signal_struct *sig)
893{
894 thread_group_cputime_free(sig);
895 tty_kref_put(sig->tty);
896 kmem_cache_free(signal_cachep, sig);
897}
898
899static void copy_flags(unsigned long clone_flags, struct task_struct *p) 906static void copy_flags(unsigned long clone_flags, struct task_struct *p)
900{ 907{
901 unsigned long new_flags = p->flags; 908 unsigned long new_flags = p->flags;
@@ -1079,6 +1086,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1079 } 1086 }
1080 mpol_fix_fork_child_flag(p); 1087 mpol_fix_fork_child_flag(p);
1081#endif 1088#endif
1089#ifdef CONFIG_CPUSETS
1090 p->cpuset_mem_spread_rotor = node_random(p->mems_allowed);
1091 p->cpuset_slab_spread_rotor = node_random(p->mems_allowed);
1092#endif
1082#ifdef CONFIG_TRACE_IRQFLAGS 1093#ifdef CONFIG_TRACE_IRQFLAGS
1083 p->irq_events = 0; 1094 p->irq_events = 0;
1084#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 1095#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
@@ -1245,8 +1256,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1245 } 1256 }
1246 1257
1247 if (clone_flags & CLONE_THREAD) { 1258 if (clone_flags & CLONE_THREAD) {
1248 atomic_inc(&current->signal->count); 1259 current->signal->nr_threads++;
1249 atomic_inc(&current->signal->live); 1260 atomic_inc(&current->signal->live);
1261 atomic_inc(&current->signal->sigcnt);
1250 p->group_leader = current->group_leader; 1262 p->group_leader = current->group_leader;
1251 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); 1263 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1252 } 1264 }
@@ -1259,7 +1271,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1259 p->nsproxy->pid_ns->child_reaper = p; 1271 p->nsproxy->pid_ns->child_reaper = p;
1260 1272
1261 p->signal->leader_pid = pid; 1273 p->signal->leader_pid = pid;
1262 tty_kref_put(p->signal->tty);
1263 p->signal->tty = tty_kref_get(current->signal->tty); 1274 p->signal->tty = tty_kref_get(current->signal->tty);
1264 attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); 1275 attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1265 attach_pid(p, PIDTYPE_SID, task_session(current)); 1276 attach_pid(p, PIDTYPE_SID, task_session(current));
@@ -1292,7 +1303,7 @@ bad_fork_cleanup_mm:
1292 mmput(p->mm); 1303 mmput(p->mm);
1293bad_fork_cleanup_signal: 1304bad_fork_cleanup_signal:
1294 if (!(clone_flags & CLONE_THREAD)) 1305 if (!(clone_flags & CLONE_THREAD))
1295 __cleanup_signal(p->signal); 1306 free_signal_struct(p->signal);
1296bad_fork_cleanup_sighand: 1307bad_fork_cleanup_sighand:
1297 __cleanup_sighand(p->sighand); 1308 __cleanup_sighand(p->sighand);
1298bad_fork_cleanup_fs: 1309bad_fork_cleanup_fs:
@@ -1327,6 +1338,16 @@ noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_re
1327 return regs; 1338 return regs;
1328} 1339}
1329 1340
1341static inline void init_idle_pids(struct pid_link *links)
1342{
1343 enum pid_type type;
1344
1345 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1346 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1347 links[type].pid = &init_struct_pid;
1348 }
1349}
1350
1330struct task_struct * __cpuinit fork_idle(int cpu) 1351struct task_struct * __cpuinit fork_idle(int cpu)
1331{ 1352{
1332 struct task_struct *task; 1353 struct task_struct *task;
@@ -1334,8 +1355,10 @@ struct task_struct * __cpuinit fork_idle(int cpu)
1334 1355
1335 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, 1356 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1336 &init_struct_pid, 0); 1357 &init_struct_pid, 0);
1337 if (!IS_ERR(task)) 1358 if (!IS_ERR(task)) {
1359 init_idle_pids(task->pids);
1338 init_idle(task, cpu); 1360 init_idle(task, cpu);
1361 }
1339 1362
1340 return task; 1363 return task;
1341} 1364}
@@ -1507,14 +1530,6 @@ static void check_unshare_flags(unsigned long *flags_ptr)
1507 *flags_ptr |= CLONE_SIGHAND; 1530 *flags_ptr |= CLONE_SIGHAND;
1508 1531
1509 /* 1532 /*
1510 * If unsharing signal handlers and the task was created
1511 * using CLONE_THREAD, then must unshare the thread
1512 */
1513 if ((*flags_ptr & CLONE_SIGHAND) &&
1514 (atomic_read(&current->signal->count) > 1))
1515 *flags_ptr |= CLONE_THREAD;
1516
1517 /*
1518 * If unsharing namespace, must also unshare filesystem information. 1533 * If unsharing namespace, must also unshare filesystem information.
1519 */ 1534 */
1520 if (*flags_ptr & CLONE_NEWNS) 1535 if (*flags_ptr & CLONE_NEWNS)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bf0e231d9702..6e9b19667a8d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -116,27 +116,16 @@ int __request_module(bool wait, const char *fmt, ...)
116 116
117 trace_module_request(module_name, wait, _RET_IP_); 117 trace_module_request(module_name, wait, _RET_IP_);
118 118
119 ret = call_usermodehelper(modprobe_path, argv, envp, 119 ret = call_usermodehelper_fns(modprobe_path, argv, envp,
120 wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC); 120 wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
121 NULL, NULL, NULL);
122
121 atomic_dec(&kmod_concurrent); 123 atomic_dec(&kmod_concurrent);
122 return ret; 124 return ret;
123} 125}
124EXPORT_SYMBOL(__request_module); 126EXPORT_SYMBOL(__request_module);
125#endif /* CONFIG_MODULES */ 127#endif /* CONFIG_MODULES */
126 128
127struct subprocess_info {
128 struct work_struct work;
129 struct completion *complete;
130 struct cred *cred;
131 char *path;
132 char **argv;
133 char **envp;
134 enum umh_wait wait;
135 int retval;
136 struct file *stdin;
137 void (*cleanup)(char **argv, char **envp);
138};
139
140/* 129/*
141 * This is the task which runs the usermode application 130 * This is the task which runs the usermode application
142 */ 131 */
@@ -145,36 +134,10 @@ static int ____call_usermodehelper(void *data)
145 struct subprocess_info *sub_info = data; 134 struct subprocess_info *sub_info = data;
146 int retval; 135 int retval;
147 136
148 BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
149
150 /* Unblock all signals */
151 spin_lock_irq(&current->sighand->siglock); 137 spin_lock_irq(&current->sighand->siglock);
152 flush_signal_handlers(current, 1); 138 flush_signal_handlers(current, 1);
153 sigemptyset(&current->blocked);
154 recalc_sigpending();
155 spin_unlock_irq(&current->sighand->siglock); 139 spin_unlock_irq(&current->sighand->siglock);
156 140
157 /* Install the credentials */
158 commit_creds(sub_info->cred);
159 sub_info->cred = NULL;
160
161 /* Install input pipe when needed */
162 if (sub_info->stdin) {
163 struct files_struct *f = current->files;
164 struct fdtable *fdt;
165 /* no races because files should be private here */
166 sys_close(0);
167 fd_install(0, sub_info->stdin);
168 spin_lock(&f->file_lock);
169 fdt = files_fdtable(f);
170 FD_SET(0, fdt->open_fds);
171 FD_CLR(0, fdt->close_on_exec);
172 spin_unlock(&f->file_lock);
173
174 /* and disallow core files too */
175 current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
176 }
177
178 /* We can run anywhere, unlike our parent keventd(). */ 141 /* We can run anywhere, unlike our parent keventd(). */
179 set_cpus_allowed_ptr(current, cpu_all_mask); 142 set_cpus_allowed_ptr(current, cpu_all_mask);
180 143
@@ -184,9 +147,16 @@ static int ____call_usermodehelper(void *data)
184 */ 147 */
185 set_user_nice(current, 0); 148 set_user_nice(current, 0);
186 149
150 if (sub_info->init) {
151 retval = sub_info->init(sub_info);
152 if (retval)
153 goto fail;
154 }
155
187 retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp); 156 retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
188 157
189 /* Exec failed? */ 158 /* Exec failed? */
159fail:
190 sub_info->retval = retval; 160 sub_info->retval = retval;
191 do_exit(0); 161 do_exit(0);
192} 162}
@@ -194,9 +164,7 @@ static int ____call_usermodehelper(void *data)
194void call_usermodehelper_freeinfo(struct subprocess_info *info) 164void call_usermodehelper_freeinfo(struct subprocess_info *info)
195{ 165{
196 if (info->cleanup) 166 if (info->cleanup)
197 (*info->cleanup)(info->argv, info->envp); 167 (*info->cleanup)(info);
198 if (info->cred)
199 put_cred(info->cred);
200 kfree(info); 168 kfree(info);
201} 169}
202EXPORT_SYMBOL(call_usermodehelper_freeinfo); 170EXPORT_SYMBOL(call_usermodehelper_freeinfo);
@@ -207,16 +175,16 @@ static int wait_for_helper(void *data)
207 struct subprocess_info *sub_info = data; 175 struct subprocess_info *sub_info = data;
208 pid_t pid; 176 pid_t pid;
209 177
210 /* Install a handler: if SIGCLD isn't handled sys_wait4 won't 178 /* If SIGCLD is ignored sys_wait4 won't populate the status. */
211 * populate the status, but will return -ECHILD. */ 179 spin_lock_irq(&current->sighand->siglock);
212 allow_signal(SIGCHLD); 180 current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
181 spin_unlock_irq(&current->sighand->siglock);
213 182
214 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD); 183 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
215 if (pid < 0) { 184 if (pid < 0) {
216 sub_info->retval = pid; 185 sub_info->retval = pid;
217 } else { 186 } else {
218 int ret; 187 int ret = -ECHILD;
219
220 /* 188 /*
221 * Normally it is bogus to call wait4() from in-kernel because 189 * Normally it is bogus to call wait4() from in-kernel because
222 * wait4() wants to write the exit code to a userspace address. 190 * wait4() wants to write the exit code to a userspace address.
@@ -237,10 +205,7 @@ static int wait_for_helper(void *data)
237 sub_info->retval = ret; 205 sub_info->retval = ret;
238 } 206 }
239 207
240 if (sub_info->wait == UMH_NO_WAIT) 208 complete(sub_info->complete);
241 call_usermodehelper_freeinfo(sub_info);
242 else
243 complete(sub_info->complete);
244 return 0; 209 return 0;
245} 210}
246 211
@@ -249,15 +214,13 @@ static void __call_usermodehelper(struct work_struct *work)
249{ 214{
250 struct subprocess_info *sub_info = 215 struct subprocess_info *sub_info =
251 container_of(work, struct subprocess_info, work); 216 container_of(work, struct subprocess_info, work);
252 pid_t pid;
253 enum umh_wait wait = sub_info->wait; 217 enum umh_wait wait = sub_info->wait;
254 218 pid_t pid;
255 BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
256 219
257 /* CLONE_VFORK: wait until the usermode helper has execve'd 220 /* CLONE_VFORK: wait until the usermode helper has execve'd
258 * successfully. We need the data structures to stay around 221 * successfully. We need the data structures to stay around
259 * until that is done. */ 222 * until that is done. */
260 if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT) 223 if (wait == UMH_WAIT_PROC)
261 pid = kernel_thread(wait_for_helper, sub_info, 224 pid = kernel_thread(wait_for_helper, sub_info,
262 CLONE_FS | CLONE_FILES | SIGCHLD); 225 CLONE_FS | CLONE_FILES | SIGCHLD);
263 else 226 else
@@ -266,15 +229,16 @@ static void __call_usermodehelper(struct work_struct *work)
266 229
267 switch (wait) { 230 switch (wait) {
268 case UMH_NO_WAIT: 231 case UMH_NO_WAIT:
232 call_usermodehelper_freeinfo(sub_info);
269 break; 233 break;
270 234
271 case UMH_WAIT_PROC: 235 case UMH_WAIT_PROC:
272 if (pid > 0) 236 if (pid > 0)
273 break; 237 break;
274 sub_info->retval = pid;
275 /* FALLTHROUGH */ 238 /* FALLTHROUGH */
276
277 case UMH_WAIT_EXEC: 239 case UMH_WAIT_EXEC:
240 if (pid < 0)
241 sub_info->retval = pid;
278 complete(sub_info->complete); 242 complete(sub_info->complete);
279 } 243 }
280} 244}
@@ -376,80 +340,37 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
376 sub_info->path = path; 340 sub_info->path = path;
377 sub_info->argv = argv; 341 sub_info->argv = argv;
378 sub_info->envp = envp; 342 sub_info->envp = envp;
379 sub_info->cred = prepare_usermodehelper_creds();
380 if (!sub_info->cred) {
381 kfree(sub_info);
382 return NULL;
383 }
384
385 out: 343 out:
386 return sub_info; 344 return sub_info;
387} 345}
388EXPORT_SYMBOL(call_usermodehelper_setup); 346EXPORT_SYMBOL(call_usermodehelper_setup);
389 347
390/** 348/**
391 * call_usermodehelper_setkeys - set the session keys for usermode helper 349 * call_usermodehelper_setfns - set a cleanup/init function
392 * @info: a subprocess_info returned by call_usermodehelper_setup
393 * @session_keyring: the session keyring for the process
394 */
395void call_usermodehelper_setkeys(struct subprocess_info *info,
396 struct key *session_keyring)
397{
398#ifdef CONFIG_KEYS
399 struct thread_group_cred *tgcred = info->cred->tgcred;
400 key_put(tgcred->session_keyring);
401 tgcred->session_keyring = key_get(session_keyring);
402#else
403 BUG();
404#endif
405}
406EXPORT_SYMBOL(call_usermodehelper_setkeys);
407
408/**
409 * call_usermodehelper_setcleanup - set a cleanup function
410 * @info: a subprocess_info returned by call_usermodehelper_setup 350 * @info: a subprocess_info returned by call_usermodehelper_setup
411 * @cleanup: a cleanup function 351 * @cleanup: a cleanup function
352 * @init: an init function
353 * @data: arbitrary context sensitive data
412 * 354 *
413 * The cleanup function is just befor ethe subprocess_info is about to 355 * The init function is used to customize the helper process prior to
356 * exec. A non-zero return code causes the process to error out, exit,
357 * and return the failure to the calling process
358 *
 359 * The cleanup function is just before the subprocess_info is about to
414 * be freed. This can be used for freeing the argv and envp. The 360 * be freed. This can be used for freeing the argv and envp. The
 415 * function must be runnable in either a process context or the 361 * function must be runnable in either a process context or the
416 * context in which call_usermodehelper_exec is called. 362 * context in which call_usermodehelper_exec is called.
417 */ 363 */
418void call_usermodehelper_setcleanup(struct subprocess_info *info, 364void call_usermodehelper_setfns(struct subprocess_info *info,
419 void (*cleanup)(char **argv, char **envp)) 365 int (*init)(struct subprocess_info *info),
366 void (*cleanup)(struct subprocess_info *info),
367 void *data)
420{ 368{
421 info->cleanup = cleanup; 369 info->cleanup = cleanup;
370 info->init = init;
371 info->data = data;
422} 372}
423EXPORT_SYMBOL(call_usermodehelper_setcleanup); 373EXPORT_SYMBOL(call_usermodehelper_setfns);
424
425/**
426 * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin
427 * @sub_info: a subprocess_info returned by call_usermodehelper_setup
428 * @filp: set to the write-end of a pipe
429 *
430 * This constructs a pipe, and sets the read end to be the stdin of the
431 * subprocess, and returns the write-end in *@filp.
432 */
433int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
434 struct file **filp)
435{
436 struct file *f;
437
438 f = create_write_pipe(0);
439 if (IS_ERR(f))
440 return PTR_ERR(f);
441 *filp = f;
442
443 f = create_read_pipe(f, 0);
444 if (IS_ERR(f)) {
445 free_write_pipe(*filp);
446 return PTR_ERR(f);
447 }
448 sub_info->stdin = f;
449
450 return 0;
451}
452EXPORT_SYMBOL(call_usermodehelper_stdinpipe);
453 374
454/** 375/**
455 * call_usermodehelper_exec - start a usermode application 376 * call_usermodehelper_exec - start a usermode application
@@ -469,9 +390,6 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
469 DECLARE_COMPLETION_ONSTACK(done); 390 DECLARE_COMPLETION_ONSTACK(done);
470 int retval = 0; 391 int retval = 0;
471 392
472 BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
473 validate_creds(sub_info->cred);
474
475 helper_lock(); 393 helper_lock();
476 if (sub_info->path[0] == '\0') 394 if (sub_info->path[0] == '\0')
477 goto out; 395 goto out;
@@ -498,41 +416,6 @@ unlock:
498} 416}
499EXPORT_SYMBOL(call_usermodehelper_exec); 417EXPORT_SYMBOL(call_usermodehelper_exec);
500 418
501/**
502 * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin
503 * @path: path to usermode executable
504 * @argv: arg vector for process
505 * @envp: environment for process
506 * @filp: set to the write-end of a pipe
507 *
508 * This is a simple wrapper which executes a usermode-helper function
509 * with a pipe as stdin. It is implemented entirely in terms of
510 * lower-level call_usermodehelper_* functions.
511 */
512int call_usermodehelper_pipe(char *path, char **argv, char **envp,
513 struct file **filp)
514{
515 struct subprocess_info *sub_info;
516 int ret;
517
518 sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
519 if (sub_info == NULL)
520 return -ENOMEM;
521
522 ret = call_usermodehelper_stdinpipe(sub_info, filp);
523 if (ret < 0) {
524 call_usermodehelper_freeinfo(sub_info);
525 return ret;
526 }
527
528 ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
529 if (ret < 0) /* Failed to execute helper, close pipe */
530 filp_close(*filp, NULL);
531
532 return ret;
533}
534EXPORT_SYMBOL(call_usermodehelper_pipe);
535
536void __init usermodehelper_init(void) 419void __init usermodehelper_init(void)
537{ 420{
538 khelper_wq = create_singlethread_workqueue("khelper"); 421 khelper_wq = create_singlethread_workqueue("khelper");
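
With prepare_usermodehelper_creds(), call_usermodehelper_setkeys(), call_usermodehelper_stdinpipe() and call_usermodehelper_pipe() removed, per-helper customization now funnels through the new init/cleanup hooks. A sketch of the surviving call sequence, mirroring the kernel/sys.c conversion later in this patch; the path, arguments and function names are illustrative:

	static void example_cleanup(struct subprocess_info *info)
	{
		argv_free(info->argv);	/* argv came from argv_split() below */
	}

	static int run_example_helper(void)
	{
		char **argv = argv_split(GFP_KERNEL, "/sbin/example --flag", NULL);
		struct subprocess_info *info;

		if (argv == NULL)
			return -ENOMEM;

		info = call_usermodehelper_setup(argv[0], argv, NULL, GFP_KERNEL);
		if (info == NULL) {
			argv_free(argv);
			return -ENOMEM;
		}

		call_usermodehelper_setfns(info, NULL /* no init hook */,
					   example_cleanup, NULL);
		return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
	}
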
diff --git a/kernel/padata.c b/kernel/padata.c
index b1c9857f8402..fdd8ae609ce3 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -659,7 +659,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
659 err = __padata_add_cpu(pinst, cpu); 659 err = __padata_add_cpu(pinst, cpu);
660 mutex_unlock(&pinst->lock); 660 mutex_unlock(&pinst->lock);
661 if (err) 661 if (err)
662 return NOTIFY_BAD; 662 return notifier_from_errno(err);
663 break; 663 break;
664 664
665 case CPU_DOWN_PREPARE: 665 case CPU_DOWN_PREPARE:
@@ -670,7 +670,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
670 err = __padata_remove_cpu(pinst, cpu); 670 err = __padata_remove_cpu(pinst, cpu);
671 mutex_unlock(&pinst->lock); 671 mutex_unlock(&pinst->lock);
672 if (err) 672 if (err)
673 return NOTIFY_BAD; 673 return notifier_from_errno(err);
674 break; 674 break;
675 675
676 case CPU_UP_CANCELED: 676 case CPU_UP_CANCELED:
diff --git a/kernel/panic.c b/kernel/panic.c
index dbe13dbb057a..3b16cd93fa7d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -87,6 +87,7 @@ NORET_TYPE void panic(const char * fmt, ...)
87 */ 87 */
88 preempt_disable(); 88 preempt_disable();
89 89
90 console_verbose();
90 bust_spinlocks(1); 91 bust_spinlocks(1);
91 va_start(args, fmt); 92 va_start(args, fmt);
92 vsnprintf(buf, sizeof(buf), fmt, args); 93 vsnprintf(buf, sizeof(buf), fmt, args);
diff --git a/kernel/pid.c b/kernel/pid.c
index aebb30d9c233..e9fd8c132d26 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -513,6 +513,13 @@ void __init pidhash_init(void)
513 513
514void __init pidmap_init(void) 514void __init pidmap_init(void)
515{ 515{
516 /* bump default and minimum pid_max based on number of cpus */
517 pid_max = min(pid_max_max, max_t(int, pid_max,
518 PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
519 pid_max_min = max_t(int, pid_max_min,
520 PIDS_PER_CPU_MIN * num_possible_cpus());
521 pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
522
516 init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); 523 init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
517 /* Reserve PID 0. We never call free_pidmap(0) */ 524 /* Reserve PID 0. We never call free_pidmap(0) */
518 set_bit(0, init_pid_ns.pidmap[0].page); 525 set_bit(0, init_pid_ns.pidmap[0].page);
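
Assuming PIDS_PER_CPU_DEFAULT is 1024 and PIDS_PER_CPU_MIN is 8 (the companion threads.h values, not shown in this hunk), a 64-CPU machine now boots with pid_max raised to max(32768, 64 * 1024) = 65536, still capped by pid_max_max, and pid_max_min raised to max(301, 64 * 8) = 512; a 4-CPU machine keeps the historical 32768/301 defaults, since 4 * 1024 and 4 * 8 do not exceed them.
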
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 00bb252f29a2..9829646d399c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -363,7 +363,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
363 } 363 }
364 } else { 364 } else {
365 read_lock(&tasklist_lock); 365 read_lock(&tasklist_lock);
366 if (thread_group_leader(p) && p->signal) { 366 if (thread_group_leader(p) && p->sighand) {
367 error = 367 error =
368 cpu_clock_sample_group(which_clock, 368 cpu_clock_sample_group(which_clock,
369 p, &rtn); 369 p, &rtn);
@@ -439,7 +439,7 @@ int posix_cpu_timer_del(struct k_itimer *timer)
439 439
440 if (likely(p != NULL)) { 440 if (likely(p != NULL)) {
441 read_lock(&tasklist_lock); 441 read_lock(&tasklist_lock);
442 if (unlikely(p->signal == NULL)) { 442 if (unlikely(p->sighand == NULL)) {
443 /* 443 /*
444 * We raced with the reaping of the task. 444 * We raced with the reaping of the task.
445 * The deletion should have cleared us off the list. 445 * The deletion should have cleared us off the list.
@@ -691,10 +691,10 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
691 read_lock(&tasklist_lock); 691 read_lock(&tasklist_lock);
692 /* 692 /*
693 * We need the tasklist_lock to protect against reaping that 693 * We need the tasklist_lock to protect against reaping that
694 * clears p->signal. If p has just been reaped, we can no 694 * clears p->sighand. If p has just been reaped, we can no
695 * longer get any information about it at all. 695 * longer get any information about it at all.
696 */ 696 */
697 if (unlikely(p->signal == NULL)) { 697 if (unlikely(p->sighand == NULL)) {
698 read_unlock(&tasklist_lock); 698 read_unlock(&tasklist_lock);
699 put_task_struct(p); 699 put_task_struct(p);
700 timer->it.cpu.task = NULL; 700 timer->it.cpu.task = NULL;
@@ -863,7 +863,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
863 clear_dead = p->exit_state; 863 clear_dead = p->exit_state;
864 } else { 864 } else {
865 read_lock(&tasklist_lock); 865 read_lock(&tasklist_lock);
866 if (unlikely(p->signal == NULL)) { 866 if (unlikely(p->sighand == NULL)) {
867 /* 867 /*
868 * The process has been reaped. 868 * The process has been reaped.
869 * We can't even collect a sample any more. 869 * We can't even collect a sample any more.
@@ -1199,7 +1199,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1199 spin_lock(&p->sighand->siglock); 1199 spin_lock(&p->sighand->siglock);
1200 } else { 1200 } else {
1201 read_lock(&tasklist_lock); 1201 read_lock(&tasklist_lock);
1202 if (unlikely(p->signal == NULL)) { 1202 if (unlikely(p->sighand == NULL)) {
1203 /* 1203 /*
1204 * The process has been reaped. 1204 * The process has been reaped.
1205 * We can't even collect a sample any more. 1205 * We can't even collect a sample any more.
diff --git a/kernel/profile.c b/kernel/profile.c
index dfadc5b729f1..b22a899934cc 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -365,14 +365,14 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
365 switch (action) { 365 switch (action) {
366 case CPU_UP_PREPARE: 366 case CPU_UP_PREPARE:
367 case CPU_UP_PREPARE_FROZEN: 367 case CPU_UP_PREPARE_FROZEN:
368 node = cpu_to_node(cpu); 368 node = cpu_to_mem(cpu);
369 per_cpu(cpu_profile_flip, cpu) = 0; 369 per_cpu(cpu_profile_flip, cpu) = 0;
370 if (!per_cpu(cpu_profile_hits, cpu)[1]) { 370 if (!per_cpu(cpu_profile_hits, cpu)[1]) {
371 page = alloc_pages_exact_node(node, 371 page = alloc_pages_exact_node(node,
372 GFP_KERNEL | __GFP_ZERO, 372 GFP_KERNEL | __GFP_ZERO,
373 0); 373 0);
374 if (!page) 374 if (!page)
375 return NOTIFY_BAD; 375 return notifier_from_errno(-ENOMEM);
376 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); 376 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
377 } 377 }
378 if (!per_cpu(cpu_profile_hits, cpu)[0]) { 378 if (!per_cpu(cpu_profile_hits, cpu)[0]) {
@@ -388,7 +388,7 @@ out_free:
388 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); 388 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
389 per_cpu(cpu_profile_hits, cpu)[1] = NULL; 389 per_cpu(cpu_profile_hits, cpu)[1] = NULL;
390 __free_page(page); 390 __free_page(page);
391 return NOTIFY_BAD; 391 return notifier_from_errno(-ENOMEM);
392 case CPU_ONLINE: 392 case CPU_ONLINE:
393 case CPU_ONLINE_FROZEN: 393 case CPU_ONLINE_FROZEN:
394 if (prof_cpu_mask != NULL) 394 if (prof_cpu_mask != NULL)
@@ -567,7 +567,7 @@ static int create_hash_tables(void)
567 int cpu; 567 int cpu;
568 568
569 for_each_online_cpu(cpu) { 569 for_each_online_cpu(cpu) {
570 int node = cpu_to_node(cpu); 570 int node = cpu_to_mem(cpu);
571 struct page *page; 571 struct page *page;
572 572
573 page = alloc_pages_exact_node(node, 573 page = alloc_pages_exact_node(node,
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 6af9cdd558b7..74a3d693c196 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -594,6 +594,32 @@ int ptrace_request(struct task_struct *child, long request,
594 ret = ptrace_detach(child, data); 594 ret = ptrace_detach(child, data);
595 break; 595 break;
596 596
597#ifdef CONFIG_BINFMT_ELF_FDPIC
598 case PTRACE_GETFDPIC: {
599 struct mm_struct *mm = get_task_mm(child);
600 unsigned long tmp = 0;
601
602 ret = -ESRCH;
603 if (!mm)
604 break;
605
606 switch (addr) {
607 case PTRACE_GETFDPIC_EXEC:
608 tmp = mm->context.exec_fdpic_loadmap;
609 break;
610 case PTRACE_GETFDPIC_INTERP:
611 tmp = mm->context.interp_fdpic_loadmap;
612 break;
613 default:
614 break;
615 }
616 mmput(mm);
617
618 ret = put_user(tmp, (unsigned long __user *) data);
619 break;
620 }
621#endif
622
597#ifdef PTRACE_SINGLESTEP 623#ifdef PTRACE_SINGLESTEP
598 case PTRACE_SINGLESTEP: 624 case PTRACE_SINGLESTEP:
599#endif 625#endif
diff --git a/kernel/relay.c b/kernel/relay.c
index 4268287148c1..c7cf397fb929 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -539,7 +539,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
539 "relay_hotcpu_callback: cpu %d buffer " 539 "relay_hotcpu_callback: cpu %d buffer "
540 "creation failed\n", hotcpu); 540 "creation failed\n", hotcpu);
541 mutex_unlock(&relay_channels_mutex); 541 mutex_unlock(&relay_channels_mutex);
542 return NOTIFY_BAD; 542 return notifier_from_errno(-ENOMEM);
543 } 543 }
544 } 544 }
545 mutex_unlock(&relay_channels_mutex); 545 mutex_unlock(&relay_channels_mutex);
diff --git a/kernel/sched.c b/kernel/sched.c
index 054a6012de99..15b93f617fd7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -969,14 +969,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
969 } 969 }
970} 970}
971 971
972void task_rq_unlock_wait(struct task_struct *p)
973{
974 struct rq *rq = task_rq(p);
975
976 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
977 raw_spin_unlock_wait(&rq->lock);
978}
979
980static void __task_rq_unlock(struct rq *rq) 972static void __task_rq_unlock(struct rq *rq)
981 __releases(rq->lock) 973 __releases(rq->lock)
982{ 974{
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 87a330a7185f..35565395d00d 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -381,15 +381,9 @@ __initcall(init_sched_debug_procfs);
381void proc_sched_show_task(struct task_struct *p, struct seq_file *m) 381void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
382{ 382{
383 unsigned long nr_switches; 383 unsigned long nr_switches;
384 unsigned long flags;
385 int num_threads = 1;
386
387 if (lock_task_sighand(p, &flags)) {
388 num_threads = atomic_read(&p->signal->count);
389 unlock_task_sighand(p, &flags);
390 }
391 384
392 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads); 385 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
386 get_nr_threads(p));
393 SEQ_printf(m, 387 SEQ_printf(m,
394 "---------------------------------------------------------\n"); 388 "---------------------------------------------------------\n");
395#define __P(F) \ 389#define __P(F) \
diff --git a/kernel/signal.c b/kernel/signal.c
index 825a3f24ad76..906ae5a1779c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -642,7 +642,7 @@ static inline bool si_fromuser(const struct siginfo *info)
642static int check_kill_permission(int sig, struct siginfo *info, 642static int check_kill_permission(int sig, struct siginfo *info,
643 struct task_struct *t) 643 struct task_struct *t)
644{ 644{
645 const struct cred *cred = current_cred(), *tcred; 645 const struct cred *cred, *tcred;
646 struct pid *sid; 646 struct pid *sid;
647 int error; 647 int error;
648 648
@@ -656,8 +656,10 @@ static int check_kill_permission(int sig, struct siginfo *info,
656 if (error) 656 if (error)
657 return error; 657 return error;
658 658
659 cred = current_cred();
659 tcred = __task_cred(t); 660 tcred = __task_cred(t);
660 if ((cred->euid ^ tcred->suid) && 661 if (!same_thread_group(current, t) &&
662 (cred->euid ^ tcred->suid) &&
661 (cred->euid ^ tcred->uid) && 663 (cred->euid ^ tcred->uid) &&
662 (cred->uid ^ tcred->suid) && 664 (cred->uid ^ tcred->suid) &&
663 (cred->uid ^ tcred->uid) && 665 (cred->uid ^ tcred->uid) &&
@@ -1083,23 +1085,24 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1083/* 1085/*
1084 * Nuke all other threads in the group. 1086 * Nuke all other threads in the group.
1085 */ 1087 */
1086void zap_other_threads(struct task_struct *p) 1088int zap_other_threads(struct task_struct *p)
1087{ 1089{
1088 struct task_struct *t; 1090 struct task_struct *t = p;
1091 int count = 0;
1089 1092
1090 p->signal->group_stop_count = 0; 1093 p->signal->group_stop_count = 0;
1091 1094
1092 for (t = next_thread(p); t != p; t = next_thread(t)) { 1095 while_each_thread(p, t) {
1093 /* 1096 count++;
1094 * Don't bother with already dead threads 1097
1095 */ 1098 /* Don't bother with already dead threads */
1096 if (t->exit_state) 1099 if (t->exit_state)
1097 continue; 1100 continue;
1098
1099 /* SIGKILL will be handled before any pending SIGSTOP */
1100 sigaddset(&t->pending.signal, SIGKILL); 1101 sigaddset(&t->pending.signal, SIGKILL);
1101 signal_wake_up(t, 1); 1102 signal_wake_up(t, 1);
1102 } 1103 }
1104
1105 return count;
1103} 1106}
1104 1107
1105struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) 1108struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
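
zap_other_threads() now reports how many sibling threads it walked (dead or alive), using while_each_thread() in place of the open-coded next_thread() loop. The macro semantics the count relies on, as a fragment sketch: while_each_thread(g, t) keeps advancing t around the circular thread_group list and stops once t wraps back to g, so the starting task itself is never visited:

	struct task_struct *t = p;
	int others = 0;

	while_each_thread(p, t)	/* visits every thread except p */
		others++;
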
diff --git a/kernel/smp.c b/kernel/smp.c
index 3fc697336183..75c970c715d3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
52 case CPU_UP_PREPARE_FROZEN: 52 case CPU_UP_PREPARE_FROZEN:
53 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 53 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
54 cpu_to_node(cpu))) 54 cpu_to_node(cpu)))
55 return NOTIFY_BAD; 55 return notifier_from_errno(-ENOMEM);
56 break; 56 break;
57 57
58#ifdef CONFIG_HOTPLUG_CPU 58#ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0db913a5c60f..825e1126008f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -808,7 +808,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
808 p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); 808 p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
809 if (IS_ERR(p)) { 809 if (IS_ERR(p)) {
810 printk("ksoftirqd for %i failed\n", hotcpu); 810 printk("ksoftirqd for %i failed\n", hotcpu);
811 return NOTIFY_BAD; 811 return notifier_from_errno(PTR_ERR(p));
812 } 812 }
813 kthread_bind(p, hotcpu); 813 kthread_bind(p, hotcpu);
814 per_cpu(ksoftirqd, hotcpu) = p; 814 per_cpu(ksoftirqd, hotcpu) = p;
diff --git a/kernel/sys.c b/kernel/sys.c
index 0d36d889c74d..e83ddbbaf89d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1632,9 +1632,9 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
1632 1632
1633char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; 1633char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
1634 1634
1635static void argv_cleanup(char **argv, char **envp) 1635static void argv_cleanup(struct subprocess_info *info)
1636{ 1636{
1637 argv_free(argv); 1637 argv_free(info->argv);
1638} 1638}
1639 1639
1640/** 1640/**
@@ -1668,7 +1668,7 @@ int orderly_poweroff(bool force)
1668 goto out; 1668 goto out;
1669 } 1669 }
1670 1670
1671 call_usermodehelper_setcleanup(info, argv_cleanup); 1671 call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
1672 1672
1673 ret = call_usermodehelper_exec(info, UMH_NO_WAIT); 1673 ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
1674 1674
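The cleanup hook's signature changes with the helper rename. A hedged sketch of the new interface (signature approximate for this kernel version): the init and cleanup callbacks now receive the subprocess_info itself, with an optional private data pointer stored alongside:

void call_usermodehelper_setfns(struct subprocess_info *info,
				int (*init)(struct subprocess_info *info),
				void (*cleanup)(struct subprocess_info *info),
				void *data);

which is why argv_cleanup() above now fetches the array from info->argv instead of taking it as a parameter.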
diff --git a/kernel/timer.c b/kernel/timer.c
index be394af5bc22..e3b8c697bde4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1680,11 +1680,14 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1680 unsigned long action, void *hcpu) 1680 unsigned long action, void *hcpu)
1681{ 1681{
1682 long cpu = (long)hcpu; 1682 long cpu = (long)hcpu;
1683 int err;
1684
1683 switch(action) { 1685 switch(action) {
1684 case CPU_UP_PREPARE: 1686 case CPU_UP_PREPARE:
1685 case CPU_UP_PREPARE_FROZEN: 1687 case CPU_UP_PREPARE_FROZEN:
1686 if (init_timers_cpu(cpu) < 0) 1688 err = init_timers_cpu(cpu);
1687 return NOTIFY_BAD; 1689 if (err < 0)
1690 return notifier_from_errno(err);
1688 break; 1691 break;
1689#ifdef CONFIG_HOTPLUG_CPU 1692#ifdef CONFIG_HOTPLUG_CPU
1690 case CPU_DEAD: 1693 case CPU_DEAD:
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 77dabbf64b8f..327d2deb4451 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1110,7 +1110,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
1110 unsigned int cpu = (unsigned long)hcpu; 1110 unsigned int cpu = (unsigned long)hcpu;
1111 struct cpu_workqueue_struct *cwq; 1111 struct cpu_workqueue_struct *cwq;
1112 struct workqueue_struct *wq; 1112 struct workqueue_struct *wq;
1113 int ret = NOTIFY_OK; 1113 int err = 0;
1114 1114
1115 action &= ~CPU_TASKS_FROZEN; 1115 action &= ~CPU_TASKS_FROZEN;
1116 1116
@@ -1124,12 +1124,13 @@ undo:
1124 1124
1125 switch (action) { 1125 switch (action) {
1126 case CPU_UP_PREPARE: 1126 case CPU_UP_PREPARE:
1127 if (!create_workqueue_thread(cwq, cpu)) 1127 err = create_workqueue_thread(cwq, cpu);
1128 if (!err)
1128 break; 1129 break;
1129 printk(KERN_ERR "workqueue [%s] for %i failed\n", 1130 printk(KERN_ERR "workqueue [%s] for %i failed\n",
1130 wq->name, cpu); 1131 wq->name, cpu);
1131 action = CPU_UP_CANCELED; 1132 action = CPU_UP_CANCELED;
1132 ret = NOTIFY_BAD; 1133 err = -ENOMEM;
1133 goto undo; 1134 goto undo;
1134 1135
1135 case CPU_ONLINE: 1136 case CPU_ONLINE:
@@ -1150,7 +1151,7 @@ undo:
1150 cpumask_clear_cpu(cpu, cpu_populated_map); 1151 cpumask_clear_cpu(cpu, cpu_populated_map);
1151 } 1152 }
1152 1153
1153 return ret; 1154 return notifier_from_errno(err);
1154} 1155}
1155 1156
1156#ifdef CONFIG_SMP 1157#ifdef CONFIG_SMP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 231208948363..e722e9d62221 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -898,6 +898,18 @@ config LKDTM
898 Documentation on how to use the module can be found in 898 Documentation on how to use the module can be found in
899 Documentation/fault-injection/provoke-crashes.txt 899 Documentation/fault-injection/provoke-crashes.txt
900 900
901config CPU_NOTIFIER_ERROR_INJECT
902 tristate "CPU notifier error injection module"
903 depends on HOTPLUG_CPU && DEBUG_KERNEL
904 help
905 This option provides a kernel module that can be used to test
906	  the error handling of the cpu notifiers.

907
908 To compile this code as a module, choose M here: the module will
909 be called cpu-notifier-error-inject.
910
911 If unsure, say N.
912
901config FAULT_INJECTION 913config FAULT_INJECTION
902 bool "Fault-injection framework" 914 bool "Fault-injection framework"
903 depends on DEBUG_KERNEL 915 depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 9e6d3c29d73a..c8567a59d316 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
85obj-$(CONFIG_SWIOTLB) += swiotlb.o 85obj-$(CONFIG_SWIOTLB) += swiotlb.o
86obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o 86obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
87obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o 87obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
88obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
88 89
89lib-$(CONFIG_GENERIC_BUG) += bug.o 90lib-$(CONFIG_GENERIC_BUG) += bug.o
90 91
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ffb78c916ccd..d7137e7e06e8 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -672,7 +672,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
672 * 672 *
673 * The bit positions 0 through @bits are valid positions in @buf. 673 * The bit positions 0 through @bits are valid positions in @buf.
674 */ 674 */
675static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) 675int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
676{ 676{
677 int pos = 0; 677 int pos = 0;
678 678
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
new file mode 100644
index 000000000000..4dc20321b0d5
--- /dev/null
+++ b/lib/cpu-notifier-error-inject.c
@@ -0,0 +1,63 @@
1#include <linux/kernel.h>
2#include <linux/cpu.h>
3#include <linux/module.h>
4#include <linux/notifier.h>
5
6static int priority;
7static int cpu_up_prepare_error;
8static int cpu_down_prepare_error;
9
10module_param(priority, int, 0);
11MODULE_PARM_DESC(priority, "specify cpu notifier priority");
12
13module_param(cpu_up_prepare_error, int, 0644);
14MODULE_PARM_DESC(cpu_up_prepare_error,
15 "specify error code to inject CPU_UP_PREPARE action");
16
17module_param(cpu_down_prepare_error, int, 0644);
18MODULE_PARM_DESC(cpu_down_prepare_error,
19 "specify error code to inject CPU_DOWN_PREPARE action");
20
21static int err_inject_cpu_callback(struct notifier_block *nfb,
22 unsigned long action, void *hcpu)
23{
24 int err = 0;
25
26 switch (action) {
27 case CPU_UP_PREPARE:
28 case CPU_UP_PREPARE_FROZEN:
29 err = cpu_up_prepare_error;
30 break;
31 case CPU_DOWN_PREPARE:
32 case CPU_DOWN_PREPARE_FROZEN:
33 err = cpu_down_prepare_error;
34 break;
35 }
36 if (err)
37 printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
38
39 return notifier_from_errno(err);
40}
41
42static struct notifier_block err_inject_cpu_notifier = {
43 .notifier_call = err_inject_cpu_callback,
44};
45
46static int err_inject_init(void)
47{
48 err_inject_cpu_notifier.priority = priority;
49
50 return register_hotcpu_notifier(&err_inject_cpu_notifier);
51}
52
53static void err_inject_exit(void)
54{
55 unregister_hotcpu_notifier(&err_inject_cpu_notifier);
56}
57
58module_init(err_inject_init);
59module_exit(err_inject_exit);
60
61MODULE_DESCRIPTION("CPU notifier error injection module");
62MODULE_LICENSE("GPL");
63MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
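Once loaded (the two error parameters are mode 0644, so they can also be changed later through /sys/module/cpu-notifier-error-inject/parameters/), the injected value surfaces in the hotplug core. A hedged sketch of the consuming side in kernel/cpu.c's _cpu_up(), approximate for this kernel version:

	ret = notifier_to_errno(__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls));
	if (ret) {
		nr_calls--;	/* the failing callback cleans up after itself */
		__raw_notifier_call_chain(&cpu_chain, CPU_UP_CANCELED | mod,
					  hcpu, nr_calls, NULL);
		goto out_notify;	/* cpu_up() fails with the injected errno */
	}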
diff --git a/lib/crc32.c b/lib/crc32.c
index 3087ed899ee3..4855995fcde9 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -50,7 +50,7 @@ MODULE_LICENSE("GPL");
50static inline u32 50static inline u32
51crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) 51crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
52{ 52{
53# if __BYTE_ORDER == __LITTLE_ENDIAN 53# ifdef __LITTLE_ENDIAN
54# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8) 54# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
55# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \ 55# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
56 tab[2][(crc >> 8) & 255] ^ \ 56 tab[2][(crc >> 8) & 255] ^ \
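Rationale for the idiom change: __BYTE_ORDER is a userspace (glibc <endian.h>) convention that kernel headers do not guarantee, and the preprocessor silently evaluates unknown identifiers in an #if as 0, so the comparison can select the wrong branch. The kernel's <asm/byteorder.h> defines exactly one of __LITTLE_ENDIAN / __BIG_ENDIAN, which an #ifdef tests reliably (DO_CRC variants taken from this file):

#ifdef __LITTLE_ENDIAN
# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
#else
# define DO_CRC(x) crc = tab[0][((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
#endif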
diff --git a/lib/idr.c b/lib/idr.c
index 422a9d5069cc..c1a206901761 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -445,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
445void idr_remove_all(struct idr *idp) 445void idr_remove_all(struct idr *idp)
446{ 446{
447 int n, id, max; 447 int n, id, max;
448 int bt_mask;
448 struct idr_layer *p; 449 struct idr_layer *p;
449 struct idr_layer *pa[MAX_LEVEL]; 450 struct idr_layer *pa[MAX_LEVEL];
450 struct idr_layer **paa = &pa[0]; 451 struct idr_layer **paa = &pa[0];
@@ -462,8 +463,10 @@ void idr_remove_all(struct idr *idp)
462 p = p->ary[(id >> n) & IDR_MASK]; 463 p = p->ary[(id >> n) & IDR_MASK];
463 } 464 }
464 465
466 bt_mask = id;
465 id += 1 << n; 467 id += 1 << n;
466 while (n < fls(id)) { 468 /* Get the highest bit that the above add changed from 0->1. */
469 while (n < fls(id ^ bt_mask)) {
467 if (p) 470 if (p)
468 free_layer(p); 471 free_layer(p);
469 n += IDR_BITS; 472 n += IDR_BITS;
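Arithmetic behind the bt_mask fix, with illustrative values (IDR_BITS taken as 5): XOR-ing the pre- and post-increment ids isolates exactly the index bits the add changed, so fls() of the difference bounds the backtrack correctly, whereas fls(id) alone can overshoot and free a layer that still covers live ids:

	int bt_mask   = 0x5f;			/* id before "id += 1 << n"   */
	int id        = bt_mask + (1 << 5);	/* 0x7f afterwards            */
	int old_bound = fls(id);		/* == 7: overshoots           */
	int new_bound = fls(id ^ bt_mask);	/* == 6: only bit 5 changed   */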
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 2a087e0f9863..05da38bcc298 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -656,7 +656,7 @@ EXPORT_SYMBOL(radix_tree_next_hole);
656 * 656 *
657 * Returns: the index of the hole if found, otherwise returns an index 657 * Returns: the index of the hole if found, otherwise returns an index
658 * outside of the set specified (in which case 'index - return >= max_scan' 658 * outside of the set specified (in which case 'index - return >= max_scan'
659 * will be true). In rare cases of wrap-around, LONG_MAX will be returned. 659 * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
660 * 660 *
661 * radix_tree_next_hole may be called under rcu_read_lock. However, like 661 * radix_tree_next_hole may be called under rcu_read_lock. However, like
662 * radix_tree_gang_lookup, this will not atomically search a snapshot of 662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
@@ -674,7 +674,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
674 if (!radix_tree_lookup(root, index)) 674 if (!radix_tree_lookup(root, index))
675 break; 675 break;
676 index--; 676 index--;
677 if (index == LONG_MAX) 677 if (index == ULONG_MAX)
678 break; 678 break;
679 } 679 }
680 680
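The index here is an unsigned long, so decrementing past zero wraps to ULONG_MAX, never LONG_MAX; on 64-bit the old comparison could therefore miss the wrap entirely. Minimal illustration:

	unsigned long index = 0;
	index--;	/* index == ULONG_MAX now; LONG_MAX is never produced */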
diff --git a/lib/random32.c b/lib/random32.c
index 217d5c4b666d..870dc3fc0f0f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,13 +39,16 @@
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/random.h> 40#include <linux/random.h>
41 41
42struct rnd_state {
43 u32 s1, s2, s3;
44};
45
46static DEFINE_PER_CPU(struct rnd_state, net_rand_state); 42static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
47 43
48static u32 __random32(struct rnd_state *state) 44/**
45 * prandom32 - seeded pseudo-random number generator.
46 * @state: pointer to state structure holding seeded state.
47 *
48 * This is used for pseudo-randomness with no outside seeding.
49 * For more random results, use random32().
50 */
51u32 prandom32(struct rnd_state *state)
49{ 52{
50#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) 53#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
51 54
@@ -55,14 +58,7 @@ static u32 __random32(struct rnd_state *state)
55 58
56 return (state->s1 ^ state->s2 ^ state->s3); 59 return (state->s1 ^ state->s2 ^ state->s3);
57} 60}
58 61EXPORT_SYMBOL(prandom32);
59/*
60 * Handle minimum values for seeds
61 */
62static inline u32 __seed(u32 x, u32 m)
63{
64 return (x < m) ? x + m : x;
65}
66 62
67/** 63/**
68 * random32 - pseudo random number generator 64 * random32 - pseudo random number generator
@@ -75,7 +71,7 @@ u32 random32(void)
75{ 71{
76 unsigned long r; 72 unsigned long r;
77 struct rnd_state *state = &get_cpu_var(net_rand_state); 73 struct rnd_state *state = &get_cpu_var(net_rand_state);
78 r = __random32(state); 74 r = prandom32(state);
79 put_cpu_var(state); 75 put_cpu_var(state);
80 return r; 76 return r;
81} 77}
@@ -118,12 +114,12 @@ static int __init random32_init(void)
118 state->s3 = __seed(LCG(state->s2), 15); 114 state->s3 = __seed(LCG(state->s2), 15);
119 115
120 /* "warm it up" */ 116 /* "warm it up" */
121 __random32(state); 117 prandom32(state);
122 __random32(state); 118 prandom32(state);
123 __random32(state); 119 prandom32(state);
124 __random32(state); 120 prandom32(state);
125 __random32(state); 121 prandom32(state);
126 __random32(state); 122 prandom32(state);
127 } 123 }
128 return 0; 124 return 0;
129} 125}
@@ -147,7 +143,7 @@ static int __init random32_reseed(void)
147 state->s3 = __seed(seeds[2], 15); 143 state->s3 = __seed(seeds[2], 15);
148 144
149 /* mix it in */ 145 /* mix it in */
150 __random32(state); 146 prandom32(state);
151 } 147 }
152 return 0; 148 return 0;
153} 149}
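A hedged usage sketch for the newly exported generator, assuming the companion header change in this series moves struct rnd_state and __seed() into linux/random.h: state is caller-owned (no locking implied) and must be seeded first. The per-word minimums (1, 7, 15 -- only the last is visible in the hunk above) are assumptions matching the generator's masked low bits.

#include <linux/random.h>

static struct rnd_state example_state;	/* per-caller state, illustrative */

static void example_seed(u32 entropy)
{
	example_state.s1 = __seed(entropy, 1);
	example_state.s2 = __seed(entropy, 7);
	example_state.s3 = __seed(entropy, 15);
	prandom32(&example_state);	/* one warm-up step, as done above */
}

static u32 example_rand(void)
{
	return prandom32(&example_state);
}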
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5fddf720da73..a009055140ec 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -757,37 +757,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
757EXPORT_SYMBOL(swiotlb_sync_single_for_device); 757EXPORT_SYMBOL(swiotlb_sync_single_for_device);
758 758
759/* 759/*
760 * Same as above, but for a sub-range of the mapping.
761 */
762static void
763swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
764 unsigned long offset, size_t size,
765 int dir, int target)
766{
767 swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
768}
769
770void
771swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
772 unsigned long offset, size_t size,
773 enum dma_data_direction dir)
774{
775 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
776 SYNC_FOR_CPU);
777}
778EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
779
780void
781swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
782 unsigned long offset, size_t size,
783 enum dma_data_direction dir)
784{
785 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
786 SYNC_FOR_DEVICE);
787}
788EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
789
790/*
791 * Map a set of buffers described by scatterlist in streaming mode for DMA. 760 * Map a set of buffers described by scatterlist in streaming mode for DMA.
792 * This is the scatter-gather version of the above swiotlb_map_page 761 * This is the scatter-gather version of the above swiotlb_map_page
793 * interface. Here the scatter gather list elements are each tagged with the 762 * interface. Here the scatter gather list elements are each tagged with the
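The removed wrappers only folded the offset into the bus address before delegating, so any remaining caller converts mechanically (sketch):

	/* before */
	swiotlb_sync_single_range_for_cpu(hwdev, dev_addr, offset, size, dir);
	/* after */
	swiotlb_sync_single_for_cpu(hwdev, dev_addr + offset, size, dir);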
diff --git a/mm/filemap.c b/mm/filemap.c
index 88d719665a28..35e12d186566 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1105,6 +1105,12 @@ page_not_up_to_date_locked:
1105 } 1105 }
1106 1106
1107readpage: 1107readpage:
1108 /*
1109 * A previous I/O error may have been due to temporary
1110 * failures, eg. multipath errors.
1111 * PG_error will be set again if readpage fails.
1112 */
1113 ClearPageError(page);
1108 /* Start the actual read. The read will unlock the page. */ 1114 /* Start the actual read. The read will unlock the page. */
1109 error = mapping->a_ops->readpage(filp, page); 1115 error = mapping->a_ops->readpage(filp, page);
1110 1116
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c8569bc298ff..c6ece0a57595 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -149,16 +149,35 @@ struct mem_cgroup_threshold {
149 u64 threshold; 149 u64 threshold;
150}; 150};
151 151
152/* For threshold */
152struct mem_cgroup_threshold_ary { 153struct mem_cgroup_threshold_ary {
153 /* An array index points to threshold just below usage. */ 154 /* An array index points to threshold just below usage. */
154 atomic_t current_threshold; 155 int current_threshold;
155 /* Size of entries[] */ 156 /* Size of entries[] */
156 unsigned int size; 157 unsigned int size;
157 /* Array of thresholds */ 158 /* Array of thresholds */
158 struct mem_cgroup_threshold entries[0]; 159 struct mem_cgroup_threshold entries[0];
159}; 160};
160 161
162struct mem_cgroup_thresholds {
163 /* Primary thresholds array */
164 struct mem_cgroup_threshold_ary *primary;
165 /*
166 * Spare threshold array.
167 * This is needed to make mem_cgroup_unregister_event() "never fail".
168 * It must be able to store at least primary->size - 1 entries.
169 */
170 struct mem_cgroup_threshold_ary *spare;
171};
172
173/* for OOM */
174struct mem_cgroup_eventfd_list {
175 struct list_head list;
176 struct eventfd_ctx *eventfd;
177};
178
161static void mem_cgroup_threshold(struct mem_cgroup *mem); 179static void mem_cgroup_threshold(struct mem_cgroup *mem);
180static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
162 181
163/* 182/*
164 * The memory controller data structure. The memory controller controls both 183 * The memory controller data structure. The memory controller controls both
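The point of the primary/spare pair is that unregistering an event must never fail. Condensed from the register/unregister paths later in this patch, the unregister side never allocates:

	new = thresholds->spare;	/* preallocated by the last register */
	/* ... copy the surviving entries from thresholds->primary ... */
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);
	synchronize_rcu();		/* no reader still sees the old array */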
@@ -207,6 +226,8 @@ struct mem_cgroup {
207 atomic_t refcnt; 226 atomic_t refcnt;
208 227
209 unsigned int swappiness; 228 unsigned int swappiness;
229 /* OOM-Killer disable */
230 int oom_kill_disable;
210 231
211 /* set when res.limit == memsw.limit */ 232 /* set when res.limit == memsw.limit */
212 bool memsw_is_minimum; 233 bool memsw_is_minimum;
@@ -215,17 +236,19 @@ struct mem_cgroup {
215 struct mutex thresholds_lock; 236 struct mutex thresholds_lock;
216 237
217 /* thresholds for memory usage. RCU-protected */ 238 /* thresholds for memory usage. RCU-protected */
218 struct mem_cgroup_threshold_ary *thresholds; 239 struct mem_cgroup_thresholds thresholds;
219 240
220 /* thresholds for mem+swap usage. RCU-protected */ 241 /* thresholds for mem+swap usage. RCU-protected */
221 struct mem_cgroup_threshold_ary *memsw_thresholds; 242 struct mem_cgroup_thresholds memsw_thresholds;
243
244 /* For oom notifier event fd */
245 struct list_head oom_notify;
222 246
223 /* 247 /*
224 * Should we move charges of a task when a task is moved into this 248 * Should we move charges of a task when a task is moved into this
225 * mem_cgroup ? And what type of charges should we move ? 249 * mem_cgroup ? And what type of charges should we move ?
226 */ 250 */
227 unsigned long move_charge_at_immigrate; 251 unsigned long move_charge_at_immigrate;
228
229 /* 252 /*
230 * percpu counter. 253 * percpu counter.
231 */ 254 */
@@ -239,6 +262,7 @@ struct mem_cgroup {
239 */ 262 */
240enum move_type { 263enum move_type {
241 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ 264 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
265 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
242 NR_MOVE_TYPE, 266 NR_MOVE_TYPE,
243}; 267};
244 268
@@ -255,6 +279,18 @@ static struct move_charge_struct {
255 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 279 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
256}; 280};
257 281
282static bool move_anon(void)
283{
284 return test_bit(MOVE_CHARGE_TYPE_ANON,
285 &mc.to->move_charge_at_immigrate);
286}
287
288static bool move_file(void)
289{
290 return test_bit(MOVE_CHARGE_TYPE_FILE,
291 &mc.to->move_charge_at_immigrate);
292}
293
258/* 294/*
259 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 295 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
260 * limit reclaim to prevent infinite loops, if they ever occur. 296 * limit reclaim to prevent infinite loops, if they ever occur.
@@ -282,9 +318,12 @@ enum charge_type {
282/* for encoding cft->private value on file */ 318/* for encoding cft->private value on file */
283#define _MEM (0) 319#define _MEM (0)
284#define _MEMSWAP (1) 320#define _MEMSWAP (1)
321#define _OOM_TYPE (2)
285#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) 322#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
286#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) 323#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
287#define MEMFILE_ATTR(val) ((val) & 0xffff) 324#define MEMFILE_ATTR(val) ((val) & 0xffff)
325/* Used for OOM nofiier */
326#define OOM_CONTROL (0)
288 327
289/* 328/*
290 * Reclaim flags for mem_cgroup_hierarchical_reclaim 329 * Reclaim flags for mem_cgroup_hierarchical_reclaim
@@ -1293,14 +1332,62 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1293static DEFINE_MUTEX(memcg_oom_mutex); 1332static DEFINE_MUTEX(memcg_oom_mutex);
1294static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1333static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1295 1334
1335struct oom_wait_info {
1336 struct mem_cgroup *mem;
1337 wait_queue_t wait;
1338};
1339
1340static int memcg_oom_wake_function(wait_queue_t *wait,
1341 unsigned mode, int sync, void *arg)
1342{
1343 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1344 struct oom_wait_info *oom_wait_info;
1345
1346 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1347
1348 if (oom_wait_info->mem == wake_mem)
1349 goto wakeup;
1350 /* if no hierarchy, no match */
1351 if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1352 return 0;
1353 /*
1354 * Both of oom_wait_info->mem and wake_mem are stable under us.
1355 * Then we can use css_is_ancestor without taking care of RCU.
1356 */
1357 if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1358 !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1359 return 0;
1360
1361wakeup:
1362 return autoremove_wake_function(wait, mode, sync, arg);
1363}
1364
1365static void memcg_wakeup_oom(struct mem_cgroup *mem)
1366{
1367 /* for filtering, pass "mem" as argument. */
1368 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1369}
1370
1371static void memcg_oom_recover(struct mem_cgroup *mem)
1372{
1373 if (mem->oom_kill_disable && atomic_read(&mem->oom_lock))
1374 memcg_wakeup_oom(mem);
1375}
1376
1296/* 1377/*
1297 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1378 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
1298 */ 1379 */
1299bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) 1380bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1300{ 1381{
1301 DEFINE_WAIT(wait); 1382 struct oom_wait_info owait;
1302 bool locked; 1383 bool locked, need_to_kill;
1303 1384
1385 owait.mem = mem;
1386 owait.wait.flags = 0;
1387 owait.wait.func = memcg_oom_wake_function;
1388 owait.wait.private = current;
1389 INIT_LIST_HEAD(&owait.wait.task_list);
1390 need_to_kill = true;
1304 /* At first, try to OOM lock hierarchy under mem.*/ 1391 /* At first, try to OOM lock hierarchy under mem.*/
1305 mutex_lock(&memcg_oom_mutex); 1392 mutex_lock(&memcg_oom_mutex);
1306 locked = mem_cgroup_oom_lock(mem); 1393 locked = mem_cgroup_oom_lock(mem);
@@ -1309,32 +1396,23 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1309 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 1396 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1310 * under OOM is always welcomed, use TASK_KILLABLE here. 1397 * under OOM is always welcomed, use TASK_KILLABLE here.
1311 */ 1398 */
1312 if (!locked) 1399 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1313 prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE); 1400 if (!locked || mem->oom_kill_disable)
1401 need_to_kill = false;
1402 if (locked)
1403 mem_cgroup_oom_notify(mem);
1314 mutex_unlock(&memcg_oom_mutex); 1404 mutex_unlock(&memcg_oom_mutex);
1315 1405
1316 if (locked) 1406 if (need_to_kill) {
1407 finish_wait(&memcg_oom_waitq, &owait.wait);
1317 mem_cgroup_out_of_memory(mem, mask); 1408 mem_cgroup_out_of_memory(mem, mask);
1318 else { 1409 } else {
1319 schedule(); 1410 schedule();
1320 finish_wait(&memcg_oom_waitq, &wait); 1411 finish_wait(&memcg_oom_waitq, &owait.wait);
1321 } 1412 }
1322 mutex_lock(&memcg_oom_mutex); 1413 mutex_lock(&memcg_oom_mutex);
1323 mem_cgroup_oom_unlock(mem); 1414 mem_cgroup_oom_unlock(mem);
1324 /* 1415 memcg_wakeup_oom(mem);
1325 * Here, we use global waitq .....more fine grained waitq ?
1326 * Assume following hierarchy.
1327 * A/
1328 * 01
1329 * 02
1330 * assume OOM happens both in A and 01 at the same time. They are
1331 * mutually exclusive by lock. (kill in 01 helps A.)
1332 * When we use per memcg waitq, we have to wake up waiters on A and 02
1333 * in addition to waiters on 01. We use global waitq for avoiding mess.
1334 * It will not be a big problem.
1335 * (And a task may be moved to other groups while it's waiting for OOM.)
1336 */
1337 wake_up_all(&memcg_oom_waitq);
1338 mutex_unlock(&memcg_oom_mutex); 1416 mutex_unlock(&memcg_oom_mutex);
1339 1417
1340 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) 1418 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
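How the filtered wakeup above works (sketch): __wake_up() hands its last argument to every waiter's ->func as the key, so passing the recovering mem_cgroup lets memcg_oom_wake_function() wake only waiters in the same hierarchy rather than the whole global queue:

	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);	/* mem == wake key */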
@@ -2118,15 +2196,6 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2118 /* If swapout, usage of swap doesn't decrease */ 2196 /* If swapout, usage of swap doesn't decrease */
2119 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 2197 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2120 uncharge_memsw = false; 2198 uncharge_memsw = false;
2121 /*
2122 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2123 * In those cases, all pages freed continously can be expected to be in
2124 * the same cgroup and we have chance to coalesce uncharges.
2125 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
2126 * because we want to do uncharge as soon as possible.
2127 */
2128 if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
2129 goto direct_uncharge;
2130 2199
2131 batch = &current->memcg_batch; 2200 batch = &current->memcg_batch;
2132 /* 2201 /*
@@ -2137,6 +2206,17 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2137 if (!batch->memcg) 2206 if (!batch->memcg)
2138 batch->memcg = mem; 2207 batch->memcg = mem;
2139 /* 2208 /*
2209 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2210 * In those cases, all pages freed continously can be expected to be in
2211 * the same cgroup and we have chance to coalesce uncharges.
2212 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
2213 * because we want to do uncharge as soon as possible.
2214 */
2215
2216 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2217 goto direct_uncharge;
2218
2219 /*
2140 * In typical case, batch->memcg == mem. This means we can 2220 * In typical case, batch->memcg == mem. This means we can
2141 * merge a series of uncharges to an uncharge of res_counter. 2221 * merge a series of uncharges to an uncharge of res_counter.
2142 * If not, we uncharge res_counter one by one. 2222 * If not, we uncharge res_counter one by one.
@@ -2152,6 +2232,8 @@ direct_uncharge:
2152 res_counter_uncharge(&mem->res, PAGE_SIZE); 2232 res_counter_uncharge(&mem->res, PAGE_SIZE);
2153 if (uncharge_memsw) 2233 if (uncharge_memsw)
2154 res_counter_uncharge(&mem->memsw, PAGE_SIZE); 2234 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2235 if (unlikely(batch->memcg != mem))
2236 memcg_oom_recover(mem);
2155 return; 2237 return;
2156} 2238}
2157 2239
@@ -2188,7 +2270,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2188 switch (ctype) { 2270 switch (ctype) {
2189 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2271 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2190 case MEM_CGROUP_CHARGE_TYPE_DROP: 2272 case MEM_CGROUP_CHARGE_TYPE_DROP:
2191 if (page_mapped(page)) 2273 /* See mem_cgroup_prepare_migration() */
2274 if (page_mapped(page) || PageCgroupMigration(pc))
2192 goto unlock_out; 2275 goto unlock_out;
2193 break; 2276 break;
2194 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 2277 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
@@ -2288,6 +2371,7 @@ void mem_cgroup_uncharge_end(void)
2288 res_counter_uncharge(&batch->memcg->res, batch->bytes); 2371 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2289 if (batch->memsw_bytes) 2372 if (batch->memsw_bytes)
2290 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes); 2373 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2374 memcg_oom_recover(batch->memcg);
2291 /* forget this pointer (for sanity check) */ 2375 /* forget this pointer (for sanity check) */
2292 batch->memcg = NULL; 2376 batch->memcg = NULL;
2293} 2377}
@@ -2410,10 +2494,12 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2410 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 2494 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2411 * page belongs to. 2495 * page belongs to.
2412 */ 2496 */
2413int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) 2497int mem_cgroup_prepare_migration(struct page *page,
2498 struct page *newpage, struct mem_cgroup **ptr)
2414{ 2499{
2415 struct page_cgroup *pc; 2500 struct page_cgroup *pc;
2416 struct mem_cgroup *mem = NULL; 2501 struct mem_cgroup *mem = NULL;
2502 enum charge_type ctype;
2417 int ret = 0; 2503 int ret = 0;
2418 2504
2419 if (mem_cgroup_disabled()) 2505 if (mem_cgroup_disabled())
@@ -2424,69 +2510,125 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
2424 if (PageCgroupUsed(pc)) { 2510 if (PageCgroupUsed(pc)) {
2425 mem = pc->mem_cgroup; 2511 mem = pc->mem_cgroup;
2426 css_get(&mem->css); 2512 css_get(&mem->css);
2513 /*
2514 * At migrating an anonymous page, its mapcount goes down
2515 * to 0 and uncharge() will be called. But, even if it's fully
2516 * unmapped, migration may fail and this page has to be
2517 * charged again. We set MIGRATION flag here and delay uncharge
2518 * until end_migration() is called
2519 *
2520 * Corner Case Thinking
2521 * A)
2522 * When the old page was mapped as Anon and it's unmap-and-freed
2523 * while migration was ongoing.
2524 * If unmap finds the old page, uncharge() of it will be delayed
2525 * until end_migration(). If unmap finds a new page, it's
2526 * uncharged when it makes the mapcount go from 1 to 0. If unmap code
2527 * finds swap_migration_entry, the new page will not be mapped
2528 * and end_migration() will find it(mapcount==0).
2529 *
2530 * B)
2531 * When the old page was mapped but migration fails, the kernel
2532 * remaps it. A charge for it is kept by MIGRATION flag even
2533 * if mapcount goes down to 0. We can do remap successfully
2534 * without charging it again.
2535 *
2536 * C)
2537 * The "old" page is under lock_page() until the end of
2538 * migration, so, the old page itself will not be swapped-out.
2539 * If the new page is swapped out before end_migration, our
2540 * hook to usual swap-out path will catch the event.
2541 */
2542 if (PageAnon(page))
2543 SetPageCgroupMigration(pc);
2427 } 2544 }
2428 unlock_page_cgroup(pc); 2545 unlock_page_cgroup(pc);
2546 /*
2547 * If the page is not charged at this point,
2548 * we return here.
2549 */
2550 if (!mem)
2551 return 0;
2429 2552
2430 *ptr = mem; 2553 *ptr = mem;
2431 if (mem) { 2554 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2432 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false); 2555 css_put(&mem->css);/* drop extra refcnt */
2433 css_put(&mem->css); 2556 if (ret || *ptr == NULL) {
2557 if (PageAnon(page)) {
2558 lock_page_cgroup(pc);
2559 ClearPageCgroupMigration(pc);
2560 unlock_page_cgroup(pc);
2561 /*
2562 * The old page may be fully unmapped while we kept it.
2563 */
2564 mem_cgroup_uncharge_page(page);
2565 }
2566 return -ENOMEM;
2434 } 2567 }
2568 /*
2569 * We charge new page before it's used/mapped. So, even if unlock_page()
2570 * is called before end_migration, we can catch all events on this new
2571 * page. In the case the new page is migrated but not remapped, the new page's
2572 * mapcount will finally be 0 and we call uncharge in end_migration().
2573 */
2574 pc = lookup_page_cgroup(newpage);
2575 if (PageAnon(page))
2576 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2577 else if (page_is_file_cache(page))
2578 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2579 else
2580 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2581 __mem_cgroup_commit_charge(mem, pc, ctype);
2435 return ret; 2582 return ret;
2436} 2583}
2437 2584
2438/* remove redundant charge if migration failed*/ 2585/* remove redundant charge if migration failed*/
2439void mem_cgroup_end_migration(struct mem_cgroup *mem, 2586void mem_cgroup_end_migration(struct mem_cgroup *mem,
2440 struct page *oldpage, struct page *newpage) 2587 struct page *oldpage, struct page *newpage)
2441{ 2588{
2442 struct page *target, *unused; 2589 struct page *used, *unused;
2443 struct page_cgroup *pc; 2590 struct page_cgroup *pc;
2444 enum charge_type ctype;
2445 2591
2446 if (!mem) 2592 if (!mem)
2447 return; 2593 return;
2594 /* blocks rmdir() */
2448 cgroup_exclude_rmdir(&mem->css); 2595 cgroup_exclude_rmdir(&mem->css);
2449 /* at migration success, oldpage->mapping is NULL. */ 2596 /* at migration success, oldpage->mapping is NULL. */
2450 if (oldpage->mapping) { 2597 if (oldpage->mapping) {
2451 target = oldpage; 2598 used = oldpage;
2452 unused = NULL; 2599 unused = newpage;
2453 } else { 2600 } else {
2454 target = newpage; 2601 used = newpage;
2455 unused = oldpage; 2602 unused = oldpage;
2456 } 2603 }
2457
2458 if (PageAnon(target))
2459 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2460 else if (page_is_file_cache(target))
2461 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2462 else
2463 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2464
2465 /* unused page is not on radix-tree now. */
2466 if (unused)
2467 __mem_cgroup_uncharge_common(unused, ctype);
2468
2469 pc = lookup_page_cgroup(target);
2470 /* 2604 /*
2471 * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup. 2605 * We disallowed uncharge of pages under migration because mapcount
2472 * So, double-counting is effectively avoided. 2606 * of the page goes down to zero, temporarily.
2607 * Clear the flag and check the page should be charged.
2473 */ 2608 */
2474 __mem_cgroup_commit_charge(mem, pc, ctype); 2609 pc = lookup_page_cgroup(oldpage);
2610 lock_page_cgroup(pc);
2611 ClearPageCgroupMigration(pc);
2612 unlock_page_cgroup(pc);
2613
2614 if (unused != oldpage)
2615 pc = lookup_page_cgroup(unused);
2616 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2475 2617
2618 pc = lookup_page_cgroup(used);
2476 /* 2619 /*
2477 * Both of oldpage and newpage are still under lock_page(). 2620 * If a page is a file cache, radix-tree replacement is very atomic
2478 * Then, we don't have to care about race in radix-tree. 2621 * and we can skip this check. When it was an Anon page, its mapcount
2479 * But we have to be careful that this page is unmapped or not. 2622 * goes down to 0. But because we added the MIGRATION flag, it's not
2480 * 2623 * uncharged yet. There are several cases, but the page->mapcount check
2481 * There is a case for !page_mapped(). At the start of 2624 * and USED bit check in mem_cgroup_uncharge_page() will do enough
2482 * migration, oldpage was mapped. But now, it's zapped. 2625 * checking. (see prepare_charge() also)
2483 * But we know *target* page is not freed/reused under us.
2484 * mem_cgroup_uncharge_page() does all necessary checks.
2485 */ 2626 */
2486 if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) 2627 if (PageAnon(used))
2487 mem_cgroup_uncharge_page(target); 2628 mem_cgroup_uncharge_page(used);
2488 /* 2629 /*
2489 * At migration, we may charge account against cgroup which has no tasks 2630 * At migration, we may charge account against cgroup which has no
2631 * tasks.
2490 * So, rmdir()->pre_destroy() can be called while we do this charge. 2632 * So, rmdir()->pre_destroy() can be called while we do this charge.
2491 * In that case, we need to call pre_destroy() again. check it here. 2633 * In that case, we need to call pre_destroy() again. check it here.
2492 */ 2634 */
@@ -2524,10 +2666,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2524 unsigned long long val) 2666 unsigned long long val)
2525{ 2667{
2526 int retry_count; 2668 int retry_count;
2527 u64 memswlimit; 2669 u64 memswlimit, memlimit;
2528 int ret = 0; 2670 int ret = 0;
2529 int children = mem_cgroup_count_children(memcg); 2671 int children = mem_cgroup_count_children(memcg);
2530 u64 curusage, oldusage; 2672 u64 curusage, oldusage;
2673 int enlarge;
2531 2674
2532 /* 2675 /*
2533 * For keeping hierarchical_reclaim simple, how long we should retry 2676 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2538,6 +2681,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2538 2681
2539 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 2682 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2540 2683
2684 enlarge = 0;
2541 while (retry_count) { 2685 while (retry_count) {
2542 if (signal_pending(current)) { 2686 if (signal_pending(current)) {
2543 ret = -EINTR; 2687 ret = -EINTR;
@@ -2555,6 +2699,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2555 mutex_unlock(&set_limit_mutex); 2699 mutex_unlock(&set_limit_mutex);
2556 break; 2700 break;
2557 } 2701 }
2702
2703 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2704 if (memlimit < val)
2705 enlarge = 1;
2706
2558 ret = res_counter_set_limit(&memcg->res, val); 2707 ret = res_counter_set_limit(&memcg->res, val);
2559 if (!ret) { 2708 if (!ret) {
2560 if (memswlimit == val) 2709 if (memswlimit == val)
@@ -2576,6 +2725,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2576 else 2725 else
2577 oldusage = curusage; 2726 oldusage = curusage;
2578 } 2727 }
2728 if (!ret && enlarge)
2729 memcg_oom_recover(memcg);
2579 2730
2580 return ret; 2731 return ret;
2581} 2732}
@@ -2584,9 +2735,10 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2584 unsigned long long val) 2735 unsigned long long val)
2585{ 2736{
2586 int retry_count; 2737 int retry_count;
2587 u64 memlimit, oldusage, curusage; 2738 u64 memlimit, memswlimit, oldusage, curusage;
2588 int children = mem_cgroup_count_children(memcg); 2739 int children = mem_cgroup_count_children(memcg);
2589 int ret = -EBUSY; 2740 int ret = -EBUSY;
2741 int enlarge = 0;
2590 2742
2591 /* see mem_cgroup_resize_res_limit */ 2743 /* see mem_cgroup_resize_res_limit */
2592 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 2744 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
@@ -2608,6 +2760,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2608 mutex_unlock(&set_limit_mutex); 2760 mutex_unlock(&set_limit_mutex);
2609 break; 2761 break;
2610 } 2762 }
2763 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2764 if (memswlimit < val)
2765 enlarge = 1;
2611 ret = res_counter_set_limit(&memcg->memsw, val); 2766 ret = res_counter_set_limit(&memcg->memsw, val);
2612 if (!ret) { 2767 if (!ret) {
2613 if (memlimit == val) 2768 if (memlimit == val)
@@ -2630,6 +2785,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2630 else 2785 else
2631 oldusage = curusage; 2786 oldusage = curusage;
2632 } 2787 }
2788 if (!ret && enlarge)
2789 memcg_oom_recover(memcg);
2633 return ret; 2790 return ret;
2634} 2791}
2635 2792
@@ -2821,6 +2978,7 @@ move_account:
2821 if (ret) 2978 if (ret)
2822 break; 2979 break;
2823 } 2980 }
2981 memcg_oom_recover(mem);
2824 /* it seems parent cgroup doesn't have enough mem */ 2982 /* it seems parent cgroup doesn't have enough mem */
2825 if (ret == -ENOMEM) 2983 if (ret == -ENOMEM)
2826 goto try_to_free; 2984 goto try_to_free;
@@ -3311,9 +3469,9 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3311 3469
3312 rcu_read_lock(); 3470 rcu_read_lock();
3313 if (!swap) 3471 if (!swap)
3314 t = rcu_dereference(memcg->thresholds); 3472 t = rcu_dereference(memcg->thresholds.primary);
3315 else 3473 else
3316 t = rcu_dereference(memcg->memsw_thresholds); 3474 t = rcu_dereference(memcg->memsw_thresholds.primary);
3317 3475
3318 if (!t) 3476 if (!t)
3319 goto unlock; 3477 goto unlock;
@@ -3325,7 +3483,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3325 * If it's not true, a threshold was crossed after last 3483 * If it's not true, a threshold was crossed after last
3326 * call of __mem_cgroup_threshold(). 3484 * call of __mem_cgroup_threshold().
3327 */ 3485 */
3328 i = atomic_read(&t->current_threshold); 3486 i = t->current_threshold;
3329 3487
3330 /* 3488 /*
3331 * Iterate backward over array of thresholds starting from 3489 * Iterate backward over array of thresholds starting from
@@ -3349,7 +3507,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3349 eventfd_signal(t->entries[i].eventfd, 1); 3507 eventfd_signal(t->entries[i].eventfd, 1);
3350 3508
3351 /* Update current_threshold */ 3509 /* Update current_threshold */
3352 atomic_set(&t->current_threshold, i - 1); 3510 t->current_threshold = i - 1;
3353unlock: 3511unlock:
3354 rcu_read_unlock(); 3512 rcu_read_unlock();
3355} 3513}
@@ -3369,106 +3527,117 @@ static int compare_thresholds(const void *a, const void *b)
3369 return _a->threshold - _b->threshold; 3527 return _a->threshold - _b->threshold;
3370} 3528}
3371 3529
3372static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft, 3530static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
3373 struct eventfd_ctx *eventfd, const char *args) 3531{
3532 struct mem_cgroup_eventfd_list *ev;
3533
3534 list_for_each_entry(ev, &mem->oom_notify, list)
3535 eventfd_signal(ev->eventfd, 1);
3536 return 0;
3537}
3538
3539static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3540{
3541 mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
3542}
3543
3544static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3545 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3374{ 3546{
3375 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 3547 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3376 struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; 3548 struct mem_cgroup_thresholds *thresholds;
3549 struct mem_cgroup_threshold_ary *new;
3377 int type = MEMFILE_TYPE(cft->private); 3550 int type = MEMFILE_TYPE(cft->private);
3378 u64 threshold, usage; 3551 u64 threshold, usage;
3379 int size; 3552 int i, size, ret;
3380 int i, ret;
3381 3553
3382 ret = res_counter_memparse_write_strategy(args, &threshold); 3554 ret = res_counter_memparse_write_strategy(args, &threshold);
3383 if (ret) 3555 if (ret)
3384 return ret; 3556 return ret;
3385 3557
3386 mutex_lock(&memcg->thresholds_lock); 3558 mutex_lock(&memcg->thresholds_lock);
3559
3387 if (type == _MEM) 3560 if (type == _MEM)
3388 thresholds = memcg->thresholds; 3561 thresholds = &memcg->thresholds;
3389 else if (type == _MEMSWAP) 3562 else if (type == _MEMSWAP)
3390 thresholds = memcg->memsw_thresholds; 3563 thresholds = &memcg->memsw_thresholds;
3391 else 3564 else
3392 BUG(); 3565 BUG();
3393 3566
3394 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 3567 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3395 3568
3396 /* Check if a threshold crossed before adding a new one */ 3569 /* Check if a threshold crossed before adding a new one */
3397 if (thresholds) 3570 if (thresholds->primary)
3398 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3571 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3399 3572
3400 if (thresholds) 3573 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3401 size = thresholds->size + 1;
3402 else
3403 size = 1;
3404 3574
3405 /* Allocate memory for new array of thresholds */ 3575 /* Allocate memory for new array of thresholds */
3406 thresholds_new = kmalloc(sizeof(*thresholds_new) + 3576 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3407 size * sizeof(struct mem_cgroup_threshold),
3408 GFP_KERNEL); 3577 GFP_KERNEL);
3409 if (!thresholds_new) { 3578 if (!new) {
3410 ret = -ENOMEM; 3579 ret = -ENOMEM;
3411 goto unlock; 3580 goto unlock;
3412 } 3581 }
3413 thresholds_new->size = size; 3582 new->size = size;
3414 3583
3415 /* Copy thresholds (if any) to new array */ 3584 /* Copy thresholds (if any) to new array */
3416 if (thresholds) 3585 if (thresholds->primary) {
3417 memcpy(thresholds_new->entries, thresholds->entries, 3586 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3418 thresholds->size *
3419 sizeof(struct mem_cgroup_threshold)); 3587 sizeof(struct mem_cgroup_threshold));
3588 }
3589
3420 /* Add new threshold */ 3590 /* Add new threshold */
3421 thresholds_new->entries[size - 1].eventfd = eventfd; 3591 new->entries[size - 1].eventfd = eventfd;
3422 thresholds_new->entries[size - 1].threshold = threshold; 3592 new->entries[size - 1].threshold = threshold;
3423 3593
3424 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3594 /* Sort thresholds. Registering of new threshold isn't time-critical */
3425 sort(thresholds_new->entries, size, 3595 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3426 sizeof(struct mem_cgroup_threshold),
3427 compare_thresholds, NULL); 3596 compare_thresholds, NULL);
3428 3597
3429 /* Find current threshold */ 3598 /* Find current threshold */
3430 atomic_set(&thresholds_new->current_threshold, -1); 3599 new->current_threshold = -1;
3431 for (i = 0; i < size; i++) { 3600 for (i = 0; i < size; i++) {
3432 if (thresholds_new->entries[i].threshold < usage) { 3601 if (new->entries[i].threshold < usage) {
3433 /* 3602 /*
3434 * thresholds_new->current_threshold will not be used 3603 * new->current_threshold will not be used until
3435 * until rcu_assign_pointer(), so it's safe to increment 3604 * rcu_assign_pointer(), so it's safe to increment
3436 * it here. 3605 * it here.
3437 */ 3606 */
3438 atomic_inc(&thresholds_new->current_threshold); 3607 ++new->current_threshold;
3439 } 3608 }
3440 } 3609 }
3441 3610
3442 if (type == _MEM) 3611 /* Free old spare buffer and save old primary buffer as spare */
3443 rcu_assign_pointer(memcg->thresholds, thresholds_new); 3612 kfree(thresholds->spare);
3444 else 3613 thresholds->spare = thresholds->primary;
3445 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new); 3614
3615 rcu_assign_pointer(thresholds->primary, new);
3446 3616
3447 /* To be sure that nobody uses thresholds before freeing it */ 3617 /* To be sure that nobody uses thresholds */
3448 synchronize_rcu(); 3618 synchronize_rcu();
3449 3619
3450 kfree(thresholds);
3451unlock: 3620unlock:
3452 mutex_unlock(&memcg->thresholds_lock); 3621 mutex_unlock(&memcg->thresholds_lock);
3453 3622
3454 return ret; 3623 return ret;
3455} 3624}
3456 3625
3457static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft, 3626static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
3458 struct eventfd_ctx *eventfd) 3627 struct cftype *cft, struct eventfd_ctx *eventfd)
3459{ 3628{
3460 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 3629 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3461 struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; 3630 struct mem_cgroup_thresholds *thresholds;
3631 struct mem_cgroup_threshold_ary *new;
3462 int type = MEMFILE_TYPE(cft->private); 3632 int type = MEMFILE_TYPE(cft->private);
3463 u64 usage; 3633 u64 usage;
3464 int size = 0; 3634 int i, j, size;
3465 int i, j, ret;
3466 3635
3467 mutex_lock(&memcg->thresholds_lock); 3636 mutex_lock(&memcg->thresholds_lock);
3468 if (type == _MEM) 3637 if (type == _MEM)
3469 thresholds = memcg->thresholds; 3638 thresholds = &memcg->thresholds;
3470 else if (type == _MEMSWAP) 3639 else if (type == _MEMSWAP)
3471 thresholds = memcg->memsw_thresholds; 3640 thresholds = &memcg->memsw_thresholds;
3472 else 3641 else
3473 BUG(); 3642 BUG();
3474 3643
@@ -3484,59 +3653,136 @@ static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
3484 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3653 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3485 3654
3486 /* Calculate new number of threshold */ 3655 /* Calculate new number of threshold */
3487 for (i = 0; i < thresholds->size; i++) { 3656 size = 0;
3488 if (thresholds->entries[i].eventfd != eventfd) 3657 for (i = 0; i < thresholds->primary->size; i++) {
3658 if (thresholds->primary->entries[i].eventfd != eventfd)
3489 size++; 3659 size++;
3490 } 3660 }
3491 3661
3662 new = thresholds->spare;
3663
3492 /* Set thresholds array to NULL if we don't have thresholds */ 3664 /* Set thresholds array to NULL if we don't have thresholds */
3493 if (!size) { 3665 if (!size) {
3494 thresholds_new = NULL; 3666 kfree(new);
3495 goto assign; 3667 new = NULL;
3668 goto swap_buffers;
3496 } 3669 }
3497 3670
3498 /* Allocate memory for new array of thresholds */ 3671 new->size = size;
3499 thresholds_new = kmalloc(sizeof(*thresholds_new) +
3500 size * sizeof(struct mem_cgroup_threshold),
3501 GFP_KERNEL);
3502 if (!thresholds_new) {
3503 ret = -ENOMEM;
3504 goto unlock;
3505 }
3506 thresholds_new->size = size;
3507 3672
3508 /* Copy thresholds and find current threshold */ 3673 /* Copy thresholds and find current threshold */
3509 atomic_set(&thresholds_new->current_threshold, -1); 3674 new->current_threshold = -1;
3510 for (i = 0, j = 0; i < thresholds->size; i++) { 3675 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3511 if (thresholds->entries[i].eventfd == eventfd) 3676 if (thresholds->primary->entries[i].eventfd == eventfd)
3512 continue; 3677 continue;
3513 3678
3514 thresholds_new->entries[j] = thresholds->entries[i]; 3679 new->entries[j] = thresholds->primary->entries[i];
3515 if (thresholds_new->entries[j].threshold < usage) { 3680 if (new->entries[j].threshold < usage) {
3516 /* 3681 /*
3517 * thresholds_new->current_threshold will not be used 3682 * new->current_threshold will not be used
3518 * until rcu_assign_pointer(), so it's safe to increment 3683 * until rcu_assign_pointer(), so it's safe to increment
3519 * it here. 3684 * it here.
3520 */ 3685 */
3521 atomic_inc(&thresholds_new->current_threshold); 3686 ++new->current_threshold;
3522 } 3687 }
3523 j++; 3688 j++;
3524 } 3689 }
3525 3690
3526assign: 3691swap_buffers:
3527 if (type == _MEM) 3692 /* Swap primary and spare array */
3528 rcu_assign_pointer(memcg->thresholds, thresholds_new); 3693 thresholds->spare = thresholds->primary;
3529 else 3694 rcu_assign_pointer(thresholds->primary, new);
3530 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3531 3695
3532 /* To be sure that nobody uses thresholds before freeing it */ 3696 /* To be sure that nobody uses thresholds */
3533 synchronize_rcu(); 3697 synchronize_rcu();
3534 3698
3535 kfree(thresholds);
3536unlock:
3537 mutex_unlock(&memcg->thresholds_lock); 3699 mutex_unlock(&memcg->thresholds_lock);
3700}
3538 3701
3539 return ret; 3702static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3703 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3704{
3705 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3706 struct mem_cgroup_eventfd_list *event;
3707 int type = MEMFILE_TYPE(cft->private);
3708
3709 BUG_ON(type != _OOM_TYPE);
3710 event = kmalloc(sizeof(*event), GFP_KERNEL);
3711 if (!event)
3712 return -ENOMEM;
3713
3714 mutex_lock(&memcg_oom_mutex);
3715
3716 event->eventfd = eventfd;
3717 list_add(&event->list, &memcg->oom_notify);
3718
3719 /* already in OOM ? */
3720 if (atomic_read(&memcg->oom_lock))
3721 eventfd_signal(eventfd, 1);
3722 mutex_unlock(&memcg_oom_mutex);
3723
3724 return 0;
3725}
3726
3727static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
3728 struct cftype *cft, struct eventfd_ctx *eventfd)
3729{
3730 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3731 struct mem_cgroup_eventfd_list *ev, *tmp;
3732 int type = MEMFILE_TYPE(cft->private);
3733
3734 BUG_ON(type != _OOM_TYPE);
3735
3736 mutex_lock(&memcg_oom_mutex);
3737
3738 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3739 if (ev->eventfd == eventfd) {
3740 list_del(&ev->list);
3741 kfree(ev);
3742 }
3743 }
3744
3745 mutex_unlock(&memcg_oom_mutex);
3746}
3747
3748static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3749 struct cftype *cft, struct cgroup_map_cb *cb)
3750{
3751 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3752
3753 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
3754
3755 if (atomic_read(&mem->oom_lock))
3756 cb->fill(cb, "under_oom", 1);
3757 else
3758 cb->fill(cb, "under_oom", 0);
3759 return 0;
3760}
3761
3762/*
3763 */
3764static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
3765 struct cftype *cft, u64 val)
3766{
3767 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3768 struct mem_cgroup *parent;
3769
3770 /* cannot set to root cgroup and only 0 and 1 are allowed */
3771 if (!cgrp->parent || !((val == 0) || (val == 1)))
3772 return -EINVAL;
3773
3774 parent = mem_cgroup_from_cont(cgrp->parent);
3775
3776 cgroup_lock();
3777 /* oom-kill-disable is a flag for subhierarchy. */
3778 if ((parent->use_hierarchy) ||
3779 (mem->use_hierarchy && !list_empty(&cgrp->children))) {
3780 cgroup_unlock();
3781 return -EINVAL;
3782 }
3783 mem->oom_kill_disable = val;
3784 cgroup_unlock();
3785 return 0;
3540} 3786}
3541 3787
3542static struct cftype mem_cgroup_files[] = { 3788static struct cftype mem_cgroup_files[] = {
@@ -3544,8 +3790,8 @@ static struct cftype mem_cgroup_files[] = {
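For the new oom_control file: writing 1 sets oom_kill_disable for the (sub)hierarchy, and a notification can be armed through cgroup.event_control with an "<event_fd> <control_fd>" pair. A hedged userspace sketch (mount point and group name are illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char line[64];
	uint64_t count;
	int efd = eventfd(0, 0);
	int ofd = open("/cgroup/memory/A/memory.oom_control", O_RDONLY);
	int cfd = open("/cgroup/memory/A/cgroup.event_control", O_WRONLY);

	snprintf(line, sizeof(line), "%d %d", efd, ofd);
	write(cfd, line, strlen(line));		/* arm the OOM notification */

	read(efd, &count, sizeof(count));	/* blocks until the group OOMs */
	printf("group went OOM %llu time(s)\n", (unsigned long long)count);
	return 0;
}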
3544 .name = "usage_in_bytes", 3790 .name = "usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
 		.read_u64 = mem_cgroup_read,
-		.register_event = mem_cgroup_register_event,
-		.unregister_event = mem_cgroup_unregister_event,
+		.register_event = mem_cgroup_usage_register_event,
+		.unregister_event = mem_cgroup_usage_unregister_event,
 	},
 	{
 		.name = "max_usage_in_bytes",
@@ -3594,6 +3840,14 @@ static struct cftype mem_cgroup_files[] = {
 		.read_u64 = mem_cgroup_move_charge_read,
 		.write_u64 = mem_cgroup_move_charge_write,
 	},
+	{
+		.name = "oom_control",
+		.read_map = mem_cgroup_oom_control_read,
+		.write_u64 = mem_cgroup_oom_control_write,
+		.register_event = mem_cgroup_oom_register_event,
+		.unregister_event = mem_cgroup_oom_unregister_event,
+		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
+	},
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -3602,8 +3856,8 @@ static struct cftype memsw_cgroup_files[] = {
 		.name = "memsw.usage_in_bytes",
 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
 		.read_u64 = mem_cgroup_read,
-		.register_event = mem_cgroup_register_event,
-		.unregister_event = mem_cgroup_unregister_event,
+		.register_event = mem_cgroup_usage_register_event,
+		.unregister_event = mem_cgroup_usage_unregister_event,
 	},
 	{
 		.name = "memsw.max_usage_in_bytes",
@@ -3831,6 +4085,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	} else {
 		parent = mem_cgroup_from_cont(cont->parent);
 		mem->use_hierarchy = parent->use_hierarchy;
+		mem->oom_kill_disable = parent->oom_kill_disable;
 	}
 
 	if (parent && parent->use_hierarchy) {
@@ -3849,6 +4104,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	}
 	mem->last_scanned_child = 0;
 	spin_lock_init(&mem->reclaim_param_lock);
+	INIT_LIST_HEAD(&mem->oom_notify);
 
 	if (parent)
 		mem->swappiness = get_swappiness(parent);
@@ -3976,6 +4232,80 @@ enum mc_target_type {
 	MC_TARGET_SWAP,
 };
 
+static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
+						unsigned long addr, pte_t ptent)
+{
+	struct page *page = vm_normal_page(vma, addr, ptent);
+
+	if (!page || !page_mapped(page))
+		return NULL;
+	if (PageAnon(page)) {
+		/* we don't move shared anon */
+		if (!move_anon() || page_mapcount(page) > 2)
+			return NULL;
+	} else if (!move_file())
+		/* we ignore mapcount for file pages */
+		return NULL;
+	if (!get_page_unless_zero(page))
+		return NULL;
+
+	return page;
+}
+
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+			unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+	int usage_count;
+	struct page *page = NULL;
+	swp_entry_t ent = pte_to_swp_entry(ptent);
+
+	if (!move_anon() || non_swap_entry(ent))
+		return NULL;
+	usage_count = mem_cgroup_count_swap_user(ent, &page);
+	if (usage_count > 1) { /* we don't move shared anon */
+		if (page)
+			put_page(page);
+		return NULL;
+	}
+	if (do_swap_account)
+		entry->val = ent.val;
+
+	return page;
+}
+
+static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
+			unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+	struct page *page = NULL;
+	struct inode *inode;
+	struct address_space *mapping;
+	pgoff_t pgoff;
+
+	if (!vma->vm_file) /* anonymous vma */
+		return NULL;
+	if (!move_file())
+		return NULL;
+
+	inode = vma->vm_file->f_path.dentry->d_inode;
+	mapping = vma->vm_file->f_mapping;
+	if (pte_none(ptent))
+		pgoff = linear_page_index(vma, addr);
+	else /* pte_file(ptent) is true */
+		pgoff = pte_to_pgoff(ptent);
+
+	/* page is moved even if it's not RSS of this task(page-faulted). */
+	if (!mapping_cap_swap_backed(mapping)) { /* normal file */
+		page = find_get_page(mapping, pgoff);
+	} else { /* shmem/tmpfs file. we should take account of swap too. */
+		swp_entry_t ent;
+		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
+		if (do_swap_account)
+			entry->val = ent.val;
+	}
+
+	return page;
+}
+
 static int is_target_pte_for_mc(struct vm_area_struct *vma,
 		unsigned long addr, pte_t ptent, union mc_target *target)
 {
@@ -3983,43 +4313,16 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
 	struct page_cgroup *pc;
 	int ret = 0;
 	swp_entry_t ent = { .val = 0 };
-	int usage_count = 0;
-	bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
-					&mc.to->move_charge_at_immigrate);
 
-	if (!pte_present(ptent)) {
-		/* TODO: handle swap of shmes/tmpfs */
-		if (pte_none(ptent) || pte_file(ptent))
-			return 0;
-		else if (is_swap_pte(ptent)) {
-			ent = pte_to_swp_entry(ptent);
-			if (!move_anon || non_swap_entry(ent))
-				return 0;
-			usage_count = mem_cgroup_count_swap_user(ent, &page);
-		}
-	} else {
-		page = vm_normal_page(vma, addr, ptent);
-		if (!page || !page_mapped(page))
-			return 0;
-		/*
-		 * TODO: We don't move charges of file(including shmem/tmpfs)
-		 * pages for now.
-		 */
-		if (!move_anon || !PageAnon(page))
-			return 0;
-		if (!get_page_unless_zero(page))
-			return 0;
-		usage_count = page_mapcount(page);
-	}
-	if (usage_count > 1) {
-		/*
-		 * TODO: We don't move charges of shared(used by multiple
-		 * processes) pages for now.
-		 */
-		if (page)
-			put_page(page);
+	if (pte_present(ptent))
+		page = mc_handle_present_pte(vma, addr, ptent);
+	else if (is_swap_pte(ptent))
+		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
+	else if (pte_none(ptent) || pte_file(ptent))
+		page = mc_handle_file_pte(vma, addr, ptent, &ent);
+
+	if (!page && !ent.val)
 		return 0;
-	}
 	if (page) {
 		pc = lookup_page_cgroup(page);
 		/*
@@ -4035,8 +4338,8 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
 		if (!ret || !target)
 			put_page(page);
 	}
-	/* throught */
-	if (ent.val && do_swap_account && !ret &&
+	/* There is a swap entry and a page doesn't exist or isn't charged */
+	if (ent.val && !ret &&
 	    css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
 		ret = MC_TARGET_SWAP;
 		if (target)
@@ -4077,9 +4380,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 		};
 		if (is_vm_hugetlb_page(vma))
 			continue;
-		/* TODO: We don't move charges of shmem/tmpfs pages for now. */
-		if (vma->vm_flags & VM_SHARED)
-			continue;
 		walk_page_range(vma->vm_start, vma->vm_end,
 					&mem_cgroup_count_precharge_walk);
 	}
@@ -4102,6 +4402,7 @@ static void mem_cgroup_clear_mc(void)
 	if (mc.precharge) {
 		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
 		mc.precharge = 0;
+		memcg_oom_recover(mc.to);
 	}
 	/*
 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
@@ -4110,6 +4411,7 @@ static void mem_cgroup_clear_mc(void)
 	if (mc.moved_charge) {
 		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
 		mc.moved_charge = 0;
+		memcg_oom_recover(mc.from);
 	}
 	/* we must fixup refcnts and charges */
 	if (mc.moved_swap) {
@@ -4274,9 +4576,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
 		};
 		if (is_vm_hugetlb_page(vma))
 			continue;
-		/* TODO: We don't move charges of shmem/tmpfs pages for now. */
-		if (vma->vm_flags & VM_SHARED)
-			continue;
 		ret = walk_page_range(vma->vm_start, vma->vm_end,
 						&mem_cgroup_move_charge_walk);
 		if (ret)
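
The memcontrol.c hunks above split the old monolithic pte classifier into three helpers, one per pte state, so is_target_pte_for_mc() is reduced to a dispatch plus a common "nothing found" check. A minimal sketch of that dispatch shape, with hypothetical helper names (illustration only, not the kernel's code):

	struct page *page = NULL;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(pte))
		page = handle_present(vma, addr, pte);	/* mapped page */
	else if (is_swap_pte(pte))
		page = handle_swap(vma, addr, pte, &ent);	/* may fill ent */
	else
		page = handle_file(vma, addr, pte, &ent);	/* page cache/shmem */

	if (!page && !ent.val)
		return 0;	/* nothing to move for this pte */

Each helper encapsulates its own policy (shared-anon filtering, swap accounting, shmem swap lookup), which is what lets the VM_SHARED skips in the two page walkers above go away.
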
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 75751012c552..5d6fb339de03 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2098,7 +2098,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 		/* contextualize the tmpfs mount point mempolicy */
 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
 		if (IS_ERR(new))
-			goto put_free; /* no valid nodemask intersection */
+			goto free_scratch; /* no valid nodemask intersection */
 
 		task_lock(current);
 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
@@ -2114,6 +2114,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 
 put_free:
 		mpol_put(new);	/* drop initial ref */
free_scratch:
 		NODEMASK_SCRATCH_FREE(scratch);
 	}
 }
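
The mempolicy.c fix is the classic descending-goto cleanup pattern: a failure before the object exists must jump past its put. A compact sketch of the shape the patch restores (labels as in the hunk, body elided):

	new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
	if (IS_ERR(new))
		goto free_scratch;	/* no ref taken yet, skip the put */
	/* ... use new ... */
put_free:
	mpol_put(new);			/* drop initial ref */
free_scratch:
	NODEMASK_SCRATCH_FREE(scratch);
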
diff --git a/mm/migrate.c b/mm/migrate.c
index 09e2471afa0f..4205b1d6049e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -590,7 +590,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	}
 
 	/* charge against new page */
-	charge = mem_cgroup_prepare_migration(page, &mem);
+	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
 	if (charge == -ENOMEM) {
 		rc = -ENOMEM;
 		goto unlock;
diff --git a/mm/nommu.c b/mm/nommu.c
index 63fa17d121f0..b76f3ee0abe0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -918,14 +918,6 @@ static int validate_mmap_request(struct file *file,
 		if (!(capabilities & BDI_CAP_MAP_DIRECT))
 			return -ENODEV;
 
-		if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
-		    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
-		    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
-		    ) {
-			printk("MAP_SHARED not completely supported on !MMU\n");
-			return -EINVAL;
-		}
-
 		/* we mustn't privatise shared mappings */
 		capabilities &= ~BDI_CAP_MAP_COPY;
 	}
@@ -941,6 +933,20 @@ static int validate_mmap_request(struct file *file,
 			capabilities &= ~BDI_CAP_MAP_DIRECT;
 	}
 
+	if (capabilities & BDI_CAP_MAP_DIRECT) {
+		if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
+		    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
+		    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
+		    ) {
+			capabilities &= ~BDI_CAP_MAP_DIRECT;
+			if (flags & MAP_SHARED) {
+				printk(KERN_WARNING
+				       "MAP_SHARED not completely supported on !MMU\n");
+				return -EINVAL;
+			}
+		}
+	}
+
 	/* handle executable mappings and implied executable
 	 * mappings */
 	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
@@ -996,22 +1002,20 @@ static unsigned long determine_vm_flags(struct file *file,
 	unsigned long vm_flags;
 
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
-	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 	/* vm_flags |= mm->def_flags; */
 
 	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
 		/* attempt to share read-only copies of mapped file chunks */
+		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 		if (file && !(prot & PROT_WRITE))
 			vm_flags |= VM_MAYSHARE;
-	}
-	else {
+	} else {
 		/* overlay a shareable mapping on the backing device or inode
 		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
 		 * romfs/cramfs */
+		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
 		if (flags & MAP_SHARED)
-			vm_flags |= VM_MAYSHARE | VM_SHARED;
-		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
-			vm_flags |= VM_MAYSHARE;
+			vm_flags |= VM_SHARED;
 	}
 
 	/* refuse to let anyone share private mappings with this process if
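
The nommu.c change moves the PROT_* versus backing-device capability check to after the point where read-only or private handling may already have cleared BDI_CAP_MAP_DIRECT, and it only hard-fails for MAP_SHARED; a private mapping now silently falls back to a copy. A reduced sketch of that decision, with a hypothetical predicate name standing in for the three-line PROT_* test above (illustration only):

	if (capabilities & BDI_CAP_MAP_DIRECT) {
		if (prot_unsupported_by_bdi(prot, capabilities)) {
			capabilities &= ~BDI_CAP_MAP_DIRECT;	/* fall back to copy */
			if (flags & MAP_SHARED)
				return -EINVAL;	/* sharing requires a direct map */
		}
	}
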
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b68e802a7a7d..709aedfaa014 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -479,12 +479,9 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	read_lock(&tasklist_lock);
retry:
 	p = select_bad_process(&points, mem);
-	if (PTR_ERR(p) == -1UL)
+	if (!p || PTR_ERR(p) == -1UL)
 		goto out;
 
-	if (!p)
-		p = current;
-
 	if (oom_kill_process(p, gfp_mask, 0, points, mem,
 				"Memory cgroup out of memory"))
 		goto retry;
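
In the oom_kill.c hunk, select_bad_process() has two "don't kill" results: NULL (no eligible victim in this cgroup) and the -1UL sentinel (a chosen task is already exiting, so back off). Both now simply unwind instead of the old fallback of killing current. Sketch of the caller-side handling:

	p = select_bad_process(&points, mem);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;	/* nothing to kill, or a kill is already in flight */
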
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08b349931ebc..431214b941ac 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,22 @@
 #include <asm/div64.h>
 #include "internal.h"
 
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+DEFINE_PER_CPU(int, numa_node);
+EXPORT_PER_CPU_SYMBOL(numa_node);
+#endif
+
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+/*
+ * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
+ * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
+ * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
+ * defined in <linux/topology.h>.
+ */
+DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
+EXPORT_PER_CPU_SYMBOL(_numa_mem_);
+#endif
+
 /*
  * Array of node states.
  */
@@ -2856,6 +2872,24 @@ static void build_zonelist_cache(pg_data_t *pgdat)
 		zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
 }
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+/*
+ * Return node id of node used for "local" allocations.
+ * I.e., first node id of first zone in arg node's generic zonelist.
+ * Used for initializing percpu 'numa_mem', which is used primarily
+ * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
+ */
+int local_memory_node(int node)
+{
+	struct zone *zone;
+
+	(void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
+				   gfp_zone(GFP_KERNEL),
+				   NULL,
+				   &zone);
+	return zone->node;
+}
+#endif
 
 #else	/* CONFIG_NUMA */
 
@@ -2970,9 +3004,23 @@ static __init_refok int __build_all_zonelists(void *data)
 	 * needs the percpu allocator in order to allocate its pagesets
 	 * (a chicken-egg dilemma).
 	 */
-	for_each_possible_cpu(cpu)
+	for_each_possible_cpu(cpu) {
 		setup_pageset(&per_cpu(boot_pageset, cpu), 0);
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+		/*
+		 * We now know the "local memory node" for each node--
+		 * i.e., the node of the first zone in the generic zonelist.
+		 * Set up numa_mem percpu variable for on-line cpus.  During
+		 * boot, only the boot cpu should be on-line;  we'll init the
+		 * secondary cpus' numa_mem as they come on-line.  During
+		 * node/memory hotplug, we'll fixup all on-line cpus.
+		 */
+		if (cpu_online(cpu))
+			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
+#endif
+	}
+
 	return 0;
 }
 
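
The page_alloc.c hunks introduce the percpu "local memory node": on systems where a cpu's own node has no memory, numa_mem points at the nearest node that does, as computed by local_memory_node(). Allocation paths can then use numa_mem_id()/cpu_to_mem() instead of numa_node_id()/cpu_to_node(). A short usage sketch (accessors per <linux/topology.h>):

	/* allocate on the calling cpu's nearest node that has memory */
	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, numa_mem_id());

	/* per-cpu setup path, as done in __build_all_zonelists() above */
	if (cpu_online(cpu))
		set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
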
diff --git a/mm/shmem.c b/mm/shmem.c
index 4ef9797bd430..855eaf5b8d5b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2559,6 +2559,45 @@ out4:
 	return error;
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/**
+ * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * @inode: the inode to be searched
+ * @pgoff: the offset to be searched
+ * @pagep: the pointer for the found page to be stored
+ * @ent: the pointer for the found swap entry to be stored
+ *
+ * If a page is found, refcount of it is incremented. Callers should handle
+ * these refcount.
+ */
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+					struct page **pagep, swp_entry_t *ent)
+{
+	swp_entry_t entry = { .val = 0 }, *ptr;
+	struct page *page = NULL;
+	struct shmem_inode_info *info = SHMEM_I(inode);
+
+	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+		goto out;
+
+	spin_lock(&info->lock);
+	ptr = shmem_swp_entry(info, pgoff, NULL);
+#ifdef CONFIG_SWAP
+	if (ptr && ptr->val) {
+		entry.val = ptr->val;
+		page = find_get_page(&swapper_space, entry.val);
+	} else
+#endif
+		page = find_get_page(inode->i_mapping, pgoff);
+	if (ptr)
+		shmem_swp_unmap(ptr);
+	spin_unlock(&info->lock);
+out:
+	*pagep = page;
+	*ent = entry;
+}
+#endif
+
 #else /* !CONFIG_SHMEM */
 
 /*
@@ -2598,6 +2637,31 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
 	return 0;
 }
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/**
+ * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
+ * @inode: the inode to be searched
+ * @pgoff: the offset to be searched
+ * @pagep: the pointer for the found page to be stored
+ * @ent: the pointer for the found swap entry to be stored
+ *
+ * If a page is found, refcount of it is incremented. Callers should handle
+ * these refcount.
+ */
+void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
+					struct page **pagep, swp_entry_t *ent)
+{
+	struct page *page = NULL;
+
+	if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
+		goto out;
+	page = find_get_page(inode->i_mapping, pgoff);
+out:
+	*pagep = page;
+	*ent = (swp_entry_t){ .val = 0 };
+}
+#endif
+
 #define shmem_vm_ops				generic_file_vm_ops
 #define shmem_file_operations			ramfs_file_operations
 #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
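
mem_cgroup_get_shmem_target() returns either a page (with its refcount raised) or a swap entry for the given offset, so the memcg move-charge code can account shmem that is currently swapped out. As the kerneldoc notes, the caller must drop the reference it receives; a minimal caller sketch:

	struct page *page;
	swp_entry_t ent;

	mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
	if (page) {
		/* ... inspect or charge the page ... */
		put_page(page);		/* balance the find_get_page() ref */
	} else if (ent.val) {
		/* ... the offset is swapped out; work from the swap entry ... */
	}
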
diff --git a/mm/slab.c b/mm/slab.c
index 02786e1a32d2..e49f8f46f46d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -821,7 +821,7 @@ static void init_reap_node(int cpu)
 {
 	int node;
 
-	node = next_node(cpu_to_node(cpu), node_online_map);
+	node = next_node(cpu_to_mem(cpu), node_online_map);
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
@@ -1050,7 +1050,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	struct array_cache *alien = NULL;
 	int node;
 
-	node = numa_node_id();
+	node = numa_mem_id();
 
 	/*
 	 * Make sure we are not freeing a object from another node to the array
@@ -1129,7 +1129,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
-	int node = cpu_to_node(cpu);
+	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
@@ -1194,7 +1194,7 @@ static int __cpuinit cpuup_prepare(long cpu)
 {
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
-	int node = cpu_to_node(cpu);
+	int node = cpu_to_mem(cpu);
 	int err;
 
 	/*
@@ -1321,7 +1321,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		mutex_unlock(&cache_chain_mutex);
 		break;
 	}
-	return err ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __cpuinitdata cpucache_notifier = {
@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
-	node = numa_node_id();
+	node = numa_mem_id();
 
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
@@ -2121,7 +2121,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 			}
 		}
 	}
-	cachep->nodelists[numa_node_id()]->next_reap =
+	cachep->nodelists[numa_mem_id()]->next_reap =
 			jiffies + REAPTIMEOUT_LIST3 +
 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
@@ -2452,7 +2452,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
+	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
 #endif
 }
 
@@ -2479,7 +2479,7 @@ static void do_drain(void *arg)
 {
 	struct kmem_cache *cachep = arg;
 	struct array_cache *ac;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -3012,7 +3012,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 
 retry:
 	check_irq_off();
-	node = numa_node_id();
+	node = numa_mem_id();
 	ac = cpu_cache_get(cachep);
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -3216,10 +3216,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 
 	if (in_interrupt() || (flags & __GFP_THISNODE))
 		return NULL;
-	nid_alloc = nid_here = numa_node_id();
+	nid_alloc = nid_here = numa_mem_id();
 	get_mems_allowed();
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
-		nid_alloc = cpuset_mem_spread_node();
+		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
 		nid_alloc = slab_node(current->mempolicy);
 	put_mems_allowed();
@@ -3281,7 +3281,7 @@ retry:
 		if (local_flags & __GFP_WAIT)
 			local_irq_enable();
 		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, local_flags, numa_node_id());
+		obj = kmem_getpages(cache, local_flags, numa_mem_id());
 		if (local_flags & __GFP_WAIT)
 			local_irq_disable();
 		if (obj) {
@@ -3389,6 +3389,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 {
 	unsigned long save_flags;
 	void *ptr;
+	int slab_node = numa_mem_id();
 
 	flags &= gfp_allowed_mask;
 
@@ -3401,7 +3402,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	local_irq_save(save_flags);
 
 	if (nodeid == -1)
-		nodeid = numa_node_id();
+		nodeid = slab_node;
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
 		/* Node not bootstrapped yet */
@@ -3409,7 +3410,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 		goto out;
 	}
 
-	if (nodeid == numa_node_id()) {
+	if (nodeid == slab_node) {
 		/*
 		 * Use the locally cached objects if possible.
 		 * However ____cache_alloc does not allow fallback
@@ -3453,8 +3454,8 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 	 * We may just have run out of memory on the local node.
 	 * ____cache_alloc_node() knows how to locate memory on other nodes
 	 */
-	if (!objp)
-		objp = ____cache_alloc_node(cache, flags, numa_node_id());
+	if (!objp)
+		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
 
   out:
 	return objp;
@@ -3551,7 +3552,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 
 	batchcount = ac->batchcount;
 #if DEBUG
@@ -3985,7 +3986,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		return -ENOMEM;
 
 	for_each_online_cpu(i) {
-		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
+		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
 						batchcount, gfp);
 		if (!new->new[i]) {
 			for (i--; i >= 0; i--)
@@ -4007,9 +4008,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 		struct array_cache *ccold = new->new[i];
 		if (!ccold)
 			continue;
-		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
-		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
+		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
 		kfree(ccold);
 	}
 	kfree(new);
@@ -4115,7 +4116,7 @@ static void cache_reap(struct work_struct *w)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 	struct delayed_work *work = to_delayed_work(w);
 
 	if (!mutex_trylock(&cache_chain_mutex))
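
The slab.c conversion is mechanical: every place that picked a nodelist by the cpu's hardware node now uses the cpu's memory node, which differs only on memoryless-node systems. The invariant to keep in mind:

	int hw_node  = numa_node_id();	/* node this cpu sits on */
	int mem_node = numa_mem_id();	/* nearest node with memory */
	/* on most machines hw_node == mem_node; slab must use mem_node,
	 * since cachep->nodelists[] only exists for nodes that have memory */
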
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b28361a64..f28ad2cc8428 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_irq_data[cpu])
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
+
 		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
 				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_param[cpu]) {
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 			iucv_param[cpu] = NULL;
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		cpu_clear(cpu, cpumask);
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
-			return NOTIFY_BAD;
+			return notifier_from_errno(-EINVAL);
 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
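
notifier_from_errno() packs a negative errno into the notifier-chain return value, so callers can recover the precise error where NOTIFY_BAD only said "failed". Sketch of the pairing (both helpers live in <linux/notifier.h>):

	if (!iucv_irq_data[cpu])
		return notifier_from_errno(-ENOMEM);	/* instead of NOTIFY_BAD */

	/* a notifier-chain caller can then do: */
	err = notifier_to_errno(ret);	/* yields -ENOMEM again */
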
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 76af5f9623e3..a932ae52f921 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -242,6 +242,7 @@ case "$arg" in
 			echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
 			echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
 			echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+			echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
 			echo "$output_file" | grep -q "\.cpio$" && compr="cat"
 			shift
 			;;
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 5d4402a1161a..38783dcf6c61 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -124,6 +124,7 @@ extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
 extern int install_user_keyrings(void);
 extern int install_thread_keyring_to_cred(struct cred *);
 extern int install_process_keyring_to_cred(struct cred *);
+extern int install_session_keyring_to_cred(struct cred *, struct key *);
 
 extern struct key *request_key_and_link(struct key_type *type,
 					const char *description,
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 8f4dce1987c4..13074b454743 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1269,7 +1269,7 @@ long keyctl_session_to_parent(void)
 		goto not_permitted;
 
 	/* the parent must be single threaded */
-	if (atomic_read(&parent->signal->count) != 1)
+	if (!thread_group_empty(parent))
 		goto not_permitted;
 
 	/* the parent and the child must have different session keyrings or
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 20a38fed61b1..6b8e4ff4cc68 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -216,8 +216,7 @@ static int install_process_keyring(void)
 /*
  * install a session keyring directly to a credentials struct
  */
-static int install_session_keyring_to_cred(struct cred *cred,
-					   struct key *keyring)
+int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
 	unsigned long flags;
 	struct key *old;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index f656e9c069e3..f5ec9ac5d57c 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -58,6 +58,38 @@ void complete_request_key(struct key_construction *cons, int error)
 }
 EXPORT_SYMBOL(complete_request_key);
 
+static int umh_keys_init(struct subprocess_info *info)
+{
+	struct cred *cred = (struct cred*)current_cred();
+	struct key *keyring = info->data;
+	/*
+	 * This is called in context of freshly forked kthread before
+	 * kernel_execve(), we can just change our ->session_keyring.
+	 */
+	return install_session_keyring_to_cred(cred, keyring);
+}
+
+static void umh_keys_cleanup(struct subprocess_info *info)
+{
+	struct key *keyring = info->data;
+	key_put(keyring);
+}
+
+static int call_usermodehelper_keys(char *path, char **argv, char **envp,
+					struct key *session_keyring, enum umh_wait wait)
+{
+	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
+	struct subprocess_info *info =
+		call_usermodehelper_setup(path, argv, envp, gfp_mask);
+
+	if (!info)
+		return -ENOMEM;
+
+	call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
+					key_get(session_keyring));
+	return call_usermodehelper_exec(info, wait);
+}
+
 /*
  * request userspace finish the construction of a key
  * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
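
call_usermodehelper_keys() above is the standard three-step usermode-helper pattern: setup allocates the request, setfns attaches init/cleanup callbacks that run in the helper's own context, exec launches it. The init callback is what installs the session keyring in the fresh process. A reduced sketch of the same pattern for an arbitrary helper (path, callbacks and data are hypothetical):

	struct subprocess_info *info;

	info = call_usermodehelper_setup("/sbin/my-helper", argv, envp,
					 GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	call_usermodehelper_setfns(info, my_init, my_cleanup, my_data);
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
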
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a2ff86189d2a..e9d98be190c5 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -345,7 +345,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 		new_hw_ptr = hw_base + pos;
 	}
       __delta:
-	delta = (new_hw_ptr - old_hw_ptr) % runtime->boundary;
+	delta = new_hw_ptr - old_hw_ptr;
+	if (delta < 0)
+		delta += runtime->boundary;
 	if (xrun_debug(substream, in_interrupt ?
 			XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
 		char name[16];
@@ -439,8 +441,13 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 		snd_pcm_playback_silence(substream, new_hw_ptr);
 
 	if (in_interrupt) {
-		runtime->hw_ptr_interrupt = new_hw_ptr -
-			(new_hw_ptr % runtime->period_size);
+		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
+		if (delta < 0)
+			delta += runtime->boundary;
+		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
+		runtime->hw_ptr_interrupt += delta;
+		if (runtime->hw_ptr_interrupt >= runtime->boundary)
+			runtime->hw_ptr_interrupt -= runtime->boundary;
 	}
 	runtime->hw_ptr_base = hw_base;
 	runtime->status->hw_ptr = new_hw_ptr;
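
The pcm_lib.c change replaces a modulo with an explicit wrap-around correction: hardware positions are frame counters that wrap at runtime->boundary, so a "negative" difference must be fixed by adding the boundary once rather than reduced modulo it. Worked example with a boundary of 0x4000:

	/* old_hw_ptr = 0x3ff0; new_hw_ptr wrapped around to 0x0010 */
	snd_pcm_sframes_t delta = new_hw_ptr - old_hw_ptr;	/* -0x3fe0 */
	if (delta < 0)
		delta += boundary;	/* 0x0020 frames actually elapsed */
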
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 644c2bb17b86..303ac04ff6e4 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -27,7 +27,6 @@
 #include <linux/pm_qos_params.h>
 #include <linux/uio.h>
 #include <linux/dma-mapping.h>
-#include <linux/math64.h>
 #include <sound/core.h>
 #include <sound/control.h>
 #include <sound/info.h>
@@ -370,38 +369,6 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
 	return usecs;
 }
 
-static int calc_boundary(struct snd_pcm_runtime *runtime)
-{
-	u_int64_t boundary;
-
-	boundary = (u_int64_t)runtime->buffer_size *
-		   (u_int64_t)runtime->period_size;
-#if BITS_PER_LONG < 64
-	/* try to find lowest common multiple for buffer and period */
-	if (boundary > LONG_MAX - runtime->buffer_size) {
-		u_int32_t remainder = -1;
-		u_int32_t divident = runtime->buffer_size;
-		u_int32_t divisor = runtime->period_size;
-		while (remainder) {
-			remainder = divident % divisor;
-			if (remainder) {
-				divident = divisor;
-				divisor = remainder;
-			}
-		}
-		boundary = div_u64(boundary, divisor);
-		if (boundary > LONG_MAX - runtime->buffer_size)
-			return -ERANGE;
-	}
-#endif
-	if (boundary == 0)
-		return -ERANGE;
-	runtime->boundary = boundary;
-	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
-		runtime->boundary *= 2;
-	return 0;
-}
-
 static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
 			     struct snd_pcm_hw_params *params)
 {
@@ -477,9 +444,9 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
 	runtime->stop_threshold = runtime->buffer_size;
 	runtime->silence_threshold = 0;
 	runtime->silence_size = 0;
-	err = calc_boundary(runtime);
-	if (err < 0)
-		goto _error;
+	runtime->boundary = runtime->buffer_size;
+	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
+		runtime->boundary *= 2;
 
 	snd_pcm_timer_resolution_change(substream);
 	runtime->status->state = SNDRV_PCM_STATE_SETUP;
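
With the LCM-based calc_boundary() gone, the boundary is again simply the buffer size doubled until just below LONG_MAX, which keeps it a multiple of buffer_size and sidesteps the 32-bit overflow cases the removed code worked around. For example, buffer_size = 16384 on a 32-bit kernel (LONG_MAX = 0x7fffffff):

	runtime->boundary = runtime->buffer_size;		/* 0x4000 */
	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
		runtime->boundary *= 2;				/* ends at 0x40000000 */
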
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index 67921f93a41e..c15002242d98 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -26,7 +26,7 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
@@ -44,9 +44,6 @@ MODULE_LICENSE("GPL");
 /*********************************
  * DEFINES
  ********************************/
-#define PCI_VENDOR_ID_SAA7146		0x1131
-#define PCI_DEVICE_ID_SAA7146		0x7146
-
 #define CTL_ROUTE_ANALOG 0
 #define CTL_ROUTE_DIGITAL 1
 
@@ -165,7 +162,7 @@ module_param_array(enable, bool, NULL, 0444);
 MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard.");
 
 static DEFINE_PCI_DEVICE_TABLE(snd_aw2_ids) = {
-	{PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, 0, 0,
+	{PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146, 0, 0,
 	 0, 0, 0},
 	{0}
 };
@@ -419,7 +416,7 @@ static int snd_aw2_pcm_playback_open(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 
-	snd_printdd(KERN_DEBUG "aw2: Playback_open \n");
+	snd_printdd(KERN_DEBUG "aw2: Playback_open\n");
 	runtime->hw = snd_aw2_playback_hw;
 	return 0;
 }
@@ -435,7 +432,7 @@ static int snd_aw2_pcm_capture_open(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 
-	snd_printdd(KERN_DEBUG "aw2: Capture_open \n");
+	snd_printdd(KERN_DEBUG "aw2: Capture_open\n");
 	runtime->hw = snd_aw2_capture_hw;
 	return 0;
 }
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 4b302d86f5f2..7a9401462c1c 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -35,6 +35,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <linux/moduleparam.h>
 
 #include <sound/core.h>
 #include <sound/tlv.h>
@@ -50,6 +51,10 @@
 #define EMU10K1_CENTER_LFE_FROM_FRONT
 #endif
 
+static bool high_res_gpr_volume;
+module_param(high_res_gpr_volume, bool, 0444);
+MODULE_PARM_DESC(high_res_gpr_volume, "GPR mixer controls use 31-bit range.");
+
 /*
  * Tables
  */
@@ -296,6 +301,7 @@ static const u32 db_table[101] = {
 
 /* EMU10k1/EMU10k2 DSP control db gain */
 static const DECLARE_TLV_DB_SCALE(snd_emu10k1_db_scale1, -4000, 40, 1);
+static const DECLARE_TLV_DB_LINEAR(snd_emu10k1_db_linear, TLV_DB_GAIN_MUTE, 0);
 
 static const u32 onoff_table[2] = {
 	0x00000000, 0x00000001
@@ -1072,10 +1078,17 @@ snd_emu10k1_init_mono_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
 	strcpy(ctl->id.name, name);
 	ctl->vcount = ctl->count = 1;
 	ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
-	ctl->min = 0;
-	ctl->max = 100;
-	ctl->tlv = snd_emu10k1_db_scale1;
-	ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100;
+	if (high_res_gpr_volume) {
+		ctl->min = 0;
+		ctl->max = 0x7fffffff;
+		ctl->tlv = snd_emu10k1_db_linear;
+		ctl->translation = EMU10K1_GPR_TRANSLATION_NONE;
+	} else {
+		ctl->min = 0;
+		ctl->max = 100;
+		ctl->tlv = snd_emu10k1_db_scale1;
+		ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100;
+	}
 }
 
 static void __devinit
@@ -1087,10 +1100,17 @@ snd_emu10k1_init_stereo_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
 	ctl->vcount = ctl->count = 2;
 	ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
 	ctl->gpr[1] = gpr + 1; ctl->value[1] = defval;
-	ctl->min = 0;
-	ctl->max = 100;
-	ctl->tlv = snd_emu10k1_db_scale1;
-	ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100;
+	if (high_res_gpr_volume) {
+		ctl->min = 0;
+		ctl->max = 0x7fffffff;
+		ctl->tlv = snd_emu10k1_db_linear;
+		ctl->translation = EMU10K1_GPR_TRANSLATION_NONE;
+	} else {
+		ctl->min = 0;
+		ctl->max = 100;
+		ctl->tlv = snd_emu10k1_db_scale1;
+		ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100;
+	}
}
 
 static void __devinit
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 170610e1d7da..77e22c2a8caa 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1097,6 +1097,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
 	struct azx *chip = dev_id;
 	struct azx_dev *azx_dev;
 	u32 status;
+	u8 sd_status;
 	int i, ok;
 
 	spin_lock(&chip->reg_lock);
@@ -1110,8 +1111,10 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
 	for (i = 0; i < chip->num_streams; i++) {
 		azx_dev = &chip->azx_dev[i];
 		if (status & azx_dev->sd_int_sta_mask) {
+			sd_status = azx_sd_readb(azx_dev, SD_STS);
 			azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
-			if (!azx_dev->substream || !azx_dev->running)
+			if (!azx_dev->substream || !azx_dev->running ||
+			    !(sd_status & SD_INT_COMPLETE))
 				continue;
 			/* check whether this IRQ is really acceptable */
 			ok = azx_position_ok(chip, azx_dev);
@@ -2279,12 +2282,14 @@ static int azx_dev_free(struct snd_device *device)
  * white/black-listing for position_fix
 */
 static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+	SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
-	SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
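
The hda_intel.c interrupt fix reads the per-stream status byte before acknowledging it, then skips the stream unless the buffer-complete bit was actually set, which filters spurious wakeups on shared interrupt lines. The order matters: the write clears SD_STS, so the read must come first.

	sd_status = azx_sd_readb(azx_dev, SD_STS);	/* sample status... */
	azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);	/* ...then acknowledge */
	if (!(sd_status & SD_INT_COMPLETE))
		continue;	/* not a buffer-complete interrupt for us */
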
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 53538b0f9991..17d4548cc353 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7025,6 +7025,14 @@ static struct hda_input_mux alc889A_mb31_capture_source = {
 	},
 };
 
+static struct hda_input_mux alc889A_imac91_capture_source = {
+	.num_items = 2,
+	.items = {
+		{ "Mic", 0x01 },
+		{ "Line", 0x2 }, /* Not sure! */
+	},
+};
+
 /*
  * 2ch mode
 */
@@ -7486,15 +7494,8 @@ static struct snd_kcontrol_new alc885_macmini3_mixer[] = {
 };
 
 static struct snd_kcontrol_new alc885_imac91_mixer[] = {
-	HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
-	HDA_BIND_MUTE ("Line-Out Playback Switch", 0x0c, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
-	HDA_CODEC_MUTE ("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
+	HDA_BIND_MUTE("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7995,61 +7996,56 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
 
 /* iMac 9,1 */
 static struct hda_verb alc885_imac91_init_verbs[] = {
-	/* Line-Out mixer: unmute input/output amp left and right (volume = 0) */
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-	/* Rear mixer */
-	{0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
-	{0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
-	{0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
-	/* HP Pin: output 0 (0x0c) */
+	/* Internal Speaker Pin (0x0c) */
+	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, (PIN_OUT | AC_PINCTL_VREF_50) },
+	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x18, AC_VERB_SET_CONNECT_SEL, 0x00},
+	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, (PIN_OUT | AC_PINCTL_VREF_50) },
+	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
+	/* HP Pin: Rear */
 	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 	{0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
-	{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
-	/* Internal Speakers: output 0 (0x0d) */
-	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+	{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, (ALC880_HP_EVENT | AC_USRSP_EN)},
+	/* Line in Rear */
+	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_VREF_50},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
-	/* Mic (rear) pin: input vref at 80% */
-	{0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
-	{0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
 	/* Front Mic pin: input vref at 80% */
 	{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
 	{0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-	/* Line In pin: use output 1 when in LineOut mode */
-	{0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
-	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
-	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x01},
-
-	/* FIXME: use matrix-type input source selection */
-	/* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
-	/* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
+	/* Rear mixer */
+	{0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+	{0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	/* Line-Out mixer: unmute input/output amp left and right (volume = 0) */
+	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
+	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
+	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
+	/* 0x24 [Audio Mixer] wcaps 0x20010b: Stereo Amp-In */
 	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Input mixer2 */
+	/* 0x23 [Audio Mixer] wcaps 0x20010b: Stereo Amp-In */
 	{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Input mixer3 */
+	/* 0x22 [Audio Mixer] wcaps 0x20010b: Stereo Amp-In */
 	{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* ADC1: mute amp left and right */
+	/* 0x07 [Audio Input] wcaps 0x10011b: Stereo Amp-In */
 	{0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
-	/* ADC2: mute amp left and right */
+	/* 0x08 [Audio Input] wcaps 0x10011b: Stereo Amp-In */
 	{0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
-	/* ADC3: mute amp left and right */
+	/* 0x09 [Audio Input] wcaps 0x10011b: Stereo Amp-In */
 	{0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
-
 	{ }
 };
 
@@ -8118,7 +8114,7 @@ static void alc885_imac91_setup(struct hda_codec *codec)
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x14;
-	spec->autocfg.speaker_pins[0] = 0x15;
+	spec->autocfg.speaker_pins[0] = 0x18;
 	spec->autocfg.speaker_pins[1] = 0x1a;
 }
 
@@ -9627,14 +9623,14 @@ static struct alc_config_preset alc882_presets[] = {
 		.init_hook = alc885_imac24_init_hook,
 	},
 	[ALC885_IMAC91] = {
-		.mixers = { alc885_imac91_mixer, alc882_chmode_mixer },
+		.mixers = {alc885_imac91_mixer},
 		.init_verbs = { alc885_imac91_init_verbs,
 				alc880_gpio1_init_verbs },
 		.num_dacs = ARRAY_SIZE(alc882_dac_nids),
 		.dac_nids = alc882_dac_nids,
-		.channel_mode = alc885_mbp_4ch_modes,
-		.num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
-		.input_mux = &alc882_capture_source,
+		.channel_mode = alc885_mba21_ch_modes,
+		.num_channel_mode = ARRAY_SIZE(alc885_mba21_ch_modes),
+		.input_mux = &alc889A_imac91_capture_source,
 		.dig_out_nid = ALC882_DIGOUT_NID,
 		.dig_in_nid = ALC882_DIGIN_NID,
 		.unsol_event = alc_automute_amp_unsol_event,
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index a0e06d82da1f..f1e7babd6920 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2078,12 +2078,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
 			   "Intel D965", STAC_D965_3ST),
 	/* Dell 3 stack systems */
-	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01ed, "Dell ", STAC_DELL_3ST),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f4, "Dell ", STAC_DELL_3ST),
 	/* Dell 3 stack systems with verb table in BIOS */
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x022e, "Dell ", STAC_DELL_BIOS),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL,  0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
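
For readers unfamiliar with these tables: each SND_PCI_QUIRK() entry keys a board configuration to a PCI subsystem vendor/device pair, so moving the 0x01f7 entry from the 3-stack group to the BIOS group changes which setup the XPS M1730 gets at probe time. A toy model of the lookup, as a hedged illustration only (the struct, QUIRK macro, and enum below are made up for this sketch, not the ALSA definitions; 0x1028 is PCI_VENDOR_ID_DELL):

	#include <stdio.h>

	/* illustrative stand-ins for struct snd_pci_quirk and friends */
	struct quirk {
		unsigned short subvendor, subdevice;
		const char *name;
		int value;
	};

	#define QUIRK(vend, dev, name, val) { (vend), (dev), (name), (val) }

	enum { STAC_DELL_3ST = 1, STAC_DELL_BIOS = 2 };

	static const struct quirk tbl[] = {
		QUIRK(0x1028, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
		QUIRK(0x1028, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
		{ 0 }	/* sentinel */
	};

	/* return the entry matching a subsystem vendor/device pair */
	static const struct quirk *lookup(unsigned short sv, unsigned short sd)
	{
		const struct quirk *q;

		for (q = tbl; q->subvendor; q++)
			if (q->subvendor == sv && q->subdevice == sd)
				return q;
		return NULL;
	}

	int main(void)
	{
		const struct quirk *q = lookup(0x1028, 0x01f7);

		if (q)
			printf("%s -> config %d\n", q->name, q->value);
		return 0;
	}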
diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
index 8bbfbfd4c658..dcb620796d9e 100644
--- a/sound/usb/caiaq/input.c
+++ b/sound/usb/caiaq/input.c
@@ -171,7 +171,7 @@ static void snd_caiaq_input_read_analog(struct snd_usb_caiaqdev *dev,
 	input_report_abs(input_dev, ABS_HAT0Y, (buf[4] << 8) | buf[5]);
 	input_report_abs(input_dev, ABS_HAT1X, (buf[12] << 8) | buf[13]);
 	input_report_abs(input_dev, ABS_HAT1Y, (buf[2] << 8) | buf[3]);
-	input_report_abs(input_dev, ABS_HAT2X, (buf[15] << 8) | buf[15]);
+	input_report_abs(input_dev, ABS_HAT2X, (buf[14] << 8) | buf[15]);
 	input_report_abs(input_dev, ABS_HAT2Y, (buf[0] << 8) | buf[1]);
 	input_report_abs(input_dev, ABS_HAT3X, (buf[10] << 8) | buf[11]);
 	input_report_abs(input_dev, ABS_HAT3Y, (buf[6] << 8) | buf[7]);
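
The fix above is easy to miss at a glance: every axis is a 16-bit big-endian value assembled from an adjacent byte pair, and the old ABS_HAT2X line OR'd buf[15] with itself, discarding the high byte. A standalone sketch of the intended decode (be16_at() and the sample values are illustrative only, not driver API):

	#include <stdio.h>

	/* assemble a 16-bit big-endian value from buf[hi] and buf[hi + 1] */
	static int be16_at(const unsigned char *buf, int hi)
	{
		return (buf[hi] << 8) | buf[hi + 1];
	}

	int main(void)
	{
		unsigned char buf[16] = { 0 };

		buf[14] = 0x12;
		buf[15] = 0x34;
		/* fixed decode: high byte from buf[14], low byte from buf[15] */
		printf("ABS_HAT2X = 0x%04x\n", be16_at(buf, 14));	/* 0x1234 */
		/* the pre-fix expression (buf[15] << 8) | buf[15] gives 0x3434 */
		return 0;
	}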
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 8b1e4b124a9f..46785643c66d 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -645,6 +645,105 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
 };
 
 /*
+ * AKAI MPD16 protocol:
+ *
+ * For control port (endpoint 1):
+ * ==============================
+ * One or more chunks consisting of first byte of (0x10 | msg_len) and then a
+ * SysEx message (msg_len=9 bytes long).
+ *
+ * For data port (endpoint 2):
+ * ===========================
+ * One or more chunks consisting of first byte of (0x20 | msg_len) and then a
+ * MIDI message (msg_len bytes long)
+ *
+ * Messages sent: Active Sense, Note On, Poly Pressure, Control Change.
+ */
+static void snd_usbmidi_akai_input(struct snd_usb_midi_in_endpoint *ep,
+				   uint8_t *buffer, int buffer_length)
+{
+	unsigned int pos = 0;
+	unsigned int len = (unsigned int)buffer_length;
+	while (pos < len) {
+		unsigned int port = (buffer[pos] >> 4) - 1;
+		unsigned int msg_len = buffer[pos] & 0x0f;
+		pos++;
+		if (pos + msg_len <= len && port < 2)
+			snd_usbmidi_input_data(ep, 0, &buffer[pos], msg_len);
+		pos += msg_len;
+	}
+}
+
+#define MAX_AKAI_SYSEX_LEN 9
+
+static void snd_usbmidi_akai_output(struct snd_usb_midi_out_endpoint *ep,
+				    struct urb *urb)
+{
+	uint8_t *msg;
+	int pos, end, count, buf_end;
+	uint8_t tmp[MAX_AKAI_SYSEX_LEN];
+	struct snd_rawmidi_substream *substream = ep->ports[0].substream;
+
+	if (!ep->ports[0].active)
+		return;
+
+	msg = urb->transfer_buffer + urb->transfer_buffer_length;
+	buf_end = ep->max_transfer - MAX_AKAI_SYSEX_LEN - 1;
+
+	/* only try adding more data when there's space for at least 1 SysEx */
+	while (urb->transfer_buffer_length < buf_end) {
+		count = snd_rawmidi_transmit_peek(substream,
+						  tmp, MAX_AKAI_SYSEX_LEN);
+		if (!count) {
+			ep->ports[0].active = 0;
+			return;
+		}
+		/* try to skip non-SysEx data */
+		for (pos = 0; pos < count && tmp[pos] != 0xF0; pos++)
+			;
+
+		if (pos > 0) {
+			snd_rawmidi_transmit_ack(substream, pos);
+			continue;
+		}
+
+		/* look for the start or end marker */
+		for (end = 1; end < count && tmp[end] < 0xF0; end++)
+			;
+
+		/* next SysEx started before the end of current one */
+		if (end < count && tmp[end] == 0xF0) {
+			/* it's incomplete - drop it */
+			snd_rawmidi_transmit_ack(substream, end);
+			continue;
+		}
+		/* SysEx complete */
+		if (end < count && tmp[end] == 0xF7) {
+			/* queue it, ack it, and get the next one */
+			count = end + 1;
+			msg[0] = 0x10 | count;
+			memcpy(&msg[1], tmp, count);
+			snd_rawmidi_transmit_ack(substream, count);
+			urb->transfer_buffer_length += count + 1;
+			msg += count + 1;
+			continue;
+		}
+		/* less than 9 bytes and no end byte - wait for more */
+		if (count < MAX_AKAI_SYSEX_LEN) {
+			ep->ports[0].active = 0;
+			return;
+		}
+		/* 9 bytes and no end marker in sight - malformed, skip it */
+		snd_rawmidi_transmit_ack(substream, count);
+	}
+}
+
+static struct usb_protocol_ops snd_usbmidi_akai_ops = {
+	.input = snd_usbmidi_akai_input,
+	.output = snd_usbmidi_akai_output,
+};
+
+/*
  * Novation USB MIDI protocol: number of data bytes is in the first byte
  * (when receiving) (+1!) or in the second byte (when sending); data begins
  * at the third byte.
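
The chunked framing described in the MPD16 block comment above is simple to exercise on its own. The following standalone sketch (not part of the patch; dump_chunks() and the sample packet are made up for illustration) walks a buffer the same way snd_usbmidi_akai_input() does, taking the port from the high nibble and the payload length from the low nibble of each chunk's first byte:

	#include <stdint.h>
	#include <stdio.h>

	/* walk a buffer of MPD16 chunks: (0x10 | len) = control port,
	 * (0x20 | len) = data port, followed by len payload bytes;
	 * mirrors the bounds checks in snd_usbmidi_akai_input() */
	static void dump_chunks(const uint8_t *buf, unsigned int len)
	{
		unsigned int pos = 0;

		while (pos < len) {
			unsigned int port = (buf[pos] >> 4) - 1;   /* 0x1N -> 0, 0x2N -> 1 */
			unsigned int msg_len = buf[pos] & 0x0f;
			unsigned int i;

			pos++;
			if (pos + msg_len <= len && port < 2) {
				printf("port %u, %u byte(s):", port, msg_len);
				for (i = 0; i < msg_len; i++)
					printf(" %02x", buf[pos + i]);
				printf("\n");
			}
			pos += msg_len;
		}
	}

	int main(void)
	{
		/* hypothetical data-port payload: one 3-byte Note On chunk */
		const uint8_t pkt[] = { 0x23, 0x90, 0x3c, 0x7f };

		dump_chunks(pkt, sizeof(pkt));
		return 0;
	}

The output path uses the same framing in reverse: snd_usbmidi_akai_output() above prefixes each complete SysEx with (0x10 | count) before queuing it into the URB.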
@@ -1434,6 +1533,11 @@ static struct port_info {
 	EXTERNAL_PORT(0x086a, 0x0001, 8, "%s Broadcast"),
 	EXTERNAL_PORT(0x086a, 0x0002, 8, "%s Broadcast"),
 	EXTERNAL_PORT(0x086a, 0x0003, 4, "%s Broadcast"),
+	/* Akai MPD16 */
+	CONTROL_PORT(0x09e8, 0x0062, 0, "%s Control"),
+	PORT_INFO(0x09e8, 0x0062, 1, "%s MIDI", 0,
+		  SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |
+		  SNDRV_SEQ_PORT_TYPE_HARDWARE),
 	/* Access Music Virus TI */
 	EXTERNAL_PORT(0x133e, 0x0815, 0, "%s MIDI"),
 	PORT_INFO(0x133e, 0x0815, 1, "%s Synth", 0,
@@ -2035,6 +2139,12 @@ int snd_usbmidi_create(struct snd_card *card,
 		umidi->usb_protocol_ops = &snd_usbmidi_cme_ops;
 		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
 		break;
+	case QUIRK_MIDI_AKAI:
+		umidi->usb_protocol_ops = &snd_usbmidi_akai_ops;
+		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
+		/* endpoint 1 is input-only */
+		endpoints[1].out_cables = 0;
+		break;
 	default:
 		snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type);
 		err = -ENXIO;
diff --git a/sound/usb/midi.h b/sound/usb/midi.h
index 2089ec987c66..2fca80b744c0 100644
--- a/sound/usb/midi.h
+++ b/sound/usb/midi.h
@@ -37,6 +37,8 @@ struct snd_usb_midi_endpoint_info {
 
 /* for QUIRK_MIDI_CME, data is NULL */
 
+/* for QUIRK_MIDI_AKAI, data is NULL */
+
 int snd_usbmidi_create(struct snd_card *card,
 		       struct usb_interface *iface,
 		       struct list_head *midi_list,
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 91ddef31bcbd..f8797f61a24b 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1973,6 +1973,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 	}
 },
 
+/* AKAI devices */
+{
+	USB_DEVICE(0x09e8, 0x0062),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		.vendor_name = "AKAI",
+		.product_name = "MPD16",
+		.ifnum = 0,
+		.type = QUIRK_MIDI_AKAI,
+	}
+},
+
 /* TerraTec devices */
 {
 	USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 136e5b4cf6de..b45e54c09ba2 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -289,6 +289,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
 		[QUIRK_MIDI_FASTLANE] = create_any_midi_quirk,
 		[QUIRK_MIDI_EMAGIC] = create_any_midi_quirk,
 		[QUIRK_MIDI_CME] = create_any_midi_quirk,
+		[QUIRK_MIDI_AKAI] = create_any_midi_quirk,
 		[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
 		[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
 		[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index d679e72a3e5c..06ebf24d3a4d 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -74,6 +74,7 @@ enum quirk_type {
 	QUIRK_MIDI_FASTLANE,
 	QUIRK_MIDI_EMAGIC,
 	QUIRK_MIDI_CME,
+	QUIRK_MIDI_AKAI,
 	QUIRK_MIDI_US122L,
 	QUIRK_AUDIO_STANDARD_INTERFACE,
 	QUIRK_AUDIO_FIXED_ENDPOINT,
diff --git a/usr/Makefile b/usr/Makefile
index 1e6a9e4a72cc..6b4b6da0b67d 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -15,6 +15,9 @@ suffix_$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) = .bz2
 # Lzma
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA) = .lzma
 
+# Lzo
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO) = .lzo
+
 # Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data$(suffix_y).o
 
@@ -45,7 +48,7 @@ endif
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
diff --git a/usr/initramfs_data.lzo.S b/usr/initramfs_data.lzo.S
new file mode 100644
index 000000000000..59211905da84
--- /dev/null
+++ b/usr/initramfs_data.lzo.S
@@ -0,0 +1,29 @@
+/*
+  initramfs_data includes the compressed binary that is the
+  filesystem used for early user space.
+  Note: Older versions of "as" (prior to binutils 2.11.90.0.23
+  released on 2001-07-14) did not support .incbin.
+  If you are forced to use older binutils than that then the
+  following trick can be applied to create the resulting binary:
+
+
+  ld -m elf_i386 --format binary --oformat elf32-i386 -r \
+     -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
+  ld -m elf_i386 -r -o built-in.o initramfs_data.o
+
+  initramfs_data.scr looks like this:
+SECTIONS
+{
+	.init.ramfs : { *(.data) }
+}
+
+  The above example is for i386 - the parameters vary between architectures.
+  If necessary, look up LDFLAGS_BLOB in an older version of the
+  arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
+
+  Using .incbin has the advantage over ld that the correct flags are set
+  in the ELF header, as required by certain architectures.
+*/
+
+.section .init.ramfs,"a"
+.incbin "usr/initramfs_data.cpio.lzo"