-rw-r--r--  Documentation/ABI/stable/sysfs-class-backlight | 20
-rw-r--r--  Documentation/ABI/testing/configfs-spear-pcie-gadget | 31
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-rbd | 2
-rw-r--r--  Documentation/Changes | 8
-rw-r--r--  Documentation/CodingStyle | 7
-rw-r--r--  Documentation/filesystems/adfs.txt | 18
-rw-r--r--  Documentation/i2c/busses/i2c-diolan-u2c | 26
-rw-r--r--  Documentation/kernel-parameters.txt | 16
-rw-r--r--  Documentation/misc-devices/spear-pcie-gadget.txt | 130
-rw-r--r--  Documentation/vm/page-types.c | 105
-rw-r--r--  Documentation/x86/x86_64/boot-options.txt | 5
-rw-r--r--  MAINTAINERS | 87
-rw-r--r--  arch/alpha/include/asm/types.h | 1
-rw-r--r--  arch/arm/include/asm/types.h | 3
-rw-r--r--  arch/arm/mach-mmp/include/mach/mmp2.h | 2
-rw-r--r--  arch/arm/mach-mmp/include/mach/pxa168.h | 2
-rw-r--r--  arch/arm/mach-mmp/include/mach/pxa910.h | 2
-rw-r--r--  arch/arm/mach-mxs/include/mach/dma.h | 26
-rw-r--r--  arch/arm/mach-pxa/balloon3.c | 3
-rw-r--r--  arch/arm/mach-pxa/cm-x300.c | 2
-rw-r--r--  arch/arm/mach-pxa/colibri-evalboard.c | 3
-rw-r--r--  arch/arm/mach-pxa/colibri-pxa270-income.c | 3
-rw-r--r--  arch/arm/mach-pxa/corgi.c | 2
-rw-r--r--  arch/arm/mach-pxa/csb726.c | 2
-rw-r--r--  arch/arm/mach-pxa/devices.c | 2
-rw-r--r--  arch/arm/mach-pxa/em-x270.c | 2
-rw-r--r--  arch/arm/mach-pxa/ezx.c | 2
-rw-r--r--  arch/arm/mach-pxa/hx4700.c | 2
-rw-r--r--  arch/arm/mach-pxa/littleton.c | 2
-rw-r--r--  arch/arm/mach-pxa/magician.c | 2
-rw-r--r--  arch/arm/mach-pxa/mainstone.c | 2
-rw-r--r--  arch/arm/mach-pxa/mioa701.c | 2
-rw-r--r--  arch/arm/mach-pxa/mxm8x10.c | 2
-rw-r--r--  arch/arm/mach-pxa/palm27x.c | 3
-rw-r--r--  arch/arm/mach-pxa/pcm990-baseboard.c | 2
-rw-r--r--  arch/arm/mach-pxa/poodle.c | 2
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 3
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c | 2
-rw-r--r--  arch/arm/mach-pxa/pxa95x.c | 2
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c | 2
-rw-r--r--  arch/arm/mach-pxa/saar.c | 2
-rw-r--r--  arch/arm/mach-pxa/saarb.c | 3
-rw-r--r--  arch/arm/mach-pxa/spitz.c | 3
-rw-r--r--  arch/arm/mach-pxa/stargate2.c | 2
-rw-r--r--  arch/arm/mach-pxa/tavorevb3.c | 3
-rw-r--r--  arch/arm/mach-pxa/tosa.c | 2
-rw-r--r--  arch/arm/mach-pxa/trizeps4.c | 2
-rw-r--r--  arch/arm/mach-pxa/viper.c | 2
-rw-r--r--  arch/arm/mach-pxa/vpac270.c | 3
-rw-r--r--  arch/arm/mach-pxa/xcep.c | 3
-rw-r--r--  arch/arm/mach-pxa/z2.c | 3
-rw-r--r--  arch/arm/mach-pxa/zeus.c | 3
-rw-r--r--  arch/arm/mach-pxa/zylonite_pxa300.c | 2
-rw-r--r--  arch/arm/mach-s3c2410/h1940-bluetooth.c | 11
-rw-r--r--  arch/arm/mach-s3c2410/include/mach/h1940.h | 3
-rw-r--r--  arch/arm/mach-s3c2410/mach-h1940.c | 303
-rw-r--r--  arch/arm/mach-s3c2440/mach-mini2440.c | 7
-rw-r--r--  arch/arm/mach-s3c2440/mach-rx1950.c | 74
-rw-r--r--  arch/arm/plat-nomadik/include/plat/ste_dma40.h | 22
-rw-r--r--  arch/avr32/include/asm/types.h | 8
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c | 15
-rw-r--r--  arch/cris/include/asm/thread_info.h | 2
-rw-r--r--  arch/cris/include/asm/types.h | 3
-rw-r--r--  arch/frv/include/asm/processor.h | 2
-rw-r--r--  arch/frv/include/asm/thread_info.h | 13
-rw-r--r--  arch/frv/include/asm/types.h | 8
-rw-r--r--  arch/frv/kernel/process.c | 5
-rw-r--r--  arch/h8300/include/asm/types.h | 4
-rw-r--r--  arch/ia64/include/asm/thread_info.h | 14
-rw-r--r--  arch/ia64/include/asm/types.h | 3
-rw-r--r--  arch/m32r/include/asm/thread_info.h | 13
-rw-r--r--  arch/m32r/include/asm/types.h | 3
-rw-r--r--  arch/m68k/include/asm/types.h | 3
-rw-r--r--  arch/mips/include/asm/thread_info.h | 6
-rw-r--r--  arch/mips/include/asm/types.h | 6
-rw-r--r--  arch/mn10300/include/asm/thread_info.h | 6
-rw-r--r--  arch/mn10300/include/asm/types.h | 7
-rw-r--r--  arch/parisc/include/asm/types.h | 3
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 2
-rw-r--r--  arch/powerpc/include/asm/types.h | 5
-rw-r--r--  arch/powerpc/kernel/process.c | 4
-rw-r--r--  arch/s390/include/asm/types.h | 6
-rw-r--r--  arch/score/include/asm/thread_info.h | 2
-rw-r--r--  arch/sh/include/asm/thread_info.h | 2
-rw-r--r--  arch/sh/kernel/process.c | 16
-rw-r--r--  arch/sparc/include/asm/thread_info_32.h | 6
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h | 24
-rw-r--r--  arch/sparc/include/asm/types.h | 4
-rw-r--r--  arch/sparc/mm/srmmu.c | 4
-rw-r--r--  arch/sparc/mm/sun4c.c | 4
-rw-r--r--  arch/tile/include/asm/thread_info.h | 2
-rw-r--r--  arch/tile/kernel/process.c | 4
-rw-r--r--  arch/um/drivers/line.c | 4
-rw-r--r--  arch/um/include/asm/processor-generic.h | 2
-rw-r--r--  arch/um/include/shared/line.h | 4
-rw-r--r--  arch/um/sys-i386/asm/elf.h | 2
-rw-r--r--  arch/um/sys-x86_64/asm/elf.h | 2
-rw-r--r--  arch/x86/Kconfig | 12
-rw-r--r--  arch/x86/include/asm/dma.h | 6
-rw-r--r--  arch/x86/include/asm/thread_info.h | 10
-rw-r--r--  arch/x86/include/asm/types.h | 8
-rw-r--r--  arch/x86/kernel/Makefile | 3
-rw-r--r--  arch/x86/kernel/dumpstack.c | 10
-rw-r--r--  arch/x86/kernel/head64.c | 3
-rw-r--r--  arch/x86/kernel/setup.c | 25
-rw-r--r--  arch/x86/mm/init_64.c | 11
-rw-r--r--  arch/x86/xen/mmu.c | 21
-rw-r--r--  arch/xtensa/include/asm/types.h | 4
-rw-r--r--  crypto/deflate.c | 3
-rw-r--r--  crypto/zlib.c | 18
-rw-r--r--  drivers/acpi/video.c | 16
-rw-r--r--  drivers/block/rbd.c | 361
-rw-r--r--  drivers/dca/dca-core.c | 6
-rw-r--r--  drivers/dma/Kconfig | 12
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/dmatest.c | 14
-rw-r--r--  drivers/dma/dw_dmac.c | 103
-rw-r--r--  drivers/dma/dw_dmac_regs.h | 12
-rw-r--r--  drivers/dma/fsldma.c | 551
-rw-r--r--  drivers/dma/fsldma.h | 6
-rw-r--r--  drivers/dma/mxs-dma.c | 724
-rw-r--r--  drivers/dma/pch_dma.c | 35
-rw-r--r--  drivers/dma/ste_dma40.c | 1402
-rw-r--r--  drivers/dma/ste_dma40_ll.c | 218
-rw-r--r--  drivers/dma/ste_dma40_ll.h | 66
-rw-r--r--  drivers/firmware/Kconfig | 12
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/sigma.c | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 257
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 6
-rw-r--r--  drivers/hid/hid-picolcd.c | 1
-rw-r--r--  drivers/i2c/busses/Kconfig | 32
-rw-r--r--  drivers/i2c/busses/Makefile | 2
-rw-r--r--  drivers/i2c/busses/i2c-diolan-u2c.c | 535
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 161
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pxa-pci.c | 176
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 115
-rw-r--r--  drivers/leds/Kconfig | 10
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/leds-bd2802.c | 47
-rw-r--r--  drivers/leds/leds-lm3530.c | 378
-rw-r--r--  drivers/leds/leds-lp5521.c | 14
-rw-r--r--  drivers/leds/leds-lp5523.c | 20
-rw-r--r--  drivers/leds/leds-net5501.c | 2
-rw-r--r--  drivers/macintosh/via-pmu-backlight.c | 1
-rw-r--r--  drivers/misc/Kconfig | 10
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/apds9802als.c | 17
-rw-r--r--  drivers/misc/atmel_tclib.c | 4
-rw-r--r--  drivers/misc/bh1780gli.c | 19
-rw-r--r--  drivers/misc/bmp085.c | 6
-rw-r--r--  drivers/misc/ep93xx_pwm.c | 6
-rw-r--r--  drivers/misc/hmc6352.c | 4
-rw-r--r--  drivers/misc/pch_phub.c | 1
-rw-r--r--  drivers/misc/spear13xx_pcie_gadget.c | 908
-rw-r--r--  drivers/mmc/host/omap.c | 6
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 7
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can.c | 6
-rw-r--r--  drivers/net/ftmac100.c | 2
-rw-r--r--  drivers/net/gianfar.c | 16
-rw-r--r--  drivers/net/gianfar.h | 1
-rw-r--r--  drivers/net/macvlan.c | 18
-rw-r--r--  drivers/net/niu.c | 2
-rw-r--r--  drivers/net/ppp_deflate.c | 2
-rw-r--r--  drivers/net/veth.c | 2
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 1
-rw-r--r--  drivers/platform/x86/asus-laptop.c | 1
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 1
-rw-r--r--  drivers/platform/x86/classmate-laptop.c | 1
-rw-r--r--  drivers/platform/x86/compal-laptop.c | 1
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 1
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 1
-rw-r--r--  drivers/platform/x86/fujitsu-laptop.c | 1
-rw-r--r--  drivers/platform/x86/msi-laptop.c | 1
-rw-r--r--  drivers/platform/x86/msi-wmi.c | 1
-rw-r--r--  drivers/platform/x86/panasonic-laptop.c | 1
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 3
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 1
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 1
-rw-r--r--  drivers/pnp/base.h | 2
-rw-r--r--  drivers/pnp/manager.c | 7
-rw-r--r--  drivers/pnp/resource.c | 7
-rw-r--r--  drivers/pps/generators/pps_gen_parport.c | 5
-rw-r--r--  drivers/rtc/Kconfig | 10
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-ds1374.c | 19
-rw-r--r--  drivers/rtc/rtc-ds1511.c | 2
-rw-r--r--  drivers/rtc/rtc-isl1208.c | 176
-rw-r--r--  drivers/rtc/rtc-tegra.c | 488
-rw-r--r--  drivers/scsi/sd.c | 1
-rw-r--r--  drivers/staging/olpc_dcon/olpc_dcon.c | 1
-rw-r--r--  drivers/staging/samsung-laptop/samsung-laptop.c | 1
-rw-r--r--  drivers/tty/tty_buffer.c | 14
-rw-r--r--  drivers/tty/tty_ldisc.c | 14
-rw-r--r--  drivers/usb/misc/appledisplay.c | 1
-rw-r--r--  drivers/vhost/net.c | 159
-rw-r--r--  drivers/vhost/vhost.c | 55
-rw-r--r--  drivers/video/atmel_lcdfb.c | 1
-rw-r--r--  drivers/video/aty/aty128fb.c | 1
-rw-r--r--  drivers/video/aty/atyfb_base.c | 1
-rw-r--r--  drivers/video/aty/radeon_backlight.c | 1
-rw-r--r--  drivers/video/backlight/88pm860x_bl.c | 1
-rw-r--r--  drivers/video/backlight/Kconfig | 18
-rw-r--r--  drivers/video/backlight/Makefile | 3
-rw-r--r--  drivers/video/backlight/adp5520_bl.c | 1
-rw-r--r--  drivers/video/backlight/adp8860_bl.c | 1
-rw-r--r--  drivers/video/backlight/adx_bl.c | 1
-rw-r--r--  drivers/video/backlight/apple_bl.c | 241
-rw-r--r--  drivers/video/backlight/atmel-pwm-bl.c | 1
-rw-r--r--  drivers/video/backlight/backlight.c | 24
-rw-r--r--  drivers/video/backlight/corgi_lcd.c | 1
-rw-r--r--  drivers/video/backlight/cr_bllcd.c | 1
-rw-r--r--  drivers/video/backlight/da903x_bl.c | 1
-rw-r--r--  drivers/video/backlight/ep93xx_bl.c | 1
-rw-r--r--  drivers/video/backlight/generic_bl.c | 1
-rw-r--r--  drivers/video/backlight/hp680_bl.c | 1
-rw-r--r--  drivers/video/backlight/jornada720_bl.c | 5
-rw-r--r--  drivers/video/backlight/jornada720_lcd.c | 4
-rw-r--r--  drivers/video/backlight/kb3886_bl.c | 1
-rw-r--r--  drivers/video/backlight/ld9040.c | 819
-rw-r--r--  drivers/video/backlight/ld9040_gamma.h | 200
-rw-r--r--  drivers/video/backlight/locomolcd.c | 1
-rw-r--r--  drivers/video/backlight/max8925_bl.c | 1
-rw-r--r--  drivers/video/backlight/mbp_nvidia_bl.c | 400
-rw-r--r--  drivers/video/backlight/omap1_bl.c | 1
-rw-r--r--  drivers/video/backlight/pcf50633-backlight.c | 1
-rw-r--r--  drivers/video/backlight/progear_bl.c | 1
-rw-r--r--  drivers/video/backlight/pwm_bl.c | 12
-rw-r--r--  drivers/video/backlight/s6e63m0.c | 1
-rw-r--r--  drivers/video/backlight/tosa_bl.c | 1
-rw-r--r--  drivers/video/backlight/wm831x_bl.c | 1
-rw-r--r--  drivers/video/bf54x-lq043fb.c | 1
-rw-r--r--  drivers/video/bfin-t350mcqb-fb.c | 1
-rw-r--r--  drivers/video/imxfb.c | 1
-rw-r--r--  drivers/video/nvidia/nv_backlight.c | 1
-rw-r--r--  drivers/video/omap2/displays/panel-acx565akm.c | 1
-rw-r--r--  drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c | 1
-rw-r--r--  drivers/video/omap2/displays/panel-taal.c | 2
-rw-r--r--  drivers/video/riva/fbdev.c | 1
-rw-r--r--  drivers/video/via/viafbdev.h | 3
-rw-r--r--  fs/9p/acl.c | 4
-rw-r--r--  fs/9p/fid.c | 19
-rw-r--r--  fs/9p/v9fs.h | 6
-rw-r--r--  fs/9p/vfs_file.c | 13
-rw-r--r--  fs/9p/vfs_inode.c | 29
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 22
-rw-r--r--  fs/9p/vfs_super.c | 2
-rw-r--r--  fs/adfs/adfs.h | 25
-rw-r--r--  fs/adfs/dir_f.c | 23
-rw-r--r--  fs/adfs/dir_fplus.c | 119
-rw-r--r--  fs/adfs/inode.c | 62
-rw-r--r--  fs/adfs/super.c | 23
-rw-r--r--  fs/aio.c | 4
-rw-r--r--  fs/binfmt_elf.c | 2
-rw-r--r--  fs/bio.c | 2
-rw-r--r--  fs/btrfs/zlib.c | 3
-rw-r--r--  fs/ceph/debugfs.c | 6
-rw-r--r--  fs/ceph/dir.c | 24
-rw-r--r--  fs/ceph/file.c | 10
-rw-r--r--  fs/ceph/inode.c | 25
-rw-r--r--  fs/ceph/super.c | 9
-rw-r--r--  fs/ceph/super.h | 66
-rw-r--r--  fs/coda/sysctl.c | 8
-rw-r--r--  fs/devpts/inode.c | 2
-rw-r--r--  fs/eventpoll.c | 52
-rw-r--r--  fs/fuse/cuse.c | 12
-rw-r--r--  fs/fuse/dev.c | 27
-rw-r--r--  fs/fuse/dir.c | 38
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/fuse/fuse_i.h | 1
-rw-r--r--  fs/hugetlbfs/inode.c | 3
-rw-r--r--  fs/jffs2/compr_zlib.c | 7
-rw-r--r--  fs/logfs/compr.c | 2
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 108
-rw-r--r--  include/asm-generic/types.h | 27
-rw-r--r--  include/asm-generic/unistd.h | 2
-rw-r--r--  include/linux/backlight.h | 9
-rw-r--r--  include/linux/ceph/ceph_fs.h | 19
-rw-r--r--  include/linux/ceph/libceph.h | 1
-rw-r--r--  include/linux/ceph/osd_client.h | 57
-rw-r--r--  include/linux/ceph/rados.h | 39
-rw-r--r--  include/linux/compaction.h | 9
-rw-r--r--  include/linux/compiler-gcc.h | 8
-rw-r--r--  include/linux/compiler-gcc3.h | 8
-rw-r--r--  include/linux/compiler-gcc4.h | 8
-rw-r--r--  include/linux/crc32.h | 2
-rw-r--r--  include/linux/dw_dmac.h | 44
-rw-r--r--  include/linux/err.h | 8
-rw-r--r--  include/linux/ethtool.h | 34
-rw-r--r--  include/linux/fs.h | 2
-rw-r--r--  include/linux/gfp.h | 2
-rw-r--r--  include/linux/i2c/pxa-i2c.h (renamed from arch/arm/plat-pxa/include/plat/i2c.h) | 0
-rw-r--r--  include/linux/if_ppp.h | 16
-rw-r--r--  include/linux/kbd_kern.h | 2
-rw-r--r--  include/linux/kernel.h | 70
-rw-r--r--  include/linux/kthread.h | 14
-rw-r--r--  include/linux/led-lm3530.h | 107
-rw-r--r--  include/linux/leds.h | 4
-rw-r--r--  include/linux/memcontrol.h | 10
-rw-r--r--  include/linux/mm.h | 14
-rw-r--r--  include/linux/mm_types.h | 8
-rw-r--r--  include/linux/netfilter/ipset/ip_set_getport.h | 10
-rw-r--r--  include/linux/netfilter/nfnetlink_log.h | 4
-rw-r--r--  include/linux/netfilter/nfnetlink_queue.h | 4
-rw-r--r--  include/linux/netfilter/xt_connbytes.h | 4
-rw-r--r--  include/linux/netfilter/xt_quota.h | 2
-rw-r--r--  include/linux/page-flags.h | 2
-rw-r--r--  include/linux/pagemap.h | 5
-rw-r--r--  include/linux/pwm_backlight.h | 3
-rw-r--r--  include/linux/rmap.h | 45
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/sigma.h | 60
-rw-r--r--  include/linux/slab.h | 1
-rw-r--r--  include/linux/slub_def.h | 9
-rw-r--r--  include/linux/smp.h | 3
-rw-r--r--  include/linux/swap.h | 10
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/types.h | 6
-rw-r--r--  include/linux/vmstat.h | 4
-rw-r--r--  include/linux/zlib.h | 11
-rw-r--r--  include/net/ip_vs.h | 2
-rw-r--r--  include/net/snmp.h | 4
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  init/calibrate.c | 102
-rw-r--r--  init/do_mounts.c | 3
-rw-r--r--  init/do_mounts_rd.c | 2
-rw-r--r--  init/main.c | 90
-rw-r--r--  kernel/cgroup.c | 14
-rw-r--r--  kernel/cpu.c | 11
-rw-r--r--  kernel/fork.c | 143
-rw-r--r--  kernel/kallsyms.c | 10
-rw-r--r--  kernel/kthread.c | 31
-rw-r--r--  kernel/module.c | 4
-rw-r--r--  kernel/panic.c | 10
-rw-r--r--  kernel/printk.c | 36
-rw-r--r--  kernel/smp.c | 81
-rw-r--r--  kernel/softirq.c | 5
-rw-r--r--  kernel/stop_machine.c | 6
-rw-r--r--  kernel/watchdog.c | 27
-rw-r--r--  kernel/workqueue.c | 6
-rw-r--r--  lib/Kconfig.debug | 31
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/kstrtox.c | 227
-rw-r--r--  lib/show_mem.c | 9
-rw-r--r--  lib/test-kstrtox.c | 739
-rw-r--r--  lib/vsprintf.c | 157
-rw-r--r--  lib/zlib_deflate/deflate.c | 31
-rw-r--r--  lib/zlib_deflate/defutil.h | 17
-rw-r--r--  mm/Kconfig.debug | 25
-rw-r--r--  mm/compaction.c | 65
-rw-r--r--  mm/filemap.c | 127
-rw-r--r--  mm/huge_memory.c | 20
-rw-r--r--  mm/hugetlb.c | 6
-rw-r--r--  mm/ksm.c | 23
-rw-r--r--  mm/memblock.c | 241
-rw-r--r--  mm/memcontrol.c | 35
-rw-r--r--  mm/memory-failure.c | 2
-rw-r--r--  mm/memory.c | 5
-rw-r--r--  mm/mempolicy.c | 3
-rw-r--r--  mm/migrate.c | 54
-rw-r--r--  mm/oom_kill.c | 52
-rw-r--r--  mm/page-writeback.c | 15
-rw-r--r--  mm/page_alloc.c | 81
-rw-r--r--  mm/page_cgroup.c | 7
-rw-r--r--  mm/pagewalk.c | 24
-rw-r--r--  mm/rmap.c | 80
-rw-r--r--  mm/shmem.c | 4
-rw-r--r--  mm/slab.c | 57
-rw-r--r--  mm/slob.c | 6
-rw-r--r--  mm/slub.c | 372
-rw-r--r--  mm/swap.c | 189
-rw-r--r--  mm/swapfile.c | 372
-rw-r--r--  mm/truncate.c | 22
-rw-r--r--  mm/vmalloc.c | 158
-rw-r--r--  mm/vmscan.c | 36
-rw-r--r--  mm/vmstat.c | 9
-rw-r--r--  net/8021q/vlan_dev.c | 1
-rw-r--r--  net/9p/client.c | 10
-rw-r--r--  net/9p/protocol.c | 6
-rw-r--r--  net/9p/trans_common.c | 10
-rw-r--r--  net/9p/trans_fd.c | 2
-rw-r--r--  net/9p/trans_rdma.c | 6
-rw-r--r--  net/9p/trans_virtio.c | 72
-rw-r--r--  net/9p/util.c | 2
-rw-r--r--  net/appletalk/ddp.c | 3
-rw-r--r--  net/bridge/br_netfilter.c | 3
-rw-r--r--  net/ceph/armor.c | 4
-rw-r--r--  net/ceph/ceph_common.c | 1
-rw-r--r--  net/ceph/osd_client.c | 624
-rw-r--r--  net/core/drop_monitor.c | 2
-rw-r--r--  net/core/ethtool.c | 3
-rw-r--r--  net/core/pktgen.c | 5
-rw-r--r--  net/econet/af_econet.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 5
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 4
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 18
-rw-r--r--  net/ipx/af_ipx.c | 2
-rw-r--r--  net/l2tp/l2tp_eth.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipport.c | 34
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportip.c | 34
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 34
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 30
-rw-r--r--  net/netfilter/ipvs/ip_vs_app.c | 23
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 2
-rw-r--r--  net/socket.c | 114
-rw-r--r--  net/xfrm/xfrm_state.c | 15
-rw-r--r--  net/xfrm/xfrm_user.c | 2
-rwxr-xr-x  scripts/bloat-o-meter | 4
-rwxr-xr-x  scripts/checkpatch.pl | 10
-rwxr-xr-x  scripts/get_maintainer.pl | 11
-rw-r--r--  sound/pci/hda/patch_realtek.c | 27
-rw-r--r--  sound/pci/hda/patch_via.c | 58
-rw-r--r--  sound/soc/codecs/sgtl5000.c | 14
-rw-r--r--  sound/soc/codecs/uda134x.c | 3
-rw-r--r--  sound/soc/samsung/s3c24xx_uda134x.c | 3
-rw-r--r--  sound/soc/soc-core.c | 2
-rw-r--r--  sound/sound_firmware.c | 2
-rw-r--r--  sound/usb/card.c | 22
429 files changed, 12890 insertions, 4343 deletions
diff --git a/Documentation/ABI/stable/sysfs-class-backlight b/Documentation/ABI/stable/sysfs-class-backlight
index 4d637e1c4ff7..70302f370e7e 100644
--- a/Documentation/ABI/stable/sysfs-class-backlight
+++ b/Documentation/ABI/stable/sysfs-class-backlight
@@ -34,3 +34,23 @@ Contact: Richard Purdie <rpurdie@rpsys.net>
 Description:
 		Maximum brightness for <backlight>.
 Users:		HAL
+
+What:		/sys/class/backlight/<backlight>/type
+Date:		September 2010
+KernelVersion:	2.6.37
+Contact:	Matthew Garrett <mjg@redhat.com>
+Description:
+		The type of interface controlled by <backlight>.
+		"firmware": The driver uses a standard firmware interface
+		"platform": The driver uses a platform-specific interface
+		"raw": The driver controls hardware registers directly
+
+		In the general case, when multiple backlight
+		interfaces are available for a single device, firmware
+		control should be preferred to platform control, which
+		should be preferred to raw control. Using a firmware
+		interface reduces the probability of the hardware and
+		the OS independently updating the backlight state.
+		Platform interfaces are mostly a holdover from
+		pre-standardisation of firmware interfaces.
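This merge also adds a matching type field to struct backlight_properties (see include/linux/backlight.h in the diffstat above). Below is a minimal sketch of how a driver might declare its interface type at registration time; the example_bl names are hypothetical and not part of this patch:

	#include <linux/backlight.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/string.h>

	/* ops left empty in this sketch; a real driver fills in
	 * update_status()/get_brightness(). */
	static const struct backlight_ops example_bl_ops = {
	};

	static int example_bl_probe(struct platform_device *pdev)
	{
		struct backlight_properties props;
		struct backlight_device *bd;

		memset(&props, 0, sizeof(props));
		props.type = BACKLIGHT_RAW;	/* we poke hardware registers directly */
		props.max_brightness = 255;

		bd = backlight_device_register("example-bl", &pdev->dev, NULL,
					       &example_bl_ops, &props);
		if (IS_ERR(bd))
			return PTR_ERR(bd);

		platform_set_drvdata(pdev, bd);
		return 0;
	}

A driver that asks ACPI or other firmware to do the work would use BACKLIGHT_FIRMWARE instead, and board-specific hooks BACKLIGHT_PLATFORM.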
diff --git a/Documentation/ABI/testing/configfs-spear-pcie-gadget b/Documentation/ABI/testing/configfs-spear-pcie-gadget
new file mode 100644
index 000000000000..875988146a63
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-spear-pcie-gadget
@@ -0,0 +1,31 @@
+What:		/config/pcie-gadget
+Date:		Feb 2011
+KernelVersion:	2.6.37
+Contact:	Pratyush Anand <pratyush.anand@st.com>
+Description:
+
+	Interface is used to configure selected dual mode PCIe controller
+	as device and then program its various registers to configure it
+	as a particular device type.
+	This interface can be used to show SPEAr's PCIe device capability.
+
+	Nodes are only visible when configfs is mounted. To mount configfs
+	in /config directory use:
+	# mount -t configfs none /config/
+
+	For nth PCIe Device Controller
+	/config/pcie-gadget.n/
+		link		... used to enable ltssm and read its status.
+		int_type	... used to configure and read type of supported
+				    interrupt
+		no_of_msi	... used to configure number of MSI vectors needed
+				    and to read number of MSI vectors granted.
+		inta		... write 1 to assert INTA and 0 to de-assert.
+		send_msi	... write MSI vector to be sent.
+		vendor_id	... used to write and read vendor id (hex)
+		device_id	... used to write and read device id (hex)
+		bar0_size	... used to write and read bar0_size
+		bar0_address	... used to write and read bar0 mapped area in hex.
+		bar0_rw_offset	... used to write and read offset of bar0 where
+				    bar0_data will be written or read.
+		bar0_data	... used to write and read data at bar0_rw_offset.
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index 90a87e2a572b..fa72ccb2282e 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -1,6 +1,6 @@
 What:		/sys/bus/rbd/
 Date:		November 2010
-Contact:	Yehuda Sadeh <yehuda@hq.newdream.net>,
+Contact:	Yehuda Sadeh <yehuda@newdream.net>,
 		Sage Weil <sage@newdream.net>
 Description:
 
diff --git a/Documentation/Changes b/Documentation/Changes
index 4fb88f15f2ef..5f4828a034e3 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -35,7 +35,7 @@ o util-linux 2.10o # fdformat --version
 o  module-init-tools     0.9.10                  # depmod -V
 o  e2fsprogs             1.41.4                  # e2fsck -V
 o  jfsutils              1.1.3                   # fsck.jfs -V
-o  reiserfsprogs         3.6.3                   # reiserfsck -V 2>&1|grep reiserfsprogs
+o  reiserfsprogs         3.6.3                   # reiserfsck -V
 o  xfsprogs              2.6.0                   # xfs_db -V
 o  squashfs-tools        4.0                     # mksquashfs -version
 o  btrfs-progs           0.18                    # btrfsck
@@ -46,9 +46,9 @@ o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version
 o  nfs-utils             1.0.5                   # showmount --version
 o  procps                3.2.0                   # ps --version
 o  oprofile              0.9                     # oprofiled --version
-o  udev                  081                     # udevinfo -V
-o  grub                  0.93                    # grub --version
-o  mcelog                0.6
+o  udev                  081                     # udevd --version
+o  grub                  0.93                    # grub --version || grub-install --version
+o  mcelog                0.6                     # mcelog --version
 o  iptables              1.4.2                   # iptables -V
 
 
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 1cd3478e5834..58b0bf917834 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -168,6 +168,13 @@ Do not unnecessarily use braces where a single statement will do.
 if (condition)
 	action();
 
+and
+
+if (condition)
+	do_this();
+else
+	do_that();
+
 This does not apply if one branch of a conditional statement is a single
 statement. Use braces in both branches.
 
diff --git a/Documentation/filesystems/adfs.txt b/Documentation/filesystems/adfs.txt
index 9e8811f92b84..5949766353f7 100644
--- a/Documentation/filesystems/adfs.txt
+++ b/Documentation/filesystems/adfs.txt
@@ -9,6 +9,9 @@ Mount options for ADFS
 		  will be nnn.  Default 0700.
   othmask=nnn	  The permission mask for ADFS 'other' permissions
 		  will be nnn.  Default 0077.
+  ftsuffix=n	  When ftsuffix=0, no file type suffix will be applied.
+		  When ftsuffix=1, a hexadecimal suffix corresponding to
+		  the RISC OS file type will be added.  Default 0.
 
 Mapping of ADFS permissions to Linux permissions
 ------------------------------------------------
@@ -55,3 +58,18 @@ Mapping of ADFS permissions to Linux permissions
 
   You can therefore tailor the permission translation to whatever you
   desire the permissions should be under Linux.
+
+RISC OS file type suffix
+------------------------
+
+  RISC OS file types are stored in bits 19..8 of the file load address.
+
+  To enable non-RISC OS systems to be used to store files without losing
+  file type information, a file naming convention was devised (initially
+  for use with NFS) such that a hexadecimal suffix of the form ,xyz
+  denoted the file type: e.g. BasicFile,ffb is a BASIC (0xffb) file.  This
+  naming convention is now also used by RISC OS emulators such as RPCEmu.
+
+  Mounting an ADFS disc with option ftsuffix=1 will cause appropriate file
+  type suffixes to be appended to file names read from a directory.  If the
+  ftsuffix option is zero or omitted, no file type suffixes will be added.
diff --git a/Documentation/i2c/busses/i2c-diolan-u2c b/Documentation/i2c/busses/i2c-diolan-u2c
new file mode 100644
index 000000000000..30fe4bb9a069
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-diolan-u2c
@@ -0,0 +1,26 @@
+Kernel driver i2c-diolan-u2c
+
+Supported adapters:
+  * Diolan U2C-12 I2C-USB adapter
+    Documentation:
+	http://www.diolan.com/i2c/u2c12.html
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+Description
+-----------
+
+This is the driver for the Diolan U2C-12 USB-I2C adapter.
+
+The Diolan U2C-12 I2C-USB Adapter provides a low cost solution to connect
+a computer to I2C slave devices using a USB interface. It also supports
+connectivity to SPI devices.
+
+This driver only supports the I2C interface of U2C-12. The driver does not use
+interrupts.
+
+
+Module parameters
+-----------------
+
+* frequency: I2C bus frequency
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d18a9e12152a..c357a31411cd 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -872,6 +872,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			If specified, z/VM IUCV HVC accepts connections
 			from listed z/VM user IDs only.
 
+	keep_bootcon	[KNL]
+			Do not unregister boot console at start. This is only
+			useful for debugging when something happens in the window
+			between unregistering the boot console and initializing
+			the real console.
+
 	i2c_bus=	[HW] Override the default board specific I2C bus speed
 			or register an additional I2C bus that is not
 			registered from board initialization code.
@@ -1597,11 +1603,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Format: [state][,regs][,debounce][,die]
 
 	nmi_watchdog=	[KNL,BUGS=X86] Debugging features for SMP kernels
-			Format: [panic,][num]
+			Format: [panic,][nopanic,][num]
 			Valid num: 0
 			0 - turn nmi_watchdog off
 			When panic is specified, panic when an NMI watchdog
-			timeout occurs.
+			timeout occurs (or 'nopanic' to override the opposite
+			default).
 			This is useful when you use a panic=... timeout and
 			need the box quickly up again.
 
@@ -1825,6 +1832,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			perfmon on Intel CPUs instead of the
 			CPU specific event set.
 
+	oops=panic	Always panic on oopses. Default is to just kill the process,
+			but there is a small probability of deadlocking the machine.
+			This will also cause panics on machine check exceptions.
+			Useful together with panic=30 to trigger a reboot.
+
 	OSS		[HW,OSS]
 			See Documentation/sound/oss/oss-parameters.txt
 
diff --git a/Documentation/misc-devices/spear-pcie-gadget.txt b/Documentation/misc-devices/spear-pcie-gadget.txt
new file mode 100644
index 000000000000..02c13ef5e908
--- /dev/null
+++ b/Documentation/misc-devices/spear-pcie-gadget.txt
@@ -0,0 +1,130 @@
+SPEAr PCIe Gadget Driver:
+
+Author
+=============
+Pratyush Anand (pratyush.anand@st.com)
+
+Location
+============
+drivers/misc/spear13xx_pcie_gadget.c
+
+Supported Chip:
+===================
+SPEAr1300
+SPEAr1310
+
+Menuconfig option:
+==========================
+Device Drivers
+	Misc devices
+		PCIe gadget support for SPEAr13XX platform
+Purpose
+===========
+This driver has several nodes which can be read/written by a configfs
+interface. Its main purpose is to configure the selected dual mode PCIe
+controller as a device and then program its various registers to configure
+it as a particular device type. This driver can be used to show SPEAr's
+PCIe device capability.
+
+Description of different nodes:
+=================================
+
+read behavior of nodes:
+------------------------------
+link		:gives ltssm status.
+int_type	:type of supported interrupt
+no_of_msi	:zero if MSI is not enabled by host. A positive value is the
+		 number of MSI vectors granted.
+vendor_id	:returns programmed vendor id (hex)
+device_id	:returns programmed device id (hex)
+bar0_size	:returns size of bar0 in hex.
+bar0_address	:returns address of bar0 mapped area in hex.
+bar0_rw_offset	:returns offset of bar0 for which bar0_data will return value.
+bar0_data	:returns data at bar0_rw_offset.
+
+write behavior of nodes:
+------------------------------
+link		:write UP to enable ltssm, DOWN to disable.
+int_type	:write interrupt type to be configured (int_type could be
+		 INTA, MSI or NO_INT). Select MSI only when you have programmed
+		 the no_of_msi node.
+no_of_msi	:number of MSI vectors needed.
+inta		:write 1 to assert INTA and 0 to de-assert.
+send_msi	:write MSI vector to be sent.
+vendor_id	:write vendor id (hex) to be programmed.
+device_id	:write device id (hex) to be programmed.
+bar0_size	:write size of bar0 in hex. Default bar0 size is 1000 (hex)
+		 bytes.
+bar0_address	:write address of bar0 mapped area in hex. (Default mapping of
+		 bar0 is SYSRAM1 (E0800000).) Always program bar size before bar
+		 address. The kernel might modify bar size and address for
+		 alignment, so read back bar size and address after writing to
+		 cross check.
+bar0_rw_offset	:write offset of bar0 for which bar0_data will write value.
+bar0_data	:write data to be written at bar0_rw_offset.
+
+Node programming example
+===========================
+Program all PCIe registers in such a way that when this device is connected
+to the PCIe host, the host sees this device as 1MB RAM.
+# mount -t configfs none /config
+For the nth PCIe Device Controller:
+# cd /config/pcie_gadget.n/
+Now you have all the nodes in this directory.
+Program vendor id as 0x104a:
+# echo 104A >> vendor_id
+
+Program device id as 0xCD80:
+# echo CD80 >> device_id
+
+Program BAR0 size as 1MB:
+# echo 100000 >> bar0_size
+
+Check for programmed bar0 size:
+# cat bar0_size
+
+Program BAR0 address as DDR (0x2100000). This is the physical address of
+memory, which is to be made visible to the PCIe host. Similarly any other
+peripheral can also be made visible to the PCIe host. E.g., if you program
+the base address of a UART as the BAR0 address, then when this device is
+connected to a host, it will be visible as a UART.
+# echo 2100000 >> bar0_address
+
+Program interrupt type INTA:
+# echo INTA >> int_type
+
+Go for link up now:
+# echo UP >> link
+
+It must be ensured that the host is initialized and starts to search for
+PCIe devices on its port only once link up is done on the gadget.
+
+/* wait till link is up */
+# cat link
+Wait till it returns UP.
+
+To assert INTA:
+# echo 1 >> inta
+
+To de-assert INTA:
+# echo 0 >> inta
+
+If MSI is to be used as interrupt, program the number of MSI vectors needed
+(say 4):
+# echo 4 >> no_of_msi
+
+Select MSI as interrupt type:
+# echo MSI >> int_type
+
+Go for link up now:
+# echo UP >> link
+
+Wait till link is up:
+# cat link
+An application can repetitively read this node till link is found UP. It can
+sleep between two reads.
+
+Wait till MSI is enabled:
+# cat no_of_msi
+Should return 4 (the number of requested MSI vectors).
+
+To send MSI vector 2:
+# echo 2 >> send_msi
+# cd -
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index cc96ee2666f2..7445caa26d05 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -32,8 +32,20 @@
 #include <sys/types.h>
 #include <sys/errno.h>
 #include <sys/fcntl.h>
+#include <sys/mount.h>
+#include <sys/statfs.h>
+#include "../../include/linux/magic.h"
 
 
+#ifndef MAX_PATH
+# define MAX_PATH 256
+#endif
+
+#ifndef STR
+# define _STR(x) #x
+# define STR(x) _STR(x)
+#endif
+
 /*
  * pagemap kernel ABI bits
  */
@@ -152,6 +164,12 @@ static const char *page_flag_names[] = {
 };
 
 
+static const char *debugfs_known_mountpoints[] = {
+	"/sys/kernel/debug",
+	"/debug",
+	0,
+};
+
 /*
  * data structures
  */
@@ -184,7 +202,7 @@ static int kpageflags_fd;
 static int opt_hwpoison;
 static int opt_unpoison;
 
-static const char hwpoison_debug_fs[] = "/debug/hwpoison";
+static char hwpoison_debug_fs[MAX_PATH+1];
 static int hwpoison_inject_fd;
 static int hwpoison_forget_fd;
 
@@ -464,21 +482,100 @@ static uint64_t kpageflags_flags(uint64_t flags)
 	return flags;
 }
 
+/* verify that a mountpoint is actually a debugfs instance */
+static int debugfs_valid_mountpoint(const char *debugfs)
+{
+	struct statfs st_fs;
+
+	if (statfs(debugfs, &st_fs) < 0)
+		return -ENOENT;
+	else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+		return -ENOENT;
+
+	return 0;
+}
+
+/* find the path to the mounted debugfs */
+static const char *debugfs_find_mountpoint(void)
+{
+	const char **ptr;
+	char type[100];
+	FILE *fp;
+
+	ptr = debugfs_known_mountpoints;
+	while (*ptr) {
+		if (debugfs_valid_mountpoint(*ptr) == 0) {
+			strcpy(hwpoison_debug_fs, *ptr);
+			return hwpoison_debug_fs;
+		}
+		ptr++;
+	}
+
+	/* give up and parse /proc/mounts */
+	fp = fopen("/proc/mounts", "r");
+	if (fp == NULL)
+		perror("Can't open /proc/mounts for read");
+
+	while (fscanf(fp, "%*s %"
+		      STR(MAX_PATH)
+		      "s %99s %*s %*d %*d\n",
+		      hwpoison_debug_fs, type) == 2) {
+		if (strcmp(type, "debugfs") == 0)
+			break;
+	}
+	fclose(fp);
+
+	if (strcmp(type, "debugfs") != 0)
+		return NULL;
+
+	return hwpoison_debug_fs;
+}
+
+/* mount the debugfs somewhere if it's not mounted */
+
+static void debugfs_mount(void)
+{
+	const char **ptr;
+
+	/* see if it's already mounted */
+	if (debugfs_find_mountpoint())
+		return;
+
+	ptr = debugfs_known_mountpoints;
+	while (*ptr) {
+		if (mount(NULL, *ptr, "debugfs", 0, NULL) == 0) {
+			/* save the mountpoint */
+			strcpy(hwpoison_debug_fs, *ptr);
+			break;
+		}
+		ptr++;
+	}
+
+	if (*ptr == NULL) {
+		perror("mount debugfs");
+		exit(EXIT_FAILURE);
+	}
+}
+
 /*
  * page actions
  */
 
 static void prepare_hwpoison_fd(void)
 {
-	char buf[100];
+	char buf[MAX_PATH + 1];
+
+	debugfs_mount();
 
 	if (opt_hwpoison && !hwpoison_inject_fd) {
-		sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs);
+		snprintf(buf, MAX_PATH, "%s/hwpoison/corrupt-pfn",
+			hwpoison_debug_fs);
 		hwpoison_inject_fd = checked_open(buf, O_WRONLY);
 	}
 
 	if (opt_unpoison && !hwpoison_forget_fd) {
-		sprintf(buf, "%s/unpoison-pfn", hwpoison_debug_fs);
+		snprintf(buf, MAX_PATH, "%s/hwpoison/unpoison-pfn",
+			hwpoison_debug_fs);
 		hwpoison_forget_fd = checked_open(buf, O_WRONLY);
 	}
 }
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 48c13b8ab90c..092e596a1301 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -293,11 +293,6 @@ IOMMU (input/output memory management unit)
 
 Debugging
 
-  oops=panic Always panic on oopses. Default is to just kill the process,
-	     but there is a small probability of deadlocking the machine.
-	     This will also cause panics on machine check exceptions.
-	     Useful together with panic=30 to trigger a reboot.
-
   kstack=N	Print N words from the kernel stack in oops dumps.
 
   pagefaulttrace  Dump all page faults. Only useful for extreme debugging
diff --git a/MAINTAINERS b/MAINTAINERS
index 38077a656820..e11953dc8fa3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -72,7 +72,7 @@ Descriptions of section entries:
 	L: Mailing list that is relevant to this area
 	W: Web-page with status/info
 	Q: Patchwork web based patch tracking system site
-	T: SCM tree type and location. Type is one of: git, hg, quilt, stgit.
+	T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
 	S: Status, one of the following:
 	   Supported:	Someone is actually paid to look after this.
 	   Maintained:	Someone actually looks after it.
@@ -288,35 +288,35 @@ F: sound/pci/ad1889.*
 AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/AD5254
+W:	http://wiki.analog.com/AD5254
 S:	Supported
 F:	drivers/misc/ad525x_dpot.c
 
 AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/AD5398
+W:	http://wiki.analog.com/AD5398
 S:	Supported
 F:	drivers/regulator/ad5398.c
 
 AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/AD7142
+W:	http://wiki.analog.com/AD7142
 S:	Supported
 F:	drivers/input/misc/ad714x.c
 
 AD7877 TOUCHSCREEN DRIVER
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/AD7877
+W:	http://wiki.analog.com/AD7877
 S:	Supported
 F:	drivers/input/touchscreen/ad7877.c
 
 AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/AD7879
+W:	http://wiki.analog.com/AD7879
 S:	Supported
 F:	drivers/input/touchscreen/ad7879.c
 
@@ -342,18 +342,18 @@ F: drivers/net/wireless/adm8211.*
 ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/ADP5520
+W:	http://wiki.analog.com/ADP5520
 S:	Supported
 F:	drivers/mfd/adp5520.c
 F:	drivers/video/backlight/adp5520_bl.c
-F:	drivers/led/leds-adp5520.c
+F:	drivers/leds/leds-adp5520.c
 F:	drivers/gpio/adp5520-gpio.c
 F:	drivers/input/keyboard/adp5520-keys.c
 
 ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/ADP5588
+W:	http://wiki.analog.com/ADP5588
 S:	Supported
 F:	drivers/input/keyboard/adp5588-keys.c
 F:	drivers/gpio/adp5588-gpio.c
@@ -361,7 +361,7 @@ F: drivers/gpio/adp5588-gpio.c
 ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/ADP8860
+W:	http://wiki.analog.com/ADP8860
 S:	Supported
 F:	drivers/video/backlight/adp8860_bl.c
 
@@ -388,7 +388,7 @@ F: drivers/hwmon/adt7475.c
 ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
 M:	Michael Hennerich <michael.hennerich@analog.com>
 L:	device-driver-devel@blackfin.uclinux.org
-W:	http://wiki-analog.com/ADXL345
+W:	http://wiki.analog.com/ADXL345
 S:	Supported
 F:	drivers/input/misc/adxl34x.c
 
@@ -528,11 +528,9 @@ F: drivers/infiniband/hw/amso1100/
 ANALOG DEVICES INC ASOC CODEC DRIVERS
 L:	device-driver-devel@blackfin.uclinux.org
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-W:	http://wiki-analog.com/
+W:	http://wiki.analog.com/
 S:	Supported
 F:	sound/soc/codecs/ad1*
-F:	sound/soc/codecs/adau*
-F:	sound/soc/codecs/adav*
 F:	sound/soc/codecs/ssm*
 
 ANALOG DEVICES INC ASOC DRIVERS
@@ -697,8 +695,8 @@ S: Maintained
 ARM/CLKDEV SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-F:	arch/arm/common/clkdev.c
 F:	arch/arm/include/asm/clkdev.h
+F:	drivers/clk/clkdev.c
 
 ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT
 M:	Mike Rapoport <mike@compulab.co.il>
@@ -919,6 +917,7 @@ F: drivers/mmc/host/msm_sdcc.c
 F:	drivers/mmc/host/msm_sdcc.h
 F:	drivers/tty/serial/msm_serial.h
 F:	drivers/tty/serial/msm_serial.c
+F:	drivers/platform/msm/
 T:	git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
 S:	Maintained
 
@@ -1078,7 +1077,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
 ARM/TETON BGA MACHINE SUPPORT
-M:	Mark F. Brown <mark.brown314@gmail.com>
+M:	"Mark F. Brown" <mark.brown314@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
@@ -1482,7 +1481,7 @@ F: drivers/mtd/devices/block2mtd.c
 
 BLUETOOTH DRIVERS
 M:	Marcel Holtmann <marcel@holtmann.org>
-M:	Gustavo F. Padovan <padovan@profusion.mobi>
+M:	"Gustavo F. Padovan" <padovan@profusion.mobi>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-2.6.git
@@ -1491,7 +1490,7 @@ F: drivers/bluetooth/
 
 BLUETOOTH SUBSYSTEM
 M:	Marcel Holtmann <marcel@holtmann.org>
-M:	Gustavo F. Padovan <padovan@profusion.mobi>
+M:	"Gustavo F. Padovan" <padovan@profusion.mobi>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth-2.6.git
@@ -2138,6 +2137,12 @@ F: Documentation/serial/digiepca.txt
 F:	drivers/char/epca*
 F:	drivers/char/digi*
 
+DIOLAN U2C-12 I2C DRIVER
+M:	Guenter Roeck <guenter.roeck@ericsson.com>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	drivers/i2c/busses/i2c-diolan-u2c.c
+
 DIRECTORY NOTIFICATION (DNOTIFY)
 M:	Eric Paris <eparis@parisplace.org>
 S:	Maintained
@@ -2475,8 +2480,7 @@ F: include/linux/cb710.h
 ENE KB2426 (ENE0100/ENE020XX) INFRARED RECEIVER
 M:	Maxim Levitsky <maximlevitsky@gmail.com>
 S:	Maintained
-F:	drivers/media/IR/ene_ir.c
-F:	drivers/media/IR/ene_ir.h
+F:	drivers/media/rc/ene_ir.*
 
 EPSON 1355 FRAMEBUFFER DRIVER
 M:	Christopher Hoover <ch@murgatroid.com>
@@ -2820,7 +2824,6 @@ F: include/linux/gigaset_dev.h
 
 GPIO SUBSYSTEM
 M:	Grant Likely <grant.likely@secretlab.ca>
-L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 F:	Documentation/gpio/gpio.txt
@@ -2843,7 +2846,6 @@ F: drivers/platform/x86/hdaps.c
 HWPOISON MEMORY FAILURE HANDLING
 M:	Andi Kleen <andi@firstfloor.org>
 L:	linux-mm@kvack.org
-L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
 S:	Maintained
 F:	mm/memory-failure.c
@@ -2944,7 +2946,7 @@ F: Documentation/blockdev/cpqarray.txt
 F:	drivers/block/cpqarray.*
 
 HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
-M:	Stephen M. Cameron <scameron@beardog.cce.hp.com>
+M:	"Stephen M. Cameron" <scameron@beardog.cce.hp.com>
 L:	iss_storagedev@hp.com
 S:	Supported
 F:	Documentation/scsi/hpsa.txt
@@ -3001,7 +3003,7 @@ F: kernel/hrtimer.c
 F:	kernel/time/clockevents.c
 F:	kernel/time/tick*.*
 F:	kernel/time/timer_*.c
-F	include/linux/clockevents.h
+F:	include/linux/clockevents.h
 F:	include/linux/hrtimer.h
 
 HIGH-SPEED SCC DRIVER FOR AX.25
@@ -3174,15 +3176,6 @@ L: linux-pm@lists.linux-foundation.org
 S:	Supported
 F:	drivers/idle/i7300_idle.c
 
-IEEE 1394 SUBSYSTEM
-M:	Stefan Richter <stefanr@s5r6.in-berlin.de>
-L:	linux1394-devel@lists.sourceforge.net
-W:	http://ieee1394.wiki.kernel.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
-S:	Obsolete
-F:	Documentation/debugging-via-ohci1394.txt
-F:	drivers/ieee1394/
-
 IEEE 802.15.4 SUBSYSTEM
 M:	Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
 M:	Sergey Lapin <slapin@ossfans.org>
@@ -4226,7 +4219,7 @@ F: Documentation/serial/moxa-smartio
 F:	drivers/char/mxser.*
 
 MSI LAPTOP SUPPORT
-M:	Lee, Chun-Yi <jlee@novell.com>
+M:	"Lee, Chun-Yi" <jlee@novell.com>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/msi-laptop.c
@@ -4721,7 +4714,6 @@ F: drivers/i2c/busses/i2c-pasemi.c
 
 PADATA PARALLEL EXECUTION MECHANISM
 M:	Steffen Klassert <steffen.klassert@secunet.com>
-L:	linux-kernel@vger.kernel.org
 L:	linux-crypto@vger.kernel.org
 S:	Maintained
 F:	kernel/padata.c
@@ -4871,7 +4863,6 @@ F: include/crypto/pcrypt.h
 PER-CPU MEMORY ALLOCATOR
 M:	Tejun Heo <tj@kernel.org>
 M:	Christoph Lameter <cl@linux-foundation.org>
-L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
 S:	Maintained
 F:	include/linux/percpu*.h
@@ -5430,7 +5421,6 @@ S: Supported
 F:	include/linux/clocksource.h
 F:	include/linux/time.h
 F:	include/linux/timex.h
-F:	include/linux/timekeeping.h
 F:	kernel/time/clocksource.c
 F:	kernel/time/time*.c
 F:	kernel/time/ntp.c
@@ -5518,7 +5508,7 @@ SCx200 CPU SUPPORT
 M:	Jim Cromie <jim.cromie@gmail.com>
 S:	Odd Fixes
 F:	Documentation/i2c/busses/scx200_acb
-F:	arch/x86/kernel/scx200_32.c
+F:	arch/x86/platform/scx200/
 F:	drivers/watchdog/scx200_wdt.c
 F:	drivers/i2c/busses/scx200*
 F:	drivers/mtd/maps/scx200_docflash.c
@@ -5662,24 +5652,13 @@ M: Robin Holt <holt@sgi.com>
 S:	Maintained
 F:	drivers/misc/sgi-xp/
 
-SHARP LH SUPPORT (LH7952X & LH7A40X)
-M:	Marc Singer <elf@buici.com>
-W:	http://projects.buici.com/arm
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
-F:	Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
-F:	arch/arm/mach-lh7a40x/
-F:	drivers/tty/serial/serial_lh7a40x.c
-F:	drivers/usb/gadget/lh7a40*
-F:	drivers/usb/host/ohci-lh7a40*
-
 SIMPLE FIRMWARE INTERFACE (SFI)
 M:	Len Brown <lenb@kernel.org>
 L:	sfi-devel@simplefirmware.org
 W:	http://simplefirmware.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-sfi-2.6.git
 S:	Supported
-F:	arch/x86/kernel/*sfi*
+F:	arch/x86/platform/sfi/
 F:	drivers/sfi/
 F:	include/linux/sfi*.h
 
@@ -6487,12 +6466,11 @@ S: Maintained
 F:	drivers/net/usb/rtl8150.c
 
 USB SE401 DRIVER
-M:	Jeroen Vreeken <pe1rxq@amsat.org>
 L:	linux-usb@vger.kernel.org
 W:	http://www.chello.nl/~j.vreeken/se401/
-S:	Maintained
+S:	Orphan
 F:	Documentation/video4linux/se401.txt
-F:	drivers/media/video/se401.*
+F:	drivers/staging/se401/
 
 USB SERIAL BELKIN F5U103 DRIVER
 M:	William Greathouse <wgreathouse@smva.com>
@@ -6842,7 +6820,7 @@ F: drivers/scsi/wd7000.c
 WINBOND CIR DRIVER
 M:	David Härdeman <david@hardeman.nu>
 S:	Maintained
-F:	drivers/input/misc/winbond-cir.c
+F:	drivers/media/rc/winbond-cir.c
 
 WIMAX STACK
 M:	Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
@@ -6919,7 +6897,6 @@ F: sound/soc/codecs/wm*
 
 WORKQUEUE
 M:	Tejun Heo <tj@kernel.org>
-L:	linux-kernel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git
 S:	Maintained
 F:	include/linux/workqueue.h
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index bd621ecd1eb3..e46e50382d28 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -27,7 +27,6 @@ typedef unsigned int umode_t;
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
-typedef u64 dma_addr_t;
 typedef u64 dma64_addr_t;
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
index 345df01534a4..c684e3769f47 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/asm/types.h
@@ -18,9 +18,6 @@ typedef unsigned short umode_t;
 
 #ifndef __ASSEMBLY__
 
-/* Dma addresses are 32-bits wide. */
-
-typedef u32 dma_addr_t;
 typedef u32 dma64_addr_t;
 
 #endif /* __ASSEMBLY__ */
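The per-architecture dma_addr_t typedefs removed here (and in the other arch/*/include/asm/types.h entries in the diffstat) are replaced by one shared definition in include/asm-generic/types.h, whose diff is not shown in this excerpt. A sketch of what the consolidated definition presumably looks like, keyed off the architecture's DMA address width:

	/* include/asm-generic/types.h (sketch, not the verbatim patch) */
	#ifndef __ASSEMBLY__
	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	typedef u64 dma_addr_t;
	#else
	typedef u32 dma_addr_t;
	#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
	#endif /* __ASSEMBLY__ */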
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index 4aec493640b4..2cbf6df09b82 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -11,8 +11,8 @@ extern void __init mmp2_init_irq(void);
 extern void mmp2_clear_pmic_int(void);
 
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <mach/devices.h>
-#include <plat/i2c.h>
 
 extern struct pxa_device_desc mmp2_device_uart1;
 extern struct pxa_device_desc mmp2_device_uart2;
diff --git a/arch/arm/mach-mmp/include/mach/pxa168.h b/arch/arm/mach-mmp/include/mach/pxa168.h
index 1801e4206232..a52b3d2f325c 100644
--- a/arch/arm/mach-mmp/include/mach/pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/pxa168.h
@@ -8,8 +8,8 @@ extern void __init pxa168_init_irq(void);
 extern void pxa168_clear_keypad_wakeup(void);
 
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <mach/devices.h>
-#include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
 #include <video/pxa168fb.h>
 #include <plat/pxa27x_keypad.h>
diff --git a/arch/arm/mach-mmp/include/mach/pxa910.h b/arch/arm/mach-mmp/include/mach/pxa910.h
index f13c49d6f8dc..91be75591398 100644
--- a/arch/arm/mach-mmp/include/mach/pxa910.h
+++ b/arch/arm/mach-mmp/include/mach/pxa910.h
@@ -7,8 +7,8 @@ extern struct sys_timer pxa910_timer;
 extern void __init pxa910_init_irq(void);
 
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <mach/devices.h>
-#include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
 
 extern struct pxa_device_desc pxa910_device_uart1;
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/arch/arm/mach-mxs/include/mach/dma.h
new file mode 100644
index 000000000000..7f4aeeaba8df
--- /dev/null
+++ b/arch/arm/mach-mxs/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_MXS_DMA_H__
+#define __MACH_MXS_DMA_H__
+
+struct mxs_dma_data {
+	int chan_irq;
+};
+
+static inline int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
+}
+
+static inline int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
+}
+
+#endif /* __MACH_MXS_DMA_H__ */
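The two inline helpers above give dmaengine clients a way to tell the APBH and APBX controllers apart when hunting for a channel. A minimal sketch of a filter callback built on them, assuming a hypothetical client whose platform code hands it a struct mxs_dma_data (the function name and the chan->private hand-off are illustrative, not part of this header):

	static bool example_mxs_dma_filter(struct dma_chan *chan, void *param)
	{
		struct mxs_dma_data *data = param;	/* hypothetical platform data */

		/* Claim only channels that belong to the APBH controller. */
		if (!mxs_dma_is_apbh(chan))
			return false;

		chan->private = data;	/* lets the mxs-dma driver see chan_irq */
		return true;
	}

Such a filter would be passed to dma_request_channel() together with a capability mask that has DMA_SLAVE set.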
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index e194d928cdaa..d2af73321dae 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -27,6 +27,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/types.h>
 #include <linux/i2c/pcf857x.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/physmap.h>
 #include <linux/regulator/max1586.h>
@@ -51,8 +52,6 @@
 #include <mach/irda.h>
 #include <mach/ohci.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 #include "devices.h"
 
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 7984268508b6..bfca7ed2fea3 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -29,6 +29,7 @@
 
 #include <linux/i2c.h>
 #include <linux/i2c/pca953x.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <linux/mfd/da903x.h>
 #include <linux/regulator/machine.h>
@@ -48,7 +49,6 @@
 #include <mach/pxafb.h>
 #include <mach/mmc.h>
 #include <mach/ohci.h>
-#include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
 #include <mach/audio.h>
 #include <mach/pxa3xx-u2d.h>
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c
index 28f667e52ef9..81c3c433e2d6 100644
--- a/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/arch/arm/mach-pxa/colibri-evalboard.c
@@ -20,6 +20,7 @@
 #include <mach/hardware.h>
 #include <asm/mach/arch.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <mach/pxa27x.h>
 #include <mach/colibri.h>
@@ -27,8 +28,6 @@
 #include <mach/ohci.h>
 #include <mach/pxa27x-udc.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 #include "devices.h"
 
diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
index 07b62a096f17..ee797397dc5b 100644
--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
+++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/pwm_backlight.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/sysdev.h>
 
 #include <asm/irq.h>
@@ -33,8 +34,6 @@
 #include <mach/pxa27x-udc.h>
 #include <mach/pxafb.h>
 
-#include <plat/i2c.h>
-
 #include "devices.h"
 #include "generic.h"
 
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index a5452a3a276d..d4e705caefea 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -24,6 +24,7 @@
 #include <linux/gpio.h>
 #include <linux/backlight.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/io.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
@@ -45,7 +46,6 @@
 #include <asm/mach/irq.h>
 
 #include <mach/pxa25x.h>
-#include <plat/i2c.h>
 #include <mach/irda.h>
 #include <mach/mmc.h>
 #include <mach/udc.h>
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index a305424a967d..0481c29a70e8 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -17,12 +17,12 @@
 #include <linux/mtd/partitions.h>
 #include <linux/sm501.h>
 #include <linux/smsc911x.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <mach/csb726.h>
 #include <mach/mfp-pxa27x.h>
-#include <plat/i2c.h>
 #include <mach/mmc.h>
 #include <mach/ohci.h>
 #include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 4c766e3b4af3..c4bf08b3eb61 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -4,6 +4,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/spi/pxa2xx_spi.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/pmu.h>
 #include <mach/udc.h>
@@ -16,7 +17,6 @@
 #include <mach/camera.h>
 #include <mach/audio.h>
 #include <mach/hardware.h>
-#include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
 
 #include "devices.h"
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index a78bb3097739..b411d7cbf5a1 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -31,6 +31,7 @@
 #include <linux/apm-emulation.h>
 #include <linux/i2c.h>
 #include <linux/i2c/pca953x.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/regulator/userspace-consumer.h>
 
 #include <media/soc_camera.h>
@@ -45,7 +46,6 @@
 #include <mach/ohci.h>
 #include <mach/mmc.h>
 #include <plat/pxa27x_keypad.h>
-#include <plat/i2c.h>
 #include <mach/camera.h>
 
 #include "generic.h"
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 87cec0abe5b0..93f05e024313 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -20,6 +20,7 @@
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/leds-lp3944.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <media/soc_camera.h>
 
@@ -30,7 +31,6 @@
 #include <mach/pxa27x.h>
 #include <mach/pxafb.h>
 #include <mach/ohci.h>
-#include <plat/i2c.h>
 #include <mach/hardware.h>
 #include <plat/pxa27x_keypad.h>
 #include <mach/camera.h>
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index a908e0a5f396..6de0ad0eea65 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -35,6 +35,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/usb/gpio_vbus.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -42,7 +43,6 @@
 
 #include <mach/pxa27x.h>
 #include <mach/hx4700.h>
-#include <plat/i2c.h>
 #include <mach/irda.h>
 
 #include <video/platform_lcd.h>
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index ccb7bfad17ca..87c1ed9ccd2f 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -28,6 +28,7 @@
 #include <linux/leds.h>
 #include <linux/mfd/da903x.h>
 #include <linux/i2c/max732x.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -45,7 +46,6 @@
 #include <mach/mmc.h>
 #include <plat/pxa27x_keypad.h>
 #include <mach/littleton.h>
-#include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
 
 #include "generic.h"
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 41198f0dc3ac..5535991c4a3c 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -28,6 +28,7 @@
 #include <linux/regulator/bq24022.h>
 #include <linux/regulator/machine.h>
 #include <linux/usb/gpio_vbus.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -36,7 +37,6 @@
 #include <mach/pxa27x.h>
 #include <mach/magician.h>
 #include <mach/pxafb.h>
-#include <plat/i2c.h>
 #include <mach/mmc.h>
 #include <mach/irda.h>
 #include <mach/ohci.h>
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d4b6f2375f2c..f9542220595a 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -27,6 +27,7 @@
 #include <linux/gpio_keys.h>
 #include <linux/pwm_backlight.h>
 #include <linux/smc91x.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -46,7 +47,6 @@
 #include <mach/mainstone.h>
 #include <mach/audio.h>
 #include <mach/pxafb.h>
-#include <plat/i2c.h>
 #include <mach/mmc.h>
 #include <mach/irda.h>
 #include <mach/ohci.h>
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index faafea3542fb..78d98a8607ec 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -39,6 +39,7 @@
 #include <linux/usb/gpio_vbus.h>
 #include <linux/regulator/max1586.h>
 #include <linux/slab.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -50,7 +51,6 @@
 #include <mach/mmc.h>
 #include <mach/udc.h>
 #include <mach/pxa27x-udc.h>
-#include <plat/i2c.h>
 #include <mach/camera.h>
 #include <mach/audio.h>
 #include <media/soc_camera.h>
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index cdf7f41e2bb3..b5a8fd3fce04 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -22,8 +22,8 @@
 #include <linux/serial_8250.h>
 #include <linux/dm9000.h>
 #include <linux/gpio.h>
+#include <linux/i2c/pxa-i2c.h>
 
-#include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
 
 #include <mach/pxafb.h>
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 35572c427fa8..72adb3ae2b43 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -22,6 +22,7 @@
 #include <linux/power_supply.h>
 #include <linux/usb/gpio_vbus.h>
 #include <linux/regulator/max1586.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -36,8 +37,6 @@
 #include <mach/palmasoc.h>
 #include <mach/palm27x.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 #include "devices.h"
 
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 90820faa711a..9dbf3ccd4150 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -23,12 +23,12 @@
 #include <linux/irq.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/pwm_backlight.h>
 
 #include <media/soc_camera.h>
 
 #include <asm/gpio.h>
-#include <plat/i2c.h>
 #include <mach/camera.h>
 #include <asm/mach/map.h>
 #include <mach/pxa27x.h>
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 4f0ff1ab623d..35353af345d5 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -23,6 +23,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/pxa2xx_spi.h>
@@ -44,7 +45,6 @@
 #include <mach/irda.h>
 #include <mach/poodle.h>
 #include <mach/pxafb.h>
-#include <plat/i2c.h>
 
 #include <asm/hardware/scoop.h>
 #include <asm/hardware/locomo.h>
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 28b11be00b3f..1cb5d0f9723f 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -19,6 +19,7 @@
 #include <linux/sysdev.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
@@ -32,8 +33,6 @@
 #include <mach/dma.h>
 #include <mach/smemc.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 #include "devices.h"
 #include "clock.h"
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 1230343d9c70..f374247b8466 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/sysdev.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
@@ -32,7 +33,6 @@
 #include <mach/dma.h>
 #include <mach/regs-intc.h>
 #include <mach/smemc.h>
-#include <plat/i2c.h>
 
 #include "generic.h"
 #include "devices.h"
diff --git a/arch/arm/mach-pxa/pxa95x.c b/arch/arm/mach-pxa/pxa95x.c
index 437980f72710..23b229bd06e9 100644
--- a/arch/arm/mach-pxa/pxa95x.c
+++ b/arch/arm/mach-pxa/pxa95x.c
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/sysdev.h>
@@ -27,7 +28,6 @@
 #include <mach/pm.h>
 #include <mach/dma.h>
 #include <mach/regs-intc.h>
-#include <plat/i2c.h>
 
 #include "generic.h"
 #include "devices.h"
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 8361151be054..47094188e029 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -32,6 +32,7 @@
 #include <linux/sched.h>
 #include <linux/pwm_backlight.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_gpio.h>
 #include <linux/lis3lv02d.h>
@@ -53,7 +54,6 @@
 #include <mach/ohci.h>
 #include <mach/pxafb.h>
 #include <mach/mmc.h>
-#include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
 
 #include "generic.h"
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index c1ca8cb467fc..eb83c89428ef 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/fb.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/smc91x.h>
 #include <linux/mfd/da903x.h>
 #include <linux/mtd/mtd.h>
@@ -31,7 +32,6 @@
 #include <asm/mach/flash.h>
 
 #include <mach/pxa930.h>
-#include <plat/i2c.h>
 #include <mach/pxafb.h>
 
 #include "devices.h"
diff --git a/arch/arm/mach-pxa/saarb.c b/arch/arm/mach-pxa/saarb.c
index e497922f761a..9322fe527c7f 100644
--- a/arch/arm/mach-pxa/saarb.c
+++ b/arch/arm/mach-pxa/saarb.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/mfd/88pm860x.h>
 
 #include <asm/mach-types.h>
@@ -24,8 +25,6 @@
 #include <mach/mfp-pxa930.h>
 #include <mach/gpio.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 
 #define SAARB_NR_IRQS	(IRQ_BOARD_START + 40)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index b49a2c21124c..38e2c0912b9a 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -19,6 +19,7 @@
 #include <linux/gpio.h>
 #include <linux/leds.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/i2c/pca953x.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
@@ -47,8 +48,6 @@
 #include <mach/sharpsl_pm.h>
 #include <mach/smemc.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 #include "devices.h"
 
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 9a14fdb83c82..cb5611daf5fe 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -25,6 +25,7 @@
 #include <linux/mtd/plat-ram.h>
 #include <linux/mtd/partitions.h>
 
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/i2c/pcf857x.h>
 #include <linux/i2c/at24.h>
 #include <linux/smc91x.h>
@@ -43,7 +44,6 @@
 #include <asm/mach/flash.h>
 
 #include <mach/pxa27x.h>
-#include <plat/i2c.h>
 #include <mach/mmc.h>
 #include <mach/udc.h>
 #include <mach/pxa27x-udc.h>
diff --git a/arch/arm/mach-pxa/tavorevb3.c b/arch/arm/mach-pxa/tavorevb3.c
index 70191a9450eb..79f4422f12f4 100644
--- a/arch/arm/mach-pxa/tavorevb3.c
+++ b/arch/arm/mach-pxa/tavorevb3.c
@@ -15,6 +15,7 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/gpio.h>
 #include <linux/mfd/88pm860x.h>
 
@@ -23,8 +24,6 @@
 
 #include <mach/pxa930.h>
 
-#include <plat/i2c.h>
-
 #include "devices.h"
 #include "generic.h"
 
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index f2582ec300d9..5ad3807af334 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -34,6 +34,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/input/matrix_keypad.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/setup.h>
 #include <asm/mach-types.h>
@@ -41,7 +42,6 @@
 #include <mach/pxa25x.h>
 #include <mach/reset.h>
 #include <mach/irda.h>
-#include <plat/i2c.h>
 #include <mach/mmc.h>
 #include <mach/udc.h>
 #include <mach/tosa_bt.h>
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 423261d63d07..857bb2e63486 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -26,6 +26,7 @@
 #include <linux/dm9000.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/partitions.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -47,7 +48,6 @@
 #include <mach/irda.h>
 #include <mach/ohci.h>
 #include <mach/smemc.h>
-#include <plat/i2c.h>
 
 #include "generic.h"
 #include "devices.h"
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 49eeeab23689..12279214c875 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -36,6 +36,7 @@
 #include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-gpio.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/serial_8250.h>
 #include <linux/smc91x.h>
 #include <linux/pwm_backlight.h>
@@ -47,7 +48,6 @@
 #include <mach/pxa25x.h>
 #include <mach/audio.h>
 #include <mach/pxafb.h>
-#include <plat/i2c.h>
 #include <mach/regs-uart.h>
 #include <mach/arcom-pcmcia.h>
 #include <mach/viper.h>
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index b9b579715ff6..e709fd459268 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -26,6 +26,7 @@
 #include <linux/ucb1400.h>
 #include <linux/ata_platform.h>
 #include <linux/regulator/max1586.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -40,8 +41,6 @@
 #include <mach/udc.h>
 #include <mach/pata_pxa.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 #include "devices.h"
 
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index 51c0281c6e0a..f55f8f2e0db3 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -16,6 +16,7 @@
 
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/smc91x.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -26,8 +27,6 @@
 #include <asm/mach/irq.h>
 #include <asm/mach/map.h>
 
-#include <plat/i2c.h>
-
 #include <mach/hardware.h>
 #include <mach/pxa2xx-regs.h>
 #include <mach/mfp-pxa25x.h>
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index a323e076129e..aaf883754ef4 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -29,6 +29,7 @@
 #include <linux/gpio_keys.h>
 #include <linux/delay.h>
 #include <linux/regulator/machine.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -40,8 +41,6 @@
 #include <mach/mmc.h>
 #include <plat/pxa27x_keypad.h>
 
-#include <plat/i2c.h>
-
 #include "generic.h"
 #include "devices.h"
 
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index b92aa3b8c4f7..730f51e57c17 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -25,6 +25,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/i2c/pca953x.h>
 #include <linux/apm-emulation.h>
 #include <linux/can/platform/mcp251x.h>
@@ -33,8 +34,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
-#include <plat/i2c.h>
-
 #include <mach/pxa2xx-regs.h>
 #include <mach/regs-uart.h>
 #include <mach/ohci.h>
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c
index 3aa73b3e33f2..93c64d8d7de9 100644
--- a/arch/arm/mach-pxa/zylonite_pxa300.c
+++ b/arch/arm/mach-pxa/zylonite_pxa300.c
@@ -17,11 +17,11 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
+#include <linux/i2c/pxa-i2c.h>
 #include <linux/i2c/pca953x.h>
 #include <linux/gpio.h>
 
 #include <mach/pxa300.h>
-#include <plat/i2c.h>
 #include <mach/zylonite.h>
 
 #include "generic.h"
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index 6b86a722a7db..2c126bbca08d 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -18,12 +18,14 @@
 #include <linux/leds.h>
 #include <linux/gpio.h>
 #include <linux/rfkill.h>
+#include <linux/leds.h>
 
 #include <mach/regs-gpio.h>
 #include <mach/hardware.h>
 #include <mach/h1940-latch.h>
+#include <mach/h1940.h>
 
 #define DRV_NAME "h1940-bt"
 
 /* Bluetooth control */
 static void h1940bt_enable(int on)
@@ -37,6 +39,8 @@ static void h1940bt_enable(int on)
 		gpio_set_value(S3C2410_GPH(1), 1);
 		mdelay(10);
 		gpio_set_value(S3C2410_GPH(1), 0);
+
+		h1940_led_blink_set(-EINVAL, GPIO_LED_BLINK, NULL, NULL);
 	}
 	else {
 		gpio_set_value(S3C2410_GPH(1), 1);
@@ -44,6 +48,8 @@ static void h1940bt_enable(int on)
 		gpio_set_value(S3C2410_GPH(1), 0);
 		mdelay(10);
 		gpio_set_value(H1940_LATCH_BLUETOOTH_POWER, 0);
+
+		h1940_led_blink_set(-EINVAL, GPIO_LED_NO_BLINK_LOW, NULL, NULL);
 	}
 }
 
@@ -85,7 +91,6 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
 	s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
 	s3c_gpio_setpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
 
-
 	rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
 			&h1940bt_rfkill_ops, NULL);
 	if (!rfk) {
@@ -93,8 +98,6 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
 		goto err_rfk_alloc;
 	}
 
-	rfkill_set_led_trigger_name(rfk, "h1940-bluetooth");
-
 	ret = rfkill_register(rfk);
 	if (ret)
 		goto err_rfkill;
diff --git a/arch/arm/mach-s3c2410/include/mach/h1940.h b/arch/arm/mach-s3c2410/include/mach/h1940.h
index 4559784129c0..2aa683c8d3d6 100644
--- a/arch/arm/mach-s3c2410/include/mach/h1940.h
+++ b/arch/arm/mach-s3c2410/include/mach/h1940.h
@@ -17,5 +17,8 @@
 #define H1940_SUSPEND_CHECK	(0x30080000)
 
 extern void h1940_pm_return(void);
+extern int h1940_led_blink_set(unsigned gpio, int state,
+	unsigned long *delay_on, unsigned long *delay_off);
+
 
 #endif /* __ASM_ARCH_H1940_H */
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index 1e93f176c1de..2a2fa0620133 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -23,8 +23,15 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
 #include <linux/pwm_backlight.h>
 #include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/pda_power.h>
+#include <linux/s3c_adc_battery.h>
+#include <linux/delay.h>
+
 #include <video/platform_lcd.h>
 
 #include <linux/mmc/host.h>
@@ -203,20 +210,239 @@ static struct s3c2410fb_mach_info h1940_fb_info __initdata = {
 	.num_displays = 1,
 	.default_display = 0,
 
-	.lpcsel=	0x02,
-	.gpccon=	0xaa940659,
-	.gpccon_mask=	0xffffffff,
-	.gpcup=		0x0000ffff,
-	.gpcup_mask=	0xffffffff,
-	.gpdcon=	0xaa84aaa0,
-	.gpdcon_mask=	0xffffffff,
-	.gpdup=		0x0000faff,
-	.gpdup_mask=	0xffffffff,
+	.lpcsel =	0x02,
+	.gpccon =	0xaa940659,
+	.gpccon_mask =	0xffffc0f0,
+	.gpcup =	0x0000ffff,
+	.gpcup_mask =	0xffffffff,
+	.gpdcon =	0xaa84aaa0,
+	.gpdcon_mask =	0xffffffff,
+	.gpdup =	0x0000faff,
+	.gpdup_mask =	0xffffffff,
 };
 
-static struct platform_device h1940_device_leds = {
-	.name             = "h1940-leds",
+static int power_supply_init(struct device *dev)
+{
+	return gpio_request(S3C2410_GPF(2), "cable plugged");
+}
+
+static int h1940_is_ac_online(void)
+{
+	return !gpio_get_value(S3C2410_GPF(2));
+}
+
+static void power_supply_exit(struct device *dev)
+{
+	gpio_free(S3C2410_GPF(2));
+}
+
+static char *h1940_supplicants[] = {
+	"main-battery",
+	"backup-battery",
+};
+
+static struct pda_power_pdata power_supply_info = {
+	.init			= power_supply_init,
+	.is_ac_online		= h1940_is_ac_online,
+	.exit			= power_supply_exit,
+	.supplied_to		= h1940_supplicants,
+	.num_supplicants	= ARRAY_SIZE(h1940_supplicants),
+};
+
+static struct resource power_supply_resources[] = {
+	[0] = {
+		.name	= "ac",
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE |
+			  IORESOURCE_IRQ_HIGHEDGE,
+		.start	= IRQ_EINT2,
+		.end	= IRQ_EINT2,
+	},
+};
+
+static struct platform_device power_supply = {
+	.name		= "pda-power",
+	.id		= -1,
+	.dev		= {
+		.platform_data =
+			&power_supply_info,
+	},
+	.resource	= power_supply_resources,
+	.num_resources	= ARRAY_SIZE(power_supply_resources),
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_noac[] = {
+	{ .volt = 4070, .cur = 162, .level = 100},
+	{ .volt = 4040, .cur = 165, .level = 95},
+	{ .volt = 4016, .cur = 164, .level = 90},
+	{ .volt = 3996, .cur = 166, .level = 85},
+	{ .volt = 3971, .cur = 168, .level = 80},
+	{ .volt = 3951, .cur = 168, .level = 75},
+	{ .volt = 3931, .cur = 170, .level = 70},
+	{ .volt = 3903, .cur = 172, .level = 65},
+	{ .volt = 3886, .cur = 172, .level = 60},
+	{ .volt = 3858, .cur = 176, .level = 55},
+	{ .volt = 3842, .cur = 176, .level = 50},
+	{ .volt = 3818, .cur = 176, .level = 45},
+	{ .volt = 3789, .cur = 180, .level = 40},
+	{ .volt = 3769, .cur = 180, .level = 35},
+	{ .volt = 3749, .cur = 184, .level = 30},
+	{ .volt = 3732, .cur = 184, .level = 25},
+	{ .volt = 3716, .cur = 184, .level = 20},
+	{ .volt = 3708, .cur = 184, .level = 15},
+	{ .volt = 3716, .cur = 96, .level = 10},
+	{ .volt = 3700, .cur = 96, .level = 5},
+	{ .volt = 3684, .cur = 96, .level = 0},
+};
+
+static const struct s3c_adc_bat_thresh bat_lut_acin[] = {
+	{ .volt = 4130, .cur = 0, .level = 100},
+	{ .volt = 3982, .cur = 0, .level = 50},
+	{ .volt = 3854, .cur = 0, .level = 10},
+	{ .volt = 3841, .cur = 0, .level = 0},
+};
+
+int h1940_bat_init(void)
+{
+	int ret;
+
+	ret = gpio_request(H1940_LATCH_SM803_ENABLE, "h1940-charger-enable");
+	if (ret)
+		return ret;
+	gpio_direction_output(H1940_LATCH_SM803_ENABLE, 0);
+
+	return 0;
+
+}
+
+void h1940_bat_exit(void)
+{
+	gpio_free(H1940_LATCH_SM803_ENABLE);
+}
+
+void h1940_enable_charger(void)
+{
+	gpio_set_value(H1940_LATCH_SM803_ENABLE, 1);
+}
+
+void h1940_disable_charger(void)
+{
+	gpio_set_value(H1940_LATCH_SM803_ENABLE, 0);
+}
+
+static struct s3c_adc_bat_pdata h1940_bat_cfg = {
+	.init = h1940_bat_init,
+	.exit = h1940_bat_exit,
+	.enable_charger = h1940_enable_charger,
+	.disable_charger = h1940_disable_charger,
+	.gpio_charge_finished = S3C2410_GPF(3),
+	.gpio_inverted = 1,
+	.lut_noac = bat_lut_noac,
+	.lut_noac_cnt = ARRAY_SIZE(bat_lut_noac),
+	.lut_acin = bat_lut_acin,
+	.lut_acin_cnt = ARRAY_SIZE(bat_lut_acin),
+	.volt_channel = 0,
+	.current_channel = 1,
+	.volt_mult = 4056,
+	.current_mult = 1893,
+	.internal_impedance = 200,
+	.backup_volt_channel = 3,
+	/* TODO Check backup volt multiplier */
+	.backup_volt_mult = 4056,
+	.backup_volt_min = 0,
+	.backup_volt_max = 4149288
+};
+
+static struct platform_device h1940_battery = {
+	.name = "s3c-adc-battery",
 	.id               = -1,
+	.dev = {
+		.parent = &s3c_device_adc.dev,
+		.platform_data = &h1940_bat_cfg,
+	},
+};
+
+DEFINE_SPINLOCK(h1940_blink_spin);
+
+int h1940_led_blink_set(unsigned gpio, int state,
+	unsigned long *delay_on, unsigned long *delay_off)
+{
+	int blink_gpio, check_gpio1, check_gpio2;
+
+	switch (gpio) {
+	case H1940_LATCH_LED_GREEN:
+		blink_gpio = S3C2410_GPA(7);
+		check_gpio1 = S3C2410_GPA(1);
+		check_gpio2 = S3C2410_GPA(3);
+		break;
+	case H1940_LATCH_LED_RED:
+		blink_gpio = S3C2410_GPA(1);
+		check_gpio1 = S3C2410_GPA(7);
+		check_gpio2 = S3C2410_GPA(3);
+		break;
+	default:
+		blink_gpio = S3C2410_GPA(3);
+		check_gpio1 = S3C2410_GPA(1);
+		check_gpio2 = S3C2410_GPA(7);
+		break;
+	}
+
+	if (delay_on && delay_off && !*delay_on && !*delay_off)
+		*delay_on = *delay_off = 500;
+
+	spin_lock(&h1940_blink_spin);
+
+	switch (state) {
+	case GPIO_LED_NO_BLINK_LOW:
+	case GPIO_LED_NO_BLINK_HIGH:
+		if (!gpio_get_value(check_gpio1) &&
+		    !gpio_get_value(check_gpio2))
+			gpio_set_value(H1940_LATCH_LED_FLASH, 0);
+		gpio_set_value(blink_gpio, 0);
+		if (gpio_is_valid(gpio))
+			gpio_set_value(gpio, state);
+		break;
+	case GPIO_LED_BLINK:
+		if (gpio_is_valid(gpio))
+			gpio_set_value(gpio, 0);
+		gpio_set_value(H1940_LATCH_LED_FLASH, 1);
+		gpio_set_value(blink_gpio, 1);
+		break;
+	}
+
+	spin_unlock(&h1940_blink_spin);
+
+	return 0;
+}
+EXPORT_SYMBOL(h1940_led_blink_set);
+
+static struct gpio_led h1940_leds_desc[] = {
+	{
+		.name			= "Green",
+		.default_trigger	= "main-battery-full",
+		.gpio			= H1940_LATCH_LED_GREEN,
+		.retain_state_suspended	= 1,
+	},
+	{
+		.name			= "Red",
+		.default_trigger
+			= "main-battery-charging-blink-full-solid",
+		.gpio			= H1940_LATCH_LED_RED,
+		.retain_state_suspended	= 1,
+	},
+};
+
+static struct gpio_led_platform_data h1940_leds_pdata = {
+	.num_leds	= ARRAY_SIZE(h1940_leds_desc),
+	.leds		= h1940_leds_desc,
+	.gpio_blink_set	= h1940_led_blink_set,
+};
+
+static struct platform_device h1940_device_leds = {
+	.name = "leds-gpio",
+	.id = -1,
+	.dev = {
+		.platform_data = &h1940_leds_pdata,
+	},
 };
 
 static struct platform_device h1940_device_bluetooth = {
@@ -302,14 +528,14 @@ static struct platform_device h1940_backlight = {
 static void h1940_lcd_power_set(struct plat_lcd_data *pd,
 					unsigned int power)
 {
-	int value;
+	int value, retries = 100;
 
 	if (!power) {
 		gpio_set_value(S3C2410_GPC(0), 0);
 		/* wait for 3ac */
 		do {
 			value = gpio_get_value(S3C2410_GPC(6));
-		} while (value);
+		} while (value && retries--);
 
 		gpio_set_value(H1940_LATCH_LCD_P2, 0);
 		gpio_set_value(H1940_LATCH_LCD_P3, 0);
@@ -327,6 +553,9 @@ static void h1940_lcd_power_set(struct plat_lcd_data *pd,
 	gpio_set_value(H1940_LATCH_LCD_P0, 1);
 	gpio_set_value(H1940_LATCH_LCD_P1, 1);
 
+	gpio_direction_input(S3C2410_GPC(1));
+	gpio_direction_input(S3C2410_GPC(4));
+	mdelay(10);
 	s3c_gpio_cfgpin(S3C2410_GPC(1), S3C_GPIO_SFN(2));
 	s3c_gpio_cfgpin(S3C2410_GPC(4), S3C_GPIO_SFN(2));
 
@@ -362,7 +591,44 @@ static struct i2c_board_info h1940_i2c_devices[] = {
 	},
 };
 
+#define DECLARE_BUTTON(p, k, n, w)	\
+	{				\
+		.gpio		= p,	\
+		.code		= k,	\
+		.desc		= n,	\
+		.wakeup		= w,	\
+		.active_low	= 1,	\
+	}
+
+static struct gpio_keys_button h1940_buttons[] = {
+	DECLARE_BUTTON(S3C2410_GPF(0), KEY_POWER, "Power", 1),
+	DECLARE_BUTTON(S3C2410_GPF(6), KEY_ENTER, "Select", 1),
+	DECLARE_BUTTON(S3C2410_GPF(7), KEY_RECORD, "Record", 0),
+	DECLARE_BUTTON(S3C2410_GPG(0), KEY_F11, "Calendar", 0),
+	DECLARE_BUTTON(S3C2410_GPG(2), KEY_F12, "Contacts", 0),
+	DECLARE_BUTTON(S3C2410_GPG(3), KEY_MAIL, "Mail", 0),
+	DECLARE_BUTTON(S3C2410_GPG(6), KEY_LEFT, "Left_arrow", 0),
+	DECLARE_BUTTON(S3C2410_GPG(7), KEY_HOMEPAGE, "Home", 0),
+	DECLARE_BUTTON(S3C2410_GPG(8), KEY_RIGHT, "Right_arrow", 0),
+	DECLARE_BUTTON(S3C2410_GPG(9), KEY_UP, "Up_arrow", 0),
+	DECLARE_BUTTON(S3C2410_GPG(10), KEY_DOWN, "Down_arrow", 0),
+};
+
+static struct gpio_keys_platform_data h1940_buttons_data = {
+	.buttons	= h1940_buttons,
+	.nbuttons	= ARRAY_SIZE(h1940_buttons),
+};
+
+static struct platform_device h1940_dev_buttons = {
+	.name		= "gpio-keys",
+	.id		= -1,
+	.dev		= {
+		.platform_data = &h1940_buttons_data,
+	}
+};
+
 static struct platform_device *h1940_devices[] __initdata = {
+	&h1940_dev_buttons,
 	&s3c_device_ohci,
 	&s3c_device_lcd,
 	&s3c_device_wdt,
@@ -379,6 +645,8 @@ static struct platform_device *h1940_devices[] __initdata = {
 	&h1940_lcd_powerdev,
 	&s3c_device_adc,
 	&s3c_device_ts,
+	&power_supply,
+	&h1940_battery,
 };
 
 static void __init h1940_map_io(void)
@@ -461,6 +729,15 @@ static void __init h1940_init(void)
 
 	platform_add_devices(h1940_devices, ARRAY_SIZE(h1940_devices));
 
+	gpio_request(S3C2410_GPA(1), "Red LED blink");
+	gpio_request(S3C2410_GPA(3), "Blue LED blink");
+	gpio_request(S3C2410_GPA(7), "Green LED blink");
+	gpio_request(H1940_LATCH_LED_FLASH, "LED blink");
+	gpio_direction_output(S3C2410_GPA(1), 0);
+	gpio_direction_output(S3C2410_GPA(3), 0);
+	gpio_direction_output(S3C2410_GPA(7), 0);
+	gpio_direction_output(H1940_LATCH_LED_FLASH, 0);
+
 	i2c_register_board_info(0, h1940_i2c_devices,
 			ARRAY_SIZE(h1940_i2c_devices));
 }
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index d80f129bca94..dfedc9c9e005 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -488,6 +488,11 @@ static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
 	},
 };
 
+static struct platform_device uda1340_codec = {
+	.name = "uda134x-codec",
+	.id = -1,
+};
+
 static struct platform_device *mini2440_devices[] __initdata = {
 	&s3c_device_ohci,
 	&s3c_device_wdt,
@@ -503,7 +508,9 @@ static struct platform_device *mini2440_devices[] __initdata = {
 	&s3c_device_nand,
 	&s3c_device_sdi,
 	&s3c_device_iis,
+	&uda1340_codec,
 	&mini2440_audio,
+	&samsung_asoc_dma,
 };
 
 static void __init mini2440_map_io(void)
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index 86bbc233b31c..27ea95096fe1 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -263,27 +263,78 @@ void rx1950_disable_charger(void)
 	gpio_direction_output(S3C2410_GPJ(3), 0);
 }
 
+DEFINE_SPINLOCK(rx1950_blink_spin);
+
+static int rx1950_led_blink_set(unsigned gpio, int state,
+	unsigned long *delay_on, unsigned long *delay_off)
+{
+	int blink_gpio, check_gpio;
+
+	switch (gpio) {
+	case S3C2410_GPA(6):
+		blink_gpio = S3C2410_GPA(4);
+		check_gpio = S3C2410_GPA(3);
+		break;
+	case S3C2410_GPA(7):
+		blink_gpio = S3C2410_GPA(3);
+		check_gpio = S3C2410_GPA(4);
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+
+	if (delay_on && delay_off && !*delay_on && !*delay_off)
+		*delay_on = *delay_off = 500;
+
+	spin_lock(&rx1950_blink_spin);
+
+	switch (state) {
+	case GPIO_LED_NO_BLINK_LOW:
+	case GPIO_LED_NO_BLINK_HIGH:
+		if (!gpio_get_value(check_gpio))
+			gpio_set_value(S3C2410_GPJ(6), 0);
+		gpio_set_value(blink_gpio, 0);
+		gpio_set_value(gpio, state);
+		break;
+	case GPIO_LED_BLINK:
+		gpio_set_value(gpio, 0);
+		gpio_set_value(S3C2410_GPJ(6), 1);
+		gpio_set_value(blink_gpio, 1);
+		break;
+	}
+
+	spin_unlock(&rx1950_blink_spin);
+
+	return 0;
+}
+
 static struct gpio_led rx1950_leds_desc[] = {
 	{
 		.name			= "Green",
-		.default_trigger	= "main-battery-charging-or-full",
+		.default_trigger	= "main-battery-full",
 		.gpio			= S3C2410_GPA(6),
+		.retain_state_suspended	= 1,
 	},
 	{
 		.name			= "Red",
-		.default_trigger	= "main-battery-full",
-		.gpio			= S3C2410_GPA(7),
+		.default_trigger
+			= "main-battery-charging-blink-full-solid",
+		.gpio			= S3C2410_GPA(7),
+		.retain_state_suspended	= 1,
 	},
 	{
 		.name			= "Blue",
 		.default_trigger	= "rx1950-acx-mem",
 		.gpio			= S3C2410_GPA(11),
+		.retain_state_suspended	= 1,
 	},
 };
 
 static struct gpio_led_platform_data rx1950_leds_pdata = {
 	.num_leds	= ARRAY_SIZE(rx1950_leds_desc),
 	.leds		= rx1950_leds_desc,
+	.gpio_blink_set	= rx1950_led_blink_set,
 };
 
 static struct platform_device rx1950_leds = {
@@ -752,6 +803,13 @@ static void __init rx1950_init_machine(void)
 
 	WARN_ON(gpio_request(S3C2410_GPB(1), "LCD power"));
 
+	WARN_ON(gpio_request(S3C2410_GPA(3), "Red blink"));
+	WARN_ON(gpio_request(S3C2410_GPA(4), "Green blink"));
+	WARN_ON(gpio_request(S3C2410_GPJ(6), "LED blink"));
+	gpio_direction_output(S3C2410_GPA(3), 0);
+	gpio_direction_output(S3C2410_GPA(4), 0);
+	gpio_direction_output(S3C2410_GPJ(6), 0);
+
 	platform_add_devices(rx1950_devices, ARRAY_SIZE(rx1950_devices));
 
 	i2c_register_board_info(0, rx1950_i2c_devices,
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 4d6dd4c39b75..c44886062f8e 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -104,6 +104,8 @@ struct stedma40_half_channel_info {
  *
  * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
  * @high_priority: true if high-priority
+ * @realtime: true if realtime mode is to be enabled.  Only available on DMA40
+ * version 3+, i.e DB8500v2+
  * @mode: channel mode: physical, logical, or operation
  * @mode_opt: options for the chosen channel mode
  * @src_dev_type: Src device type
@@ -119,6 +121,7 @@ struct stedma40_half_channel_info {
 struct stedma40_chan_cfg {
 	enum stedma40_xfer_dir	dir;
 	bool			high_priority;
+	bool			realtime;
 	enum stedma40_mode	mode;
 	enum stedma40_mode_opt	mode_opt;
 	int			src_dev_type;
@@ -169,25 +172,6 @@ struct stedma40_platform_data {
 bool stedma40_filter(struct dma_chan *chan, void *data);
 
 /**
- * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
- * scattergatter lists.
- *
- * @chan: dmaengine handle
- * @sgl_dst: Destination scatter list
- * @sgl_src: Source scatter list
- * @sgl_len: The length of each scatterlist. Both lists must be of equal length
- * and each element must match the corresponding element in the other scatter
- * list.
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
-						   struct scatterlist *sgl_dst,
-						   struct scatterlist *sgl_src,
-						   unsigned int sgl_len,
-						   unsigned long flags);
-
-/**
  * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
  * (=device)
  *
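A client opts into the new realtime mode purely through its channel configuration; a hedged sketch, where MY_DEV_RX stands in for a real DMA40 device id (only the field names and the STEDMA40_* constants come from this header):

	static struct stedma40_chan_cfg example_rx_cfg = {
		.dir		= STEDMA40_PERIPH_TO_MEM,
		.high_priority	= true,
		.realtime	= true,	/* honoured on DMA40 v3+, i.e. DB8500v2+ */
		.mode		= STEDMA40_MODE_LOGICAL,
		.src_dev_type	= MY_DEV_RX,		/* hypothetical device id */
		.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
	};

The configuration reaches the driver by passing a pointer to it as the stedma40_filter() parameter when requesting a channel.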
diff --git a/arch/avr32/include/asm/types.h b/arch/avr32/include/asm/types.h
index 9cefda6f534a..72667a3b1af7 100644
--- a/arch/avr32/include/asm/types.h
+++ b/arch/avr32/include/asm/types.h
@@ -23,14 +23,6 @@ typedef unsigned short umode_t;
 
 #define BITS_PER_LONG 32
 
-#ifndef __ASSEMBLY__
-
-/* Dma addresses are 32-bits wide. */
-
-typedef u32 dma_addr_t;
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 
 
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index e67c99945428..bfc9d071db9b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -2048,6 +2048,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
 		rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
 		rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
 		rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+		rx_dws->src_master = 0;
+		rx_dws->dst_master = 1;
+		rx_dws->src_msize = DW_DMA_MSIZE_1;
+		rx_dws->dst_msize = DW_DMA_MSIZE_1;
+		rx_dws->fc = DW_DMA_FC_D_P2M;
 	}
 
 	/* Check if DMA slave interface for playback should be configured. */
@@ -2056,6 +2061,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
 		tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
 		tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
 		tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+		tx_dws->src_master = 0;
+		tx_dws->dst_master = 1;
+		tx_dws->src_msize = DW_DMA_MSIZE_1;
+		tx_dws->dst_msize = DW_DMA_MSIZE_1;
+		tx_dws->fc = DW_DMA_FC_D_M2P;
 	}
 
 	if (platform_device_add_data(pdev, data,
@@ -2128,6 +2138,11 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
 	dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
 	dws->cfg_hi = DWC_CFGH_DST_PER(2);
 	dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+	dws->src_master = 0;
+	dws->dst_master = 1;
+	dws->src_msize = DW_DMA_MSIZE_1;
+	dws->dst_msize = DW_DMA_MSIZE_1;
+	dws->fc = DW_DMA_FC_D_M2P;
 
 	if (platform_device_add_data(pdev, data,
 				     sizeof(struct atmel_abdac_pdata)))
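Each of the three blocks above fills out one direction of a struct dw_dma_slave (bus masters, burst size, flow controller) for the DesignWare DMA engine. The peripheral driver on the other end typically turns that description into a channel with a filter of roughly this shape; this is a sketch of the idiom used by the Atmel slave drivers of this period, not code from this patch:

	static bool example_dw_filter(struct dma_chan *chan, void *slave)
	{
		struct dw_dma_slave *dws = slave;

		/* Accept only channels of the controller named in the slave data. */
		if (dws->dma_dev != chan->device->dev)
			return false;

		chan->private = dws;	/* dw_dmac reads its parameters from here */
		return true;
	}

The driver then calls dma_request_channel() with DMA_SLAVE in the capability mask and the dw_dma_slave as the filter parameter.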
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 91776069ca80..29b74a105830 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -68,7 +68,7 @@ struct thread_info {
68#define init_thread_info (init_thread_union.thread_info) 68#define init_thread_info (init_thread_union.thread_info)
69 69
70/* thread information allocation */ 70/* thread information allocation */
71#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1)) 71#define alloc_thread_info(tsk, node) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
72#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) 72#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
73 73
74#endif /* !__ASSEMBLY__ */ 74#endif /* !__ASSEMBLY__ */
diff --git a/arch/cris/include/asm/types.h b/arch/cris/include/asm/types.h
index 5790262cbe8a..44055087c932 100644
--- a/arch/cris/include/asm/types.h
+++ b/arch/cris/include/asm/types.h
@@ -18,9 +18,6 @@ typedef unsigned short umode_t;
18 18
19#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
20 20
21/* Dma addresses are 32-bits wide, just like our other addresses. */
22
23typedef u32 dma_addr_t;
24typedef u32 dma64_addr_t; 21typedef u32 dma64_addr_t;
25 22
26#endif /* __ASSEMBLY__ */ 23#endif /* __ASSEMBLY__ */
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index 3744f2e47f48..4b789ab182b0 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -137,7 +137,7 @@ unsigned long get_wchan(struct task_struct *p);
137#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp) 137#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
138 138
139/* Allocation and freeing of basic task resources. */ 139/* Allocation and freeing of basic task resources. */
140extern struct task_struct *alloc_task_struct(void); 140extern struct task_struct *alloc_task_struct_node(int node);
141extern void free_task_struct(struct task_struct *p); 141extern void free_task_struct(struct task_struct *p);
142 142
143#define cpu_relax() barrier() 143#define cpu_relax() barrier()
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index 11f33ead29bf..8582e9c7531c 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -84,16 +84,11 @@ register struct thread_info *__current_thread_info asm("gr15");
84 84
85/* thread information allocation */ 85/* thread information allocation */
86#ifdef CONFIG_DEBUG_STACK_USAGE 86#ifdef CONFIG_DEBUG_STACK_USAGE
87#define alloc_thread_info(tsk) \ 87#define alloc_thread_info_node(tsk, node) \
88 ({ \ 88 kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
89 struct thread_info *ret; \
90 \
91 ret = kzalloc(THREAD_SIZE, GFP_KERNEL); \
92 \
93 ret; \
94 })
95#else 89#else
96#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 90#define alloc_thread_info_node(tsk, node) \
91 kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
97#endif 92#endif
98 93
99#define free_thread_info(info) kfree(info) 94#define free_thread_info(info) kfree(info)
diff --git a/arch/frv/include/asm/types.h b/arch/frv/include/asm/types.h
index 613bf1e962f0..aa3e7fdc7f29 100644
--- a/arch/frv/include/asm/types.h
+++ b/arch/frv/include/asm/types.h
@@ -27,14 +27,6 @@ typedef unsigned short umode_t;
27 27
28#define BITS_PER_LONG 32 28#define BITS_PER_LONG 32
29 29
30#ifndef __ASSEMBLY__
31
32/* Dma addresses are 32-bits wide. */
33
34typedef u32 dma_addr_t;
35
36#endif /* __ASSEMBLY__ */
37
38#endif /* __KERNEL__ */ 30#endif /* __KERNEL__ */
39 31
40#endif /* _ASM_TYPES_H */ 32#endif /* _ASM_TYPES_H */
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index efad12071c2e..9d3597526467 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -44,9 +44,10 @@ asmlinkage void ret_from_fork(void);
44void (*pm_power_off)(void); 44void (*pm_power_off)(void);
45EXPORT_SYMBOL(pm_power_off); 45EXPORT_SYMBOL(pm_power_off);
46 46
47struct task_struct *alloc_task_struct(void) 47struct task_struct *alloc_task_struct_node(int node)
48{ 48{
49 struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL); 49 struct task_struct *p = kmalloc_node(THREAD_SIZE, GFP_KERNEL, node);
50
50 if (p) 51 if (p)
51 atomic_set((atomic_t *)(p+1), 1); 52 atomic_set((atomic_t *)(p+1), 1);
52 return p; 53 return p;
diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h
index 12875190b156..bb2c91a3522e 100644
--- a/arch/h8300/include/asm/types.h
+++ b/arch/h8300/include/asm/types.h
@@ -22,10 +22,6 @@ typedef unsigned short umode_t;
22 22
23#define BITS_PER_LONG 32 23#define BITS_PER_LONG 32
24 24
25/* Dma addresses are 32-bits wide. */
26
27typedef u32 dma_addr_t;
28
29#endif /* __KERNEL__ */ 25#endif /* __KERNEL__ */
30 26
31#endif /* __ASSEMBLY__ */ 27#endif /* __ASSEMBLY__ */
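The removals above (avr32, cris, frv, h8300, and now ia64 below) all delete the same per-architecture boilerplate: dma_addr_t is now defined once and picked up through the generic headers. A minimal sketch of the consolidated form, assuming the CONFIG_ARCH_DMA_ADDR_T_64BIT switch that 64-bit-capable configurations select (include/linux/types.h style; a sketch, not a verbatim quote of the tree):

	/* Consolidated dma_addr_t: one definition keyed off a config symbol
	 * instead of a hand-written typedef in every asm/types.h. */
	#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	typedef u64 dma_addr_t;
	#else
	typedef u32 dma_addr_t;
	#endif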
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index b6a5ba2aca34..6392908e8f98 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -59,11 +59,12 @@ struct thread_info {
59#ifndef ASM_OFFSETS_C 59#ifndef ASM_OFFSETS_C
60/* how to get the thread information struct from C */ 60/* how to get the thread information struct from C */
61#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) 61#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
62#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 62#define alloc_thread_info_node(tsk, node) \
63 ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
63#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) 64#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
64#else 65#else
65#define current_thread_info() ((struct thread_info *) 0) 66#define current_thread_info() ((struct thread_info *) 0)
66#define alloc_thread_info(tsk) ((struct thread_info *) 0) 67#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0)
67#define task_thread_info(tsk) ((struct thread_info *) 0) 68#define task_thread_info(tsk) ((struct thread_info *) 0)
68#endif 69#endif
69#define free_thread_info(ti) /* nothing */ 70#define free_thread_info(ti) /* nothing */
@@ -84,7 +85,14 @@ struct thread_info {
84#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET) 85#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
85 86
86#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 87#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
87#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) 88#define alloc_task_struct_node(node) \
89({ \
90 struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \
91 KERNEL_STACK_SIZE_ORDER); \
92 struct task_struct *ret = page ? page_address(page) : NULL; \
93 \
94	ret;								\
95})
88#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) 96#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
89 97
90#endif /* !__ASSEMBLY */ 98#endif /* !__ASSEMBLY */
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index 93773fd37be0..82b3939d2718 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -40,9 +40,6 @@ struct fnptr {
40 unsigned long gp; 40 unsigned long gp;
41}; 41};
42 42
43/* DMA addresses are 64-bits wide, in general. */
44typedef u64 dma_addr_t;
45
46# endif /* __KERNEL__ */ 43# endif /* __KERNEL__ */
47#endif /* !__ASSEMBLY__ */ 44#endif /* !__ASSEMBLY__ */
48 45
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 71faff5bcc27..0227dba44068 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -96,16 +96,11 @@ static inline struct thread_info *current_thread_info(void)
96 96
97/* thread information allocation */ 97/* thread information allocation */
98#ifdef CONFIG_DEBUG_STACK_USAGE 98#ifdef CONFIG_DEBUG_STACK_USAGE
99#define alloc_thread_info(tsk) \ 99#define alloc_thread_info_node(tsk, node) \
100 ({ \ 100 kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
101 struct thread_info *ret; \
102 \
103 ret = kzalloc(THREAD_SIZE, GFP_KERNEL); \
104 \
105 ret; \
106 })
107#else 101#else
108#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 102#define alloc_thread_info_node(tsk, node) \
103 kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
109#endif 104#endif
110 105
111#define free_thread_info(info) kfree(info) 106#define free_thread_info(info) kfree(info)
diff --git a/arch/m32r/include/asm/types.h b/arch/m32r/include/asm/types.h
index bc9f7fff0ac3..fd84b4898e30 100644
--- a/arch/m32r/include/asm/types.h
+++ b/arch/m32r/include/asm/types.h
@@ -18,9 +18,6 @@ typedef unsigned short umode_t;
18 18
19#ifndef __ASSEMBLY__ 19#ifndef __ASSEMBLY__
20 20
21/* DMA addresses are 32-bits wide. */
22
23typedef u32 dma_addr_t;
24typedef u64 dma64_addr_t; 21typedef u64 dma64_addr_t;
25 22
26#endif /* __ASSEMBLY__ */ 23#endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
index 6441cb5f8e7c..10ad92f1c173 100644
--- a/arch/m68k/include/asm/types.h
+++ b/arch/m68k/include/asm/types.h
@@ -25,9 +25,6 @@ typedef unsigned short umode_t;
25 25
26#ifndef __ASSEMBLY__ 26#ifndef __ASSEMBLY__
27 27
28/* DMA addresses are always 32-bits wide */
29
30typedef u32 dma_addr_t;
31typedef u32 dma64_addr_t; 28typedef u32 dma64_addr_t;
32 29
33#endif /* __ASSEMBLY__ */ 30#endif /* __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index d309556cacf8..d71160de4d10 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -88,9 +88,11 @@ register struct thread_info *__current_thread_info __asm__("$28");
88#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 88#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
89 89
90#ifdef CONFIG_DEBUG_STACK_USAGE 90#ifdef CONFIG_DEBUG_STACK_USAGE
91#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) 91#define alloc_thread_info_node(tsk, node) \
92 kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
92#else 93#else
93#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 94#define alloc_thread_info_node(tsk, node) \
95 kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
94#endif 96#endif
95 97
96#define free_thread_info(info) kfree(info) 98#define free_thread_info(info) kfree(info)
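The alloc_thread_info(tsk) to alloc_thread_info_node(tsk, node) renames above (frv, ia64, m32r, mips so far) all serve one caller: the fork path can now place a child's kernel stack on a chosen NUMA node. A hedged sketch of such a caller for architectures that provide their own allocators; the function name and numa_node_id() as the placement policy are illustrative, not the kernel's exact code:

	#include <linux/sched.h>
	#include <linux/topology.h>

	/* Allocate the task_struct and its thread_info/stack on the same
	 * node, so a task's hottest kernel data is local to where it runs. */
	static struct task_struct *dup_task_struct_sketch(struct task_struct *orig)
	{
		int node = numa_node_id();	/* assumed placement policy */
		struct task_struct *tsk = alloc_task_struct_node(node);
		struct thread_info *ti;

		if (!tsk)
			return NULL;
		ti = alloc_thread_info_node(tsk, node);
		if (!ti) {
			free_task_struct(tsk);
			return NULL;
		}
		/* ... copy *orig, point tsk->stack at ti, etc. ... */
		return tsk;
	}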
diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h
index 544a2854598f..9520dc894989 100644
--- a/arch/mips/include/asm/types.h
+++ b/arch/mips/include/asm/types.h
@@ -33,12 +33,6 @@ typedef unsigned short umode_t;
33#ifdef __KERNEL__ 33#ifdef __KERNEL__
34#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
35 35
36#if (defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) \
37 || defined(CONFIG_64BIT)
38typedef u64 dma_addr_t;
39#else
40typedef u32 dma_addr_t;
41#endif
42typedef u64 dma64_addr_t; 36typedef u64 dma64_addr_t;
43 37
44/* 38/*
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index aa07a4a5d794..8d53f09c878d 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -124,9 +124,11 @@ static inline unsigned long current_stack_pointer(void)
124 124
125/* thread information allocation */ 125/* thread information allocation */
126#ifdef CONFIG_DEBUG_STACK_USAGE 126#ifdef CONFIG_DEBUG_STACK_USAGE
127#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) 127#define alloc_thread_info_node(tsk, node) \
128 kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
128#else 129#else
129#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 130#define alloc_thread_info_node(tsk, node) \
131 kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
130#endif 132#endif
131 133
132#define free_thread_info(ti) kfree((ti)) 134#define free_thread_info(ti) kfree((ti))
diff --git a/arch/mn10300/include/asm/types.h b/arch/mn10300/include/asm/types.h
index 7b9f01042fd4..c1833eb192e3 100644
--- a/arch/mn10300/include/asm/types.h
+++ b/arch/mn10300/include/asm/types.h
@@ -26,13 +26,6 @@ typedef unsigned short umode_t;
26 26
27#define BITS_PER_LONG 32 27#define BITS_PER_LONG 32
28 28
29#ifndef __ASSEMBLY__
30
31/* Dma addresses are 32-bits wide. */
32typedef u32 dma_addr_t;
33
34#endif /* __ASSEMBLY__ */
35
36#endif /* __KERNEL__ */ 29#endif /* __KERNEL__ */
37 30
38#endif /* _ASM_TYPES_H */ 31#endif /* _ASM_TYPES_H */
diff --git a/arch/parisc/include/asm/types.h b/arch/parisc/include/asm/types.h
index 20135cc80039..bc164ddffb78 100644
--- a/arch/parisc/include/asm/types.h
+++ b/arch/parisc/include/asm/types.h
@@ -16,9 +16,6 @@ typedef unsigned short umode_t;
16 16
17#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
18 18
19/* Dma addresses are 32-bits wide. */
20
21typedef u32 dma_addr_t;
22typedef u64 dma64_addr_t; 19typedef u64 dma64_addr_t;
23 20
24#endif /* __ASSEMBLY__ */ 21#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 65eb85976a03..d8529ef13b23 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -72,7 +72,7 @@ struct thread_info {
72 72
73#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 73#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
74 74
75extern struct thread_info *alloc_thread_info(struct task_struct *tsk); 75extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
76extern void free_thread_info(struct thread_info *ti); 76extern void free_thread_info(struct thread_info *ti);
77 77
78#endif /* THREAD_SHIFT < PAGE_SHIFT */ 78#endif /* THREAD_SHIFT < PAGE_SHIFT */
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index a5aea0ca34e9..e16a6b2d96f1 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -44,11 +44,6 @@ typedef struct {
44 44
45typedef __vector128 vector128; 45typedef __vector128 vector128;
46 46
47#if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)
48typedef u64 dma_addr_t;
49#else
50typedef u32 dma_addr_t;
51#endif
52typedef u64 dma64_addr_t; 47typedef u64 dma64_addr_t;
53 48
54typedef struct { 49typedef struct {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8303a6c65ef7..f74f355a9617 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1218,11 +1218,11 @@ void __ppc64_runlatch_off(void)
1218 1218
1219static struct kmem_cache *thread_info_cache; 1219static struct kmem_cache *thread_info_cache;
1220 1220
1221struct thread_info *alloc_thread_info(struct task_struct *tsk) 1221struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
1222{ 1222{
1223 struct thread_info *ti; 1223 struct thread_info *ti;
1224 1224
1225 ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); 1225 ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
1226 if (unlikely(ti == NULL)) 1226 if (unlikely(ti == NULL))
1227 return NULL; 1227 return NULL;
1228#ifdef CONFIG_DEBUG_STACK_USAGE 1228#ifdef CONFIG_DEBUG_STACK_USAGE
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 04d6b95a89c6..f7f6ae6bed8f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -31,12 +31,6 @@ typedef __signed__ long saddr_t;
31#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
32 32
33typedef u64 dma64_addr_t; 33typedef u64 dma64_addr_t;
34#ifdef __s390x__
35/* DMA addresses come in 32-bit and 64-bit flavours. */
36typedef u64 dma_addr_t;
37#else
38typedef u32 dma_addr_t;
39#endif
40 34
41#ifndef __s390x__ 35#ifndef __s390x__
42typedef union { 36typedef union {
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index 8570d08f58c1..2205c62284db 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -71,7 +71,7 @@ struct thread_info {
71register struct thread_info *__current_thread_info __asm__("r28"); 71register struct thread_info *__current_thread_info __asm__("r28");
72#define current_thread_info() __current_thread_info 72#define current_thread_info() __current_thread_info
73 73
74#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) 74#define alloc_thread_info_node(tsk, node) kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
75#define free_thread_info(info) kfree(info) 75#define free_thread_info(info) kfree(info)
76 76
77#endif /* !__ASSEMBLY__ */ 77#endif /* !__ASSEMBLY__ */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index c228946926ed..ea2d5089de1e 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -95,7 +95,7 @@ static inline struct thread_info *current_thread_info(void)
95 95
96#endif 96#endif
97 97
98extern struct thread_info *alloc_thread_info(struct task_struct *tsk); 98extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
99extern void free_thread_info(struct thread_info *ti); 99extern void free_thread_info(struct thread_info *ti);
100extern void arch_task_cache_init(void); 100extern void arch_task_cache_init(void);
101#define arch_task_cache_init arch_task_cache_init 101#define arch_task_cache_init arch_task_cache_init
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index dcb126dc76fd..f39ad57296b7 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -32,16 +32,16 @@ void free_thread_xstate(struct task_struct *tsk)
32#if THREAD_SHIFT < PAGE_SHIFT 32#if THREAD_SHIFT < PAGE_SHIFT
33static struct kmem_cache *thread_info_cache; 33static struct kmem_cache *thread_info_cache;
34 34
35struct thread_info *alloc_thread_info(struct task_struct *tsk) 35struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
36{ 36{
37 struct thread_info *ti; 37 struct thread_info *ti;
38
39 ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
40 if (unlikely(ti == NULL))
41 return NULL;
42#ifdef CONFIG_DEBUG_STACK_USAGE 38#ifdef CONFIG_DEBUG_STACK_USAGE
43 memset(ti, 0, THREAD_SIZE); 39 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
40#else
41 gfp_t mask = GFP_KERNEL;
44#endif 42#endif
43
44 ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
45 return ti; 45 return ti;
46} 46}
47 47
@@ -64,7 +64,9 @@ struct thread_info *alloc_thread_info(struct task_struct *tsk)
64#else 64#else
65 gfp_t mask = GFP_KERNEL; 65 gfp_t mask = GFP_KERNEL;
66#endif 66#endif
67 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); 67 struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
68
69 return page ? page_address(page) : NULL;
68} 70}
69 71
70void free_thread_info(struct thread_info *ti) 72void free_thread_info(struct thread_info *ti)
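The reason the CONFIG_DEBUG_STACK_USAGE branches above ask for zeroed memory (kzalloc_node, __GFP_ZERO) is that the stack-usage checker measures depth by scanning from the bottom of the stack for the first non-zero word, which only works if the stack started out all zero. A rough sketch of that measurement, assuming a downward-growing stack with thread_info at the base (cf. stack_not_used()):

	/* Count untouched bytes above thread_info: on a zero-filled stack,
	 * everything below the deepest write is still zero. */
	static unsigned long stack_bytes_unused(struct thread_info *ti)
	{
		unsigned long *p = (unsigned long *)(ti + 1); /* lowest stack word */

		while (!*p)	/* stop at the first word ever written */
			p++;
		return (unsigned long)p - (unsigned long)(ti + 1);
	}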
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 9dd0318d3ddf..fa5753233410 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -82,8 +82,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
82 82
83#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 83#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
84 84
85BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void) 85BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info_node, int)
86#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)() 86#define alloc_thread_info_node(tsk, node) BTFIXUP_CALL(alloc_thread_info_node)(node)
87 87
88BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *) 88BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
89#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti) 89#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
@@ -92,7 +92,7 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
92 92
93/* 93/*
94 * Size of kernel stack for each process. 94 * Size of kernel stack for each process.
95 * Observe the order of get_free_pages() in alloc_thread_info(). 95 * Observe the order of get_free_pages() in alloc_thread_info_node().
96 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste. 96 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
97 */ 97 */
98#define THREAD_SIZE 8192 98#define THREAD_SIZE 8192
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index fb2ea7705a46..60d86be1a533 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -146,21 +146,21 @@ register struct thread_info *current_thread_info_reg asm("g6");
146#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 146#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
147 147
148#ifdef CONFIG_DEBUG_STACK_USAGE 148#ifdef CONFIG_DEBUG_STACK_USAGE
149#define alloc_thread_info(tsk) \ 149#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
150({ \
151 struct thread_info *ret; \
152 \
153 ret = (struct thread_info *) \
154 __get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER); \
155 if (ret) \
156 memset(ret, 0, PAGE_SIZE<<__THREAD_INFO_ORDER); \
157 ret; \
158})
159#else 150#else
160#define alloc_thread_info(tsk) \ 151#define THREAD_FLAGS (GFP_KERNEL)
161 ((struct thread_info *)__get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER))
162#endif 152#endif
163 153
154#define alloc_thread_info_node(tsk, node) \
155({ \
156 struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
157 __THREAD_INFO_ORDER); \
158 struct thread_info *ret; \
159 \
160 ret = page ? page_address(page) : NULL; \
161 ret; \
162})
163
164#define free_thread_info(ti) \ 164#define free_thread_info(ti) \
165 free_pages((unsigned long)(ti),__THREAD_INFO_ORDER) 165 free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
166 166
diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h
index 09c79a9c8516..f02d330cb9f1 100644
--- a/arch/sparc/include/asm/types.h
+++ b/arch/sparc/include/asm/types.h
@@ -22,10 +22,6 @@ typedef unsigned short umode_t;
22 22
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24 24
25/* Dma addresses come in generic and 64-bit flavours. */
26
27typedef u32 dma_addr_t;
28
29#if defined(__arch64__) 25#if defined(__arch64__)
30 26
31/*** SPARC 64 bit ***/ 27/*** SPARC 64 bit ***/
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 92319aa8b662..fe09fd8be695 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -650,7 +650,7 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
650 * mappings on the kernel stack without any special code as we did 650 * mappings on the kernel stack without any special code as we did
651 * need on the sun4c. 651 * need on the sun4c.
652 */ 652 */
653static struct thread_info *srmmu_alloc_thread_info(void) 653static struct thread_info *srmmu_alloc_thread_info_node(int node)
654{ 654{
655 struct thread_info *ret; 655 struct thread_info *ret;
656 656
@@ -2271,7 +2271,7 @@ void __init ld_mmu_srmmu(void)
2271 2271
2272 BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); 2272 BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
2273 2273
2274 BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM); 2274 BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM);
2275 BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM); 2275 BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);
2276 2276
2277 BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM); 2277 BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index b5137cc2aba3..a2350b5e68aa 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -922,7 +922,7 @@ static inline void garbage_collect(int entry)
922 free_locked_segment(BUCKET_ADDR(entry)); 922 free_locked_segment(BUCKET_ADDR(entry));
923} 923}
924 924
925static struct thread_info *sun4c_alloc_thread_info(void) 925static struct thread_info *sun4c_alloc_thread_info_node(int node)
926{ 926{
927 unsigned long addr, pages; 927 unsigned long addr, pages;
928 int entry; 928 int entry;
@@ -2155,7 +2155,7 @@ void __init ld_mmu_sun4c(void)
2155 BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM); 2155 BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
2156 BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM); 2156 BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
2157 2157
2158 BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM); 2158 BTFIXUPSET_CALL(alloc_thread_info_node, sun4c_alloc_thread_info_node, BTFIXUPCALL_NORM);
2159 BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM); 2159 BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
2160 2160
2161 BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM); 2161 BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 9e8e9c4dfa2a..3405b52853b8 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -84,7 +84,7 @@ register unsigned long stack_pointer __asm__("sp");
84 ((struct thread_info *)(stack_pointer & -THREAD_SIZE)) 84 ((struct thread_info *)(stack_pointer & -THREAD_SIZE))
85 85
86#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 86#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
87extern struct thread_info *alloc_thread_info(struct task_struct *task); 87extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node);
88extern void free_thread_info(struct thread_info *info); 88extern void free_thread_info(struct thread_info *info);
89 89
90/* Sit on a nap instruction until interrupted. */ 90/* Sit on a nap instruction until interrupted. */
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index b9cd962e1d30..d0065103eb7b 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -109,7 +109,7 @@ void cpu_idle(void)
109 } 109 }
110} 110}
111 111
112struct thread_info *alloc_thread_info(struct task_struct *task) 112struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
113{ 113{
114 struct page *page; 114 struct page *page;
115 gfp_t flags = GFP_KERNEL; 115 gfp_t flags = GFP_KERNEL;
@@ -118,7 +118,7 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
118 flags |= __GFP_ZERO; 118 flags |= __GFP_ZERO;
119#endif 119#endif
120 120
121 page = alloc_pages(flags, THREAD_SIZE_ORDER); 121 page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
122 if (!page) 122 if (!page)
123 return NULL; 123 return NULL;
124 124
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 050e4ddbbb65..35dd0b86401a 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -255,8 +255,8 @@ static const struct {
255 { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" }, 255 { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" },
256}; 256};
257 257
258int line_ioctl(struct tty_struct *tty, struct file * file, 258int line_ioctl(struct tty_struct *tty, unsigned int cmd,
259 unsigned int cmd, unsigned long arg) 259 unsigned long arg)
260{ 260{
261 int ret; 261 int ret;
262 int i; 262 int i;
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index bed668824b5f..d1d1b0d8a0cd 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -66,7 +66,7 @@ struct thread_struct {
66 .request = { 0 } \ 66 .request = { 0 } \
67} 67}
68 68
69extern struct task_struct *alloc_task_struct(void); 69extern struct task_struct *alloc_task_struct_node(int node);
70 70
71static inline void release_thread(struct task_struct *task) 71static inline void release_thread(struct task_struct *task)
72{ 72{
diff --git a/arch/um/include/shared/line.h b/arch/um/include/shared/line.h
index 311a0d3d93af..72f4f25af247 100644
--- a/arch/um/include/shared/line.h
+++ b/arch/um/include/shared/line.h
@@ -77,8 +77,8 @@ extern int line_chars_in_buffer(struct tty_struct *tty);
77extern void line_flush_buffer(struct tty_struct *tty); 77extern void line_flush_buffer(struct tty_struct *tty);
78extern void line_flush_chars(struct tty_struct *tty); 78extern void line_flush_chars(struct tty_struct *tty);
79extern int line_write_room(struct tty_struct *tty); 79extern int line_write_room(struct tty_struct *tty);
80extern int line_ioctl(struct tty_struct *tty, struct file * file, 80extern int line_ioctl(struct tty_struct *tty, unsigned int cmd,
81 unsigned int cmd, unsigned long arg); 81 unsigned long arg);
82extern void line_throttle(struct tty_struct *tty); 82extern void line_throttle(struct tty_struct *tty);
83extern void line_unthrottle(struct tty_struct *tty); 83extern void line_unthrottle(struct tty_struct *tty);
84 84
diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h
index a979a22a8d9f..d964a4111ac6 100644
--- a/arch/um/sys-i386/asm/elf.h
+++ b/arch/um/sys-i386/asm/elf.h
@@ -75,6 +75,8 @@ typedef struct user_i387_struct elf_fpregset_t;
75 pr_reg[16] = PT_REGS_SS(regs); \ 75 pr_reg[16] = PT_REGS_SS(regs); \
76} while (0); 76} while (0);
77 77
78#define task_pt_regs(t) (&(t)->thread.regs)
79
78struct task_struct; 80struct task_struct;
79 81
80extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); 82extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index d760967f33a7..d6d5af376251 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -95,6 +95,8 @@ typedef struct user_i387_struct elf_fpregset_t;
95 (pr_reg)[25] = 0; \ 95 (pr_reg)[25] = 0; \
96 (pr_reg)[26] = 0; 96 (pr_reg)[26] = 0;
97 97
98#define task_pt_regs(t) (&(t)->thread.regs)
99
98struct task_struct; 100struct task_struct;
99 101
100extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); 102extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e1f65c46bc93..d57ddd7573cc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -123,7 +123,7 @@ config NEED_SG_DMA_LENGTH
123 def_bool y 123 def_bool y
124 124
125config GENERIC_ISA_DMA 125config GENERIC_ISA_DMA
126 def_bool y 126 def_bool ISA_DMA_API
127 127
128config GENERIC_IOMAP 128config GENERIC_IOMAP
129 def_bool y 129 def_bool y
@@ -143,7 +143,7 @@ config GENERIC_GPIO
143 bool 143 bool
144 144
145config ARCH_MAY_HAVE_PC_FDC 145config ARCH_MAY_HAVE_PC_FDC
146 def_bool y 146 def_bool ISA_DMA_API
147 147
148config RWSEM_GENERIC_SPINLOCK 148config RWSEM_GENERIC_SPINLOCK
149 def_bool !X86_XADD 149 def_bool !X86_XADD
@@ -2002,9 +2002,13 @@ source "drivers/pci/pcie/Kconfig"
2002 2002
2003source "drivers/pci/Kconfig" 2003source "drivers/pci/Kconfig"
2004 2004
2005# x86_64 have no ISA slots, but do have ISA-style DMA. 2005# x86_64 have no ISA slots, but can have ISA-style DMA.
2006config ISA_DMA_API 2006config ISA_DMA_API
2007 def_bool y 2007 bool "ISA-style DMA support" if (X86_64 && EXPERT)
2008 default y
2009 help
2010 Enables ISA-style DMA support for devices requiring such controllers.
2011 If unsure, say Y.
2008 2012
2009if X86_32 2013if X86_32
2010 2014
diff --git a/arch/x86/include/asm/dma.h b/arch/x86/include/asm/dma.h
index ca1098a7e580..97b6d8114a43 100644
--- a/arch/x86/include/asm/dma.h
+++ b/arch/x86/include/asm/dma.h
@@ -151,6 +151,7 @@
151#define DMA_AUTOINIT 0x10 151#define DMA_AUTOINIT 0x10
152 152
153 153
154#ifdef CONFIG_ISA_DMA_API
154extern spinlock_t dma_spin_lock; 155extern spinlock_t dma_spin_lock;
155 156
156static inline unsigned long claim_dma_lock(void) 157static inline unsigned long claim_dma_lock(void)
@@ -164,6 +165,7 @@ static inline void release_dma_lock(unsigned long flags)
164{ 165{
165 spin_unlock_irqrestore(&dma_spin_lock, flags); 166 spin_unlock_irqrestore(&dma_spin_lock, flags);
166} 167}
168#endif /* CONFIG_ISA_DMA_API */
167 169
168/* enable/disable a specific DMA channel */ 170/* enable/disable a specific DMA channel */
169static inline void enable_dma(unsigned int dmanr) 171static inline void enable_dma(unsigned int dmanr)
@@ -303,9 +305,11 @@ static inline int get_dma_residue(unsigned int dmanr)
303} 305}
304 306
305 307
306/* These are in kernel/dma.c: */ 308/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
309#ifdef CONFIG_ISA_DMA_API
307extern int request_dma(unsigned int dmanr, const char *device_id); 310extern int request_dma(unsigned int dmanr, const char *device_id);
308extern void free_dma(unsigned int dmanr); 311extern void free_dma(unsigned int dmanr);
312#endif
309 313
310/* From PCI */ 314/* From PCI */
311 315
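With the lock helpers and request_dma()/free_dma() now guarded, a driver that needs the 8237-style controller simply depends on ISA_DMA_API and uses the same calls as before; everything else compiles the controller code out. A minimal usage sketch (channel number, device name, and function name are illustrative):

	#include <linux/errno.h>
	#include <asm/dma.h>

	/* Claim ISA DMA channel 3 and program a device-to-memory transfer;
	 * the caller releases the channel later with free_dma(3). */
	static int example_start_isa_dma(unsigned int phys, unsigned int len)
	{
		unsigned long flags;

		if (request_dma(3, "example-isa-dev"))
			return -EBUSY;		/* channel already claimed */

		flags = claim_dma_lock();	/* serialize 8237 register pokes */
		disable_dma(3);
		clear_dma_ff(3);
		set_dma_mode(3, DMA_MODE_READ);
		set_dma_addr(3, phys);
		set_dma_count(3, len);
		enable_dma(3);
		release_dma_lock(flags);
		return 0;
	}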
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index f0b6e5dbc5a0..1f2e61e28981 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -161,8 +161,14 @@ struct thread_info {
161 161
162#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR 162#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
163 163
164#define alloc_thread_info(tsk) \ 164#define alloc_thread_info_node(tsk, node) \
165 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER)) 165({ \
166 struct page *page = alloc_pages_node(node, THREAD_FLAGS, \
167 THREAD_ORDER); \
168 struct thread_info *ret = page ? page_address(page) : NULL; \
169 \
170 ret; \
171})
166 172
167#ifdef CONFIG_X86_32 173#ifdef CONFIG_X86_32
168 174
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h
index df1da20f4534..88102055a4b8 100644
--- a/arch/x86/include/asm/types.h
+++ b/arch/x86/include/asm/types.h
@@ -1,20 +1,12 @@
1#ifndef _ASM_X86_TYPES_H 1#ifndef _ASM_X86_TYPES_H
2#define _ASM_X86_TYPES_H 2#define _ASM_X86_TYPES_H
3 3
4#define dma_addr_t dma_addr_t
5
6#include <asm-generic/types.h> 4#include <asm-generic/types.h>
7 5
8#ifdef __KERNEL__ 6#ifdef __KERNEL__
9#ifndef __ASSEMBLY__ 7#ifndef __ASSEMBLY__
10 8
11typedef u64 dma64_addr_t; 9typedef u64 dma64_addr_t;
12#if defined(CONFIG_X86_64) || defined(CONFIG_HIGHMEM64G)
13/* DMA addresses come in 32-bit and 64-bit flavours. */
14typedef u64 dma_addr_t;
15#else
16typedef u32 dma_addr_t;
17#endif
18 10
19#endif /* __ASSEMBLY__ */ 11#endif /* __ASSEMBLY__ */
20#endif /* __KERNEL__ */ 12#endif /* __KERNEL__ */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 743642f1a36c..7338ef2218bc 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
41obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o 41obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
42obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o 42obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o
43obj-y += bootflag.o e820.o 43obj-y += bootflag.o e820.o
44obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o 44obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
45obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o 45obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
46obj-y += tsc.o io_delay.o rtc.o 46obj-y += tsc.o io_delay.o rtc.o
47obj-y += pci-iommu_table.o 47obj-y += pci-iommu_table.o
@@ -55,6 +55,7 @@ obj-$(CONFIG_X86_32) += tls.o
55obj-$(CONFIG_IA32_EMULATION) += tls.o 55obj-$(CONFIG_IA32_EMULATION) += tls.o
56obj-y += step.o 56obj-y += step.o
57obj-$(CONFIG_INTEL_TXT) += tboot.o 57obj-$(CONFIG_INTEL_TXT) += tboot.o
58obj-$(CONFIG_ISA_DMA_API) += i8237.o
58obj-$(CONFIG_STACKTRACE) += stacktrace.o 59obj-$(CONFIG_STACKTRACE) += stacktrace.o
59obj-y += cpu/ 60obj-y += cpu/
60obj-y += acpi/ 61obj-y += acpi/
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 999e2793590b..81ac6c78c01c 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -322,16 +322,6 @@ void die(const char *str, struct pt_regs *regs, long err)
322 oops_end(flags, regs, sig); 322 oops_end(flags, regs, sig);
323} 323}
324 324
325static int __init oops_setup(char *s)
326{
327 if (!s)
328 return -EINVAL;
329 if (!strcmp(s, "panic"))
330 panic_on_oops = 1;
331 return 0;
332}
333early_param("oops", oops_setup);
334
335static int __init kstack_setup(char *s) 325static int __init kstack_setup(char *s)
336{ 326{
337 if (!s) 327 if (!s)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2d2673c28aff..5655c2272adb 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
77 /* Make NULL pointers segfault */ 77 /* Make NULL pointers segfault */
78 zap_identity_mappings(); 78 zap_identity_mappings();
79 79
80 /* Cleanup the over mapped high alias */
81 cleanup_highmap();
82
83 max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; 80 max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
84 81
85 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { 82 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d43b28e0728..32bd87cbf982 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -294,30 +294,11 @@ static void __init init_gbpages(void)
294 else 294 else
295 direct_gbpages = 0; 295 direct_gbpages = 0;
296} 296}
297
298static void __init cleanup_highmap_brk_end(void)
299{
300 pud_t *pud;
301 pmd_t *pmd;
302
303 mmu_cr4_features = read_cr4();
304
305 /*
306 * _brk_end cannot change anymore, but it and _end may be
307 * located on different 2M pages. cleanup_highmap(), however,
308 * can only consider _end when it runs, so destroy any
309 * mappings beyond _brk_end here.
310 */
311 pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
312 pmd = pmd_offset(pud, _brk_end - 1);
313 while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
314 pmd_clear(pmd);
315}
316#else 297#else
317static inline void init_gbpages(void) 298static inline void init_gbpages(void)
318{ 299{
319} 300}
320static inline void cleanup_highmap_brk_end(void) 301static void __init cleanup_highmap(void)
321{ 302{
322} 303}
323#endif 304#endif
@@ -330,8 +311,6 @@ static void __init reserve_brk(void)
330 /* Mark brk area as locked down and no longer taking any 311 /* Mark brk area as locked down and no longer taking any
331 new allocations */ 312 new allocations */
332 _brk_start = 0; 313 _brk_start = 0;
333
334 cleanup_highmap_brk_end();
335} 314}
336 315
337#ifdef CONFIG_BLK_DEV_INITRD 316#ifdef CONFIG_BLK_DEV_INITRD
@@ -950,6 +929,8 @@ void __init setup_arch(char **cmdline_p)
950 */ 929 */
951 reserve_brk(); 930 reserve_brk();
952 931
932 cleanup_highmap();
933
953 memblock.current_limit = get_max_mapped(); 934 memblock.current_limit = get_max_mapped();
954 memblock_x86_fill(); 935 memblock_x86_fill();
955 936
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0aa34669ed3f..2362b646178e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -52,6 +52,7 @@
52#include <asm/cacheflush.h> 52#include <asm/cacheflush.h>
53#include <asm/init.h> 53#include <asm/init.h>
54#include <asm/uv/uv.h> 54#include <asm/uv/uv.h>
55#include <asm/setup.h>
55 56
56static int __init parse_direct_gbpages_off(char *arg) 57static int __init parse_direct_gbpages_off(char *arg)
57{ 58{
@@ -294,18 +295,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
294 * to the compile time generated pmds. This results in invalid pmds up 295 * to the compile time generated pmds. This results in invalid pmds up
295 * to the point where we hit the physaddr 0 mapping. 296 * to the point where we hit the physaddr 0 mapping.
296 * 297 *
297 * We limit the mappings to the region from _text to _end. _end is 298 * We limit the mappings to the region from _text to _brk_end. _brk_end
298 * rounded up to the 2MB boundary. This catches the invalid pmds as 299 * is rounded up to the 2MB boundary. This catches the invalid pmds as
299 * well, as they are located before _text: 300 * well, as they are located before _text:
300 */ 301 */
301void __init cleanup_highmap(void) 302void __init cleanup_highmap(void)
302{ 303{
303 unsigned long vaddr = __START_KERNEL_map; 304 unsigned long vaddr = __START_KERNEL_map;
304 unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; 305 unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
306 unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
305 pmd_t *pmd = level2_kernel_pgt; 307 pmd_t *pmd = level2_kernel_pgt;
306 pmd_t *last_pmd = pmd + PTRS_PER_PMD;
307 308
308 for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) { 309 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
309 if (pmd_none(*pmd)) 310 if (pmd_none(*pmd))
310 continue; 311 continue;
311 if (vaddr < (unsigned long) _text || vaddr > end) 312 if (vaddr < (unsigned long) _text || vaddr > end)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 39ee7182fd18..c82df6c9c0f0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1487,10 +1487,12 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1487 /* 1487 /*
1488 * If the new pfn is within the range of the newly allocated 1488 * If the new pfn is within the range of the newly allocated
1489 * kernel pagetable, and it isn't being mapped into an 1489 * kernel pagetable, and it isn't being mapped into an
1490 * early_ioremap fixmap slot, make sure it is RO. 1490 * early_ioremap fixmap slot as a freshly allocated page, make sure
1491 * it is RO.
1491 */ 1492 */
1492 if (!is_early_ioremap_ptep(ptep) && 1493 if (((!is_early_ioremap_ptep(ptep) &&
1493 pfn >= pgt_buf_start && pfn < pgt_buf_end) 1494 pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
1495 (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
1494 pte = pte_wrprotect(pte); 1496 pte = pte_wrprotect(pte);
1495 1497
1496 return pte; 1498 return pte;
@@ -1700,9 +1702,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1700 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { 1702 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1701 pte_t pte; 1703 pte_t pte;
1702 1704
1703 if (pfn > max_pfn_mapped)
1704 max_pfn_mapped = pfn;
1705
1706 if (!pte_none(pte_page[pteidx])) 1705 if (!pte_none(pte_page[pteidx]))
1707 continue; 1706 continue;
1708 1707
@@ -1760,6 +1759,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1760 pud_t *l3; 1759 pud_t *l3;
1761 pmd_t *l2; 1760 pmd_t *l2;
1762 1761
1762 /* max_pfn_mapped is the last pfn mapped in the initial memory
1763 * mappings. Considering that on Xen after the kernel mappings we
1764 * have the mappings of some pages that don't exist in pfn space, we
1765 * set max_pfn_mapped to the last real pfn mapped. */
1766 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1767
1763 /* Zap identity mapping */ 1768 /* Zap identity mapping */
1764 init_level4_pgt[0] = __pgd(0); 1769 init_level4_pgt[0] = __pgd(0);
1765 1770
@@ -1864,9 +1869,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1864 initial_kernel_pmd = 1869 initial_kernel_pmd =
1865 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 1870 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1866 1871
1867 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + 1872 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1868 xen_start_info->nr_pt_frames * PAGE_SIZE +
1869 512*1024);
1870 1873
1871 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); 1874 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1872 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); 1875 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/xtensa/include/asm/types.h b/arch/xtensa/include/asm/types.h
index c89569a8da0c..b1c981e39b52 100644
--- a/arch/xtensa/include/asm/types.h
+++ b/arch/xtensa/include/asm/types.h
@@ -32,10 +32,6 @@ typedef unsigned short umode_t;
32 32
33#define BITS_PER_LONG 32 33#define BITS_PER_LONG 32
34 34
35/* Dma addresses are 32-bits wide. */
36
37typedef u32 dma_addr_t;
38
39#endif /* __KERNEL__ */ 35#endif /* __KERNEL__ */
40#endif 36#endif
41 37
diff --git a/crypto/deflate.c b/crypto/deflate.c
index cbc7a33a9600..b5ccae29be74 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -48,7 +48,8 @@ static int deflate_comp_init(struct deflate_ctx *ctx)
48 int ret = 0; 48 int ret = 0;
49 struct z_stream_s *stream = &ctx->comp_stream; 49 struct z_stream_s *stream = &ctx->comp_stream;
50 50
51 stream->workspace = vzalloc(zlib_deflate_workspacesize()); 51 stream->workspace = vzalloc(zlib_deflate_workspacesize(
52 -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL));
52 if (!stream->workspace) { 53 if (!stream->workspace) {
53 ret = -ENOMEM; 54 ret = -ENOMEM;
54 goto out; 55 goto out;
diff --git a/crypto/zlib.c b/crypto/zlib.c
index 739b8fca4cea..d11d761a5e41 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -85,6 +85,7 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
85 struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm)); 85 struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
86 struct z_stream_s *stream = &ctx->comp_stream; 86 struct z_stream_s *stream = &ctx->comp_stream;
87 struct nlattr *tb[ZLIB_COMP_MAX + 1]; 87 struct nlattr *tb[ZLIB_COMP_MAX + 1];
88 int window_bits, mem_level;
88 size_t workspacesize; 89 size_t workspacesize;
89 int ret; 90 int ret;
90 91
@@ -94,7 +95,14 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
94 95
95 zlib_comp_exit(ctx); 96 zlib_comp_exit(ctx);
96 97
97 workspacesize = zlib_deflate_workspacesize(); 98 window_bits = tb[ZLIB_COMP_WINDOWBITS]
99 ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
100 : MAX_WBITS;
101 mem_level = tb[ZLIB_COMP_MEMLEVEL]
102 ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
103 : DEF_MEM_LEVEL;
104
105 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
98 stream->workspace = vzalloc(workspacesize); 106 stream->workspace = vzalloc(workspacesize);
99 if (!stream->workspace) 107 if (!stream->workspace)
100 return -ENOMEM; 108 return -ENOMEM;
@@ -106,12 +114,8 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
106 tb[ZLIB_COMP_METHOD] 114 tb[ZLIB_COMP_METHOD]
107 ? nla_get_u32(tb[ZLIB_COMP_METHOD]) 115 ? nla_get_u32(tb[ZLIB_COMP_METHOD])
108 : Z_DEFLATED, 116 : Z_DEFLATED,
109 tb[ZLIB_COMP_WINDOWBITS] 117 window_bits,
110 ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS]) 118 mem_level,
111 : MAX_WBITS,
112 tb[ZLIB_COMP_MEMLEVEL]
113 ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
114 : DEF_MEM_LEVEL,
115 tb[ZLIB_COMP_STRATEGY] 119 tb[ZLIB_COMP_STRATEGY]
116 ? nla_get_u32(tb[ZLIB_COMP_STRATEGY]) 120 ? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
117 : Z_DEFAULT_STRATEGY); 121 : Z_DEFAULT_STRATEGY);
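Both callers above must hand zlib_deflate_workspacesize() the same windowBits/memLevel they later pass to zlib_deflateInit2(), since the workspace is sized for exactly those parameters; crypto/deflate.c negates its window bits because a negative windowBits selects raw deflate with no zlib header. A hedged sketch of the pairing, using the DEFLATE_DEF_* constants from crypto/deflate.c:

	/* Size the workspace and initialize deflate with matching parameters;
	 * mismatched values here would under- or over-allocate the workspace. */
	stream->workspace = vzalloc(zlib_deflate_workspacesize(-DEFLATE_DEF_WINBITS,
							       DEFLATE_DEF_MEMLEVEL));
	if (!stream->workspace)
		return -ENOMEM;

	ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
				-DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
				Z_DEFAULT_STRATEGY);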
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 90f8f7676d1f..a18e497f1c3c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -782,6 +782,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
782 782
783 if (acpi_video_backlight_support()) { 783 if (acpi_video_backlight_support()) {
784 struct backlight_properties props; 784 struct backlight_properties props;
785 struct pci_dev *pdev;
786 acpi_handle acpi_parent;
787 struct device *parent = NULL;
785 int result; 788 int result;
786 static int count = 0; 789 static int count = 0;
787 char *name; 790 char *name;
@@ -794,9 +797,20 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
794 return; 797 return;
795 count++; 798 count++;
796 799
800 acpi_get_parent(device->dev->handle, &acpi_parent);
801
802 pdev = acpi_get_pci_dev(acpi_parent);
803 if (pdev) {
804 parent = &pdev->dev;
805 pci_dev_put(pdev);
806 }
807
797 memset(&props, 0, sizeof(struct backlight_properties)); 808 memset(&props, 0, sizeof(struct backlight_properties));
809 props.type = BACKLIGHT_FIRMWARE;
798 props.max_brightness = device->brightness->count - 3; 810 props.max_brightness = device->brightness->count - 3;
799 device->backlight = backlight_device_register(name, NULL, device, 811 device->backlight = backlight_device_register(name,
812 parent,
813 device,
800 &acpi_backlight_ops, 814 &acpi_backlight_ops,
801 &props); 815 &props);
802 kfree(name); 816 kfree(name);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e1e38b11f48a..16dc3645291c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -31,6 +31,7 @@
31#include <linux/ceph/osd_client.h> 31#include <linux/ceph/osd_client.h>
32#include <linux/ceph/mon_client.h> 32#include <linux/ceph/mon_client.h>
33#include <linux/ceph/decode.h> 33#include <linux/ceph/decode.h>
34#include <linux/parser.h>
34 35
35#include <linux/kernel.h> 36#include <linux/kernel.h>
36#include <linux/device.h> 37#include <linux/device.h>
@@ -54,6 +55,8 @@
54 55
55#define DEV_NAME_LEN 32 56#define DEV_NAME_LEN 32
56 57
58#define RBD_NOTIFY_TIMEOUT_DEFAULT 10
59
57/* 60/*
58 * block device image metadata (in-memory version) 61 * block device image metadata (in-memory version)
59 */ 62 */
@@ -71,6 +74,12 @@ struct rbd_image_header {
71 74
72 char *snap_names; 75 char *snap_names;
73 u64 *snap_sizes; 76 u64 *snap_sizes;
77
78 u64 obj_version;
79};
80
81struct rbd_options {
82 int notify_timeout;
74}; 83};
75 84
76/* 85/*
@@ -78,6 +87,7 @@ struct rbd_image_header {
78 */ 87 */
79struct rbd_client { 88struct rbd_client {
80 struct ceph_client *client; 89 struct ceph_client *client;
90 struct rbd_options *rbd_opts;
81 struct kref kref; 91 struct kref kref;
82 struct list_head node; 92 struct list_head node;
83}; 93};
@@ -124,6 +134,9 @@ struct rbd_device {
124 char pool_name[RBD_MAX_POOL_NAME_LEN]; 134 char pool_name[RBD_MAX_POOL_NAME_LEN];
125 int poolid; 135 int poolid;
126 136
137 struct ceph_osd_event *watch_event;
138 struct ceph_osd_request *watch_request;
139
127 char snap_name[RBD_MAX_SNAP_NAME_LEN]; 140 char snap_name[RBD_MAX_SNAP_NAME_LEN];
128 u32 cur_snap; /* index+1 of current snapshot within snap context 141 u32 cur_snap; /* index+1 of current snapshot within snap context
129 0 - for the head */ 142 0 - for the head */
@@ -177,6 +190,8 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
177 put_device(&rbd_dev->dev); 190 put_device(&rbd_dev->dev);
178} 191}
179 192
193static int __rbd_update_snaps(struct rbd_device *rbd_dev);
194
180static int rbd_open(struct block_device *bdev, fmode_t mode) 195static int rbd_open(struct block_device *bdev, fmode_t mode)
181{ 196{
182 struct gendisk *disk = bdev->bd_disk; 197 struct gendisk *disk = bdev->bd_disk;
@@ -211,7 +226,8 @@ static const struct block_device_operations rbd_bd_ops = {
211 * Initialize an rbd client instance. 226 * Initialize an rbd client instance.
212 * We own *opt. 227 * We own *opt.
213 */ 228 */
214static struct rbd_client *rbd_client_create(struct ceph_options *opt) 229static struct rbd_client *rbd_client_create(struct ceph_options *opt,
230 struct rbd_options *rbd_opts)
215{ 231{
216 struct rbd_client *rbdc; 232 struct rbd_client *rbdc;
217 int ret = -ENOMEM; 233 int ret = -ENOMEM;
@@ -233,6 +249,8 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt)
233 if (ret < 0) 249 if (ret < 0)
234 goto out_err; 250 goto out_err;
235 251
252 rbdc->rbd_opts = rbd_opts;
253
236 spin_lock(&node_lock); 254 spin_lock(&node_lock);
237 list_add_tail(&rbdc->node, &rbd_client_list); 255 list_add_tail(&rbdc->node, &rbd_client_list);
238 spin_unlock(&node_lock); 256 spin_unlock(&node_lock);
@@ -267,6 +285,59 @@ static struct rbd_client *__rbd_client_find(struct ceph_options *opt)
267} 285}
268 286
269/* 287/*
288 * mount options
289 */
290enum {
291 Opt_notify_timeout,
292 Opt_last_int,
293 /* int args above */
294 Opt_last_string,
295 /* string args above */
296};
297
298static match_table_t rbdopt_tokens = {
299 {Opt_notify_timeout, "notify_timeout=%d"},
300 /* int args above */
301 /* string args above */
302 {-1, NULL}
303};
304
305static int parse_rbd_opts_token(char *c, void *private)
306{
307 struct rbd_options *rbdopt = private;
308 substring_t argstr[MAX_OPT_ARGS];
309 int token, intval, ret;
310
311 token = match_token((char *)c, rbdopt_tokens, argstr);
312 if (token < 0)
313 return -EINVAL;
314
315 if (token < Opt_last_int) {
316 ret = match_int(&argstr[0], &intval);
317 if (ret < 0) {
318 pr_err("bad mount option arg (not int) "
319 "at '%s'\n", c);
320 return ret;
321 }
322 dout("got int token %d val %d\n", token, intval);
323 } else if (token > Opt_last_int && token < Opt_last_string) {
324 dout("got string token %d val %s\n", token,
325 argstr[0].from);
326 } else {
327 dout("got token %d\n", token);
328 }
329
330 switch (token) {
331 case Opt_notify_timeout:
332 rbdopt->notify_timeout = intval;
333 break;
334 default:
335 BUG_ON(token);
336 }
337 return 0;
338}
339
340/*
270 * Get a ceph client with specific addr and configuration, if one does 341 * Get a ceph client with specific addr and configuration, if one does
271 * not exist create it. 342 * not exist create it.
272 */ 343 */
@@ -276,11 +347,18 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
276 struct rbd_client *rbdc; 347 struct rbd_client *rbdc;
277 struct ceph_options *opt; 348 struct ceph_options *opt;
278 int ret; 349 int ret;
350 struct rbd_options *rbd_opts;
351
352 rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL);
353 if (!rbd_opts)
354 return -ENOMEM;
355
356 rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
279 357
280 ret = ceph_parse_options(&opt, options, mon_addr, 358 ret = ceph_parse_options(&opt, options, mon_addr,
281 mon_addr + strlen(mon_addr), NULL, NULL); 359 mon_addr + strlen(mon_addr), parse_rbd_opts_token, rbd_opts);
282 if (ret < 0) 360 if (ret < 0)
283 return ret; 361 goto done_err;
284 362
285 spin_lock(&node_lock); 363 spin_lock(&node_lock);
286 rbdc = __rbd_client_find(opt); 364 rbdc = __rbd_client_find(opt);
@@ -296,13 +374,18 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
296 } 374 }
297 spin_unlock(&node_lock); 375 spin_unlock(&node_lock);
298 376
299 rbdc = rbd_client_create(opt); 377 rbdc = rbd_client_create(opt, rbd_opts);
300 if (IS_ERR(rbdc)) 378 if (IS_ERR(rbdc)) {
301 return PTR_ERR(rbdc); 379 ret = PTR_ERR(rbdc);
380 goto done_err;
381 }
302 382
303 rbd_dev->rbd_client = rbdc; 383 rbd_dev->rbd_client = rbdc;
304 rbd_dev->client = rbdc->client; 384 rbd_dev->client = rbdc->client;
305 return 0; 385 return 0;
386done_err:
387 kfree(rbd_opts);
388 return ret;
306} 389}
307 390
308/* 391/*
@@ -318,6 +401,7 @@ static void rbd_client_release(struct kref *kref)
318 spin_unlock(&node_lock); 401 spin_unlock(&node_lock);
319 402
320 ceph_destroy_client(rbdc->client); 403 ceph_destroy_client(rbdc->client);
404 kfree(rbdc->rbd_opts);
321 kfree(rbdc); 405 kfree(rbdc);
322} 406}
323 407
@@ -666,7 +750,9 @@ static int rbd_do_request(struct request *rq,
666 struct ceph_osd_req_op *ops, 750 struct ceph_osd_req_op *ops,
667 int num_reply, 751 int num_reply,
668 void (*rbd_cb)(struct ceph_osd_request *req, 752 void (*rbd_cb)(struct ceph_osd_request *req,
669 struct ceph_msg *msg)) 753 struct ceph_msg *msg),
754 struct ceph_osd_request **linger_req,
755 u64 *ver)
670{ 756{
671 struct ceph_osd_request *req; 757 struct ceph_osd_request *req;
672 struct ceph_file_layout *layout; 758 struct ceph_file_layout *layout;
@@ -729,12 +815,20 @@ static int rbd_do_request(struct request *rq,
729 req->r_oid, req->r_oid_len); 815 req->r_oid, req->r_oid_len);
730 up_read(&header->snap_rwsem); 816 up_read(&header->snap_rwsem);
731 817
818 if (linger_req) {
819 ceph_osdc_set_request_linger(&dev->client->osdc, req);
820 *linger_req = req;
821 }
822
732 ret = ceph_osdc_start_request(&dev->client->osdc, req, false); 823 ret = ceph_osdc_start_request(&dev->client->osdc, req, false);
733 if (ret < 0) 824 if (ret < 0)
734 goto done_err; 825 goto done_err;
735 826
736 if (!rbd_cb) { 827 if (!rbd_cb) {
737 ret = ceph_osdc_wait_request(&dev->client->osdc, req); 828 ret = ceph_osdc_wait_request(&dev->client->osdc, req);
829 if (ver)
830 *ver = le64_to_cpu(req->r_reassert_version.version);
831 dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version));
738 ceph_osdc_put_request(req); 832 ceph_osdc_put_request(req);
739 } 833 }
740 return ret; 834 return ret;
@@ -789,6 +883,11 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
789 kfree(req_data); 883 kfree(req_data);
790} 884}
791 885
886static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
887{
888 ceph_osdc_put_request(req);
889}
890
792/* 891/*
793 * Do a synchronous ceph osd operation 892 * Do a synchronous ceph osd operation
794 */ 893 */
@@ -801,7 +900,9 @@ static int rbd_req_sync_op(struct rbd_device *dev,
801 int num_reply, 900 int num_reply,
802 const char *obj, 901 const char *obj,
803 u64 ofs, u64 len, 902 u64 ofs, u64 len,
804 char *buf) 903 char *buf,
904 struct ceph_osd_request **linger_req,
905 u64 *ver)
805{ 906{
806 int ret; 907 int ret;
807 struct page **pages; 908 struct page **pages;
@@ -833,7 +934,8 @@ static int rbd_req_sync_op(struct rbd_device *dev,
833 flags, 934 flags,
834 ops, 935 ops,
835 2, 936 2,
836 NULL); 937 NULL,
938 linger_req, ver);
837 if (ret < 0) 939 if (ret < 0)
838 goto done_ops; 940 goto done_ops;
839 941
@@ -893,7 +995,7 @@ static int rbd_do_op(struct request *rq,
893 flags, 995 flags,
894 ops, 996 ops,
895 num_reply, 997 num_reply,
896 rbd_req_cb); 998 rbd_req_cb, 0, NULL);
897done: 999done:
898 kfree(seg_name); 1000 kfree(seg_name);
899 return ret; 1001 return ret;
@@ -940,18 +1042,174 @@ static int rbd_req_sync_read(struct rbd_device *dev,
940 u64 snapid, 1042 u64 snapid,
941 const char *obj, 1043 const char *obj,
942 u64 ofs, u64 len, 1044 u64 ofs, u64 len,
943 char *buf) 1045 char *buf,
1046 u64 *ver)
944{ 1047{
945 return rbd_req_sync_op(dev, NULL, 1048 return rbd_req_sync_op(dev, NULL,
946 (snapid ? snapid : CEPH_NOSNAP), 1049 (snapid ? snapid : CEPH_NOSNAP),
947 CEPH_OSD_OP_READ, 1050 CEPH_OSD_OP_READ,
948 CEPH_OSD_FLAG_READ, 1051 CEPH_OSD_FLAG_READ,
949 NULL, 1052 NULL,
950 1, obj, ofs, len, buf); 1053 1, obj, ofs, len, buf, NULL, ver);
951} 1054}
952 1055
953/* 1056/*
954 * Request sync osd read 1057 * Request sync osd watch
1058 */
1059static int rbd_req_sync_notify_ack(struct rbd_device *dev,
1060 u64 ver,
1061 u64 notify_id,
1062 const char *obj)
1063{
1064 struct ceph_osd_req_op *ops;
1065 struct page **pages = NULL;
1066 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
1067 if (ret < 0)
1068 return ret;
1069
1070 ops[0].watch.ver = cpu_to_le64(dev->header.obj_version);
1071 ops[0].watch.cookie = notify_id;
1072 ops[0].watch.flag = 0;
1073
1074 ret = rbd_do_request(NULL, dev, NULL, CEPH_NOSNAP,
1075 obj, 0, 0, NULL,
1076 pages, 0,
1077 CEPH_OSD_FLAG_READ,
1078 ops,
1079 1,
1080 rbd_simple_req_cb, 0, NULL);
1081
1082 rbd_destroy_ops(ops);
1083 return ret;
1084}
1085
1086static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1087{
1088 struct rbd_device *dev = (struct rbd_device *)data;
1089 if (!dev)
1090 return;
1091
1092 dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
1093 notify_id, (int)opcode);
1094 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1095 __rbd_update_snaps(dev);
1096 mutex_unlock(&ctl_mutex);
1097
1098 rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
1099}
1100
1101/*
1102 * Request sync osd watch
1103 */
1104static int rbd_req_sync_watch(struct rbd_device *dev,
1105 const char *obj,
1106 u64 ver)
1107{
1108 struct ceph_osd_req_op *ops;
1109 struct ceph_osd_client *osdc = &dev->client->osdc;
1110
1111 int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
1112 if (ret < 0)
1113 return ret;
1114
1115 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
1116 (void *)dev, &dev->watch_event);
1117 if (ret < 0)
1118 goto fail;
1119
1120 ops[0].watch.ver = cpu_to_le64(ver);
1121 ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
1122 ops[0].watch.flag = 1;
1123
1124 ret = rbd_req_sync_op(dev, NULL,
1125 CEPH_NOSNAP,
1126 0,
1127 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1128 ops,
1129 1, obj, 0, 0, NULL,
1130 &dev->watch_request, NULL);
1131
1132 if (ret < 0)
1133 goto fail_event;
1134
1135 rbd_destroy_ops(ops);
1136 return 0;
1137
1138fail_event:
1139 ceph_osdc_cancel_event(dev->watch_event);
1140 dev->watch_event = NULL;
1141fail:
1142 rbd_destroy_ops(ops);
1143 return ret;
1144}
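
rbd_req_sync_watch() acquires its resources in order and unwinds them in reverse through the fail_event/fail labels. A compilable sketch of that staged-unwind shape, with hypothetical helpers standing in for rbd_create_rw_ops() and ceph_osdc_create_event():

#include <stdio.h>
#include <stdlib.h>

static int create_ops(void **ops) { *ops = malloc(1); return *ops ? 0 : -1; }
static int create_event(void **ev) { *ev = malloc(1); return *ev ? 0 : -1; }
static int arm_watch(void *ops, void *ev) { (void)ops; (void)ev; return 0; }

static int sync_watch(void **eventp)
{
	void *ops, *event;
	int ret;

	ret = create_ops(&ops);
	if (ret < 0)
		return ret;

	ret = create_event(&event);
	if (ret < 0)
		goto fail;               /* only ops to undo so far */

	ret = arm_watch(ops, event);
	if (ret < 0)
		goto fail_event;         /* undo event, then fall into ops undo */

	free(ops);                       /* ops are not needed once armed */
	*eventp = event;                 /* event stays live for callbacks */
	return 0;

fail_event:
	free(event);
fail:
	free(ops);
	return ret;
}

int main(void)
{
	void *ev;
	int ret = sync_watch(&ev);
	printf("sync_watch -> %d\n", ret);
	if (!ret)
		free(ev);                /* teardown, cf. rbd_dev_release() */
	return 0;
}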
1145
1146struct rbd_notify_info {
1147 struct rbd_device *dev;
1148};
1149
1150static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1151{
1152 struct rbd_device *dev = (struct rbd_device *)data;
1153 if (!dev)
1154 return;
1155
1156 dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
1157 notify_id, (int)opcode);
1158}
1159
1160/*
1161 * Request sync osd notify
1162 */
1163static int rbd_req_sync_notify(struct rbd_device *dev,
1164 const char *obj)
1165{
1166 struct ceph_osd_req_op *ops;
1167 struct ceph_osd_client *osdc = &dev->client->osdc;
1168 struct ceph_osd_event *event;
1169 struct rbd_notify_info info;
1170 int payload_len = sizeof(u32) + sizeof(u32);
1171 int ret;
1172
1173 ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY, payload_len);
1174 if (ret < 0)
1175 return ret;
1176
1177 info.dev = dev;
1178
1179 ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1,
1180 (void *)&info, &event);
1181 if (ret < 0)
1182 goto fail;
1183
1184 ops[0].watch.ver = 1;
1185 ops[0].watch.flag = 1;
1186 ops[0].watch.cookie = event->cookie;
1187 ops[0].watch.prot_ver = RADOS_NOTIFY_VER;
1188 ops[0].watch.timeout = 12;
1189
1190 ret = rbd_req_sync_op(dev, NULL,
1191 CEPH_NOSNAP,
1192 0,
1193 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1194 ops,
1195 1, obj, 0, 0, NULL, NULL, NULL);
1196 if (ret < 0)
1197 goto fail_event;
1198
1199 ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT);
1200 dout("ceph_osdc_wait_event returned %d\n", ret);
1201 rbd_destroy_ops(ops);
1202 return 0;
1203
1204fail_event:
1205 ceph_osdc_cancel_event(event);
1206fail:
1207 rbd_destroy_ops(ops);
1208 return ret;
1209}
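
rbd_req_sync_notify() fires the notify and then blocks in ceph_osdc_wait_event() until the acknowledgement callback runs or the timeout expires. A rough pthreads analogue of that completion-with-timeout shape (this is not the osd client API, just the synchronization pattern):

#include <stdio.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;

static void *notifier(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done = 1;                        /* the "notify ack" arrived */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int wait_event_timeout(int timeout_sec)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!done && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);
	return ret;                      /* 0 on success, ETIMEDOUT otherwise */
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, notifier, NULL);
	printf("wait -> %d\n", wait_event_timeout(12));
	pthread_join(t, NULL);
	return 0;
}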
1210
1211/*
1212 * Request sync osd rollback
955 */ 1213 */
956static int rbd_req_sync_rollback_obj(struct rbd_device *dev, 1214static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
957 u64 snapid, 1215 u64 snapid,
@@ -969,13 +1227,10 @@ static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
969 0, 1227 0,
970 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1228 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
971 ops, 1229 ops,
972 1, obj, 0, 0, NULL); 1230 1, obj, 0, 0, NULL, NULL, NULL);
973 1231
974 rbd_destroy_ops(ops); 1232 rbd_destroy_ops(ops);
975 1233
976 if (ret < 0)
977 return ret;
978
979 return ret; 1234 return ret;
980} 1235}
981 1236
@@ -987,7 +1242,8 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
987 const char *cls, 1242 const char *cls,
988 const char *method, 1243 const char *method,
989 const char *data, 1244 const char *data,
990 int len) 1245 int len,
1246 u64 *ver)
991{ 1247{
992 struct ceph_osd_req_op *ops; 1248 struct ceph_osd_req_op *ops;
993 int cls_len = strlen(cls); 1249 int cls_len = strlen(cls);
@@ -1010,7 +1266,7 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
1010 0, 1266 0,
1011 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 1267 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1012 ops, 1268 ops,
1013 1, obj, 0, 0, NULL); 1269 1, obj, 0, 0, NULL, NULL, ver);
1014 1270
1015 rbd_destroy_ops(ops); 1271 rbd_destroy_ops(ops);
1016 1272
@@ -1156,6 +1412,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1156 struct rbd_image_header_ondisk *dh; 1412 struct rbd_image_header_ondisk *dh;
1157 int snap_count = 0; 1413 int snap_count = 0;
1158 u64 snap_names_len = 0; 1414 u64 snap_names_len = 0;
1415 u64 ver;
1159 1416
1160 while (1) { 1417 while (1) {
1161 int len = sizeof(*dh) + 1418 int len = sizeof(*dh) +
@@ -1171,7 +1428,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1171 NULL, CEPH_NOSNAP, 1428 NULL, CEPH_NOSNAP,
1172 rbd_dev->obj_md_name, 1429 rbd_dev->obj_md_name,
1173 0, len, 1430 0, len,
1174 (char *)dh); 1431 (char *)dh, &ver);
1175 if (rc < 0) 1432 if (rc < 0)
1176 goto out_dh; 1433 goto out_dh;
1177 1434
@@ -1188,6 +1445,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
1188 } 1445 }
1189 break; 1446 break;
1190 } 1447 }
1448 header->obj_version = ver;
1191 1449
1192out_dh: 1450out_dh:
1193 kfree(dh); 1451 kfree(dh);
@@ -1205,6 +1463,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1205 u64 new_snapid; 1463 u64 new_snapid;
1206 int ret; 1464 int ret;
1207 void *data, *data_start, *data_end; 1465 void *data, *data_start, *data_end;
1466 u64 ver;
1208 1467
1209 /* we should create a snapshot only if we're pointing at the head */ 1468 /* we should create a snapshot only if we're pointing at the head */
1210 if (dev->cur_snap) 1469 if (dev->cur_snap)
@@ -1227,7 +1486,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
1227 ceph_encode_64_safe(&data, data_end, new_snapid, bad); 1486 ceph_encode_64_safe(&data, data_end, new_snapid, bad);
1228 1487
1229 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", 1488 ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
1230 data_start, data - data_start); 1489 data_start, data - data_start, &ver);
1231 1490
1232 kfree(data_start); 1491 kfree(data_start);
1233 1492
@@ -1259,6 +1518,7 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1259 int ret; 1518 int ret;
1260 struct rbd_image_header h; 1519 struct rbd_image_header h;
1261 u64 snap_seq; 1520 u64 snap_seq;
1521 int follow_seq = 0;
1262 1522
1263 ret = rbd_read_header(rbd_dev, &h); 1523 ret = rbd_read_header(rbd_dev, &h);
1264 if (ret < 0) 1524 if (ret < 0)
@@ -1267,6 +1527,11 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1267 down_write(&rbd_dev->header.snap_rwsem); 1527 down_write(&rbd_dev->header.snap_rwsem);
1268 1528
1269 snap_seq = rbd_dev->header.snapc->seq; 1529 snap_seq = rbd_dev->header.snapc->seq;
1530 if (rbd_dev->header.total_snaps &&
1531 rbd_dev->header.snapc->snaps[0] == snap_seq)
1532 /* pointing at the head, will need to follow that
1533 if head moves */
1534 follow_seq = 1;
1270 1535
1271 kfree(rbd_dev->header.snapc); 1536 kfree(rbd_dev->header.snapc);
1272 kfree(rbd_dev->header.snap_names); 1537 kfree(rbd_dev->header.snap_names);
@@ -1277,7 +1542,10 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1277 rbd_dev->header.snap_names = h.snap_names; 1542 rbd_dev->header.snap_names = h.snap_names;
1278 rbd_dev->header.snap_names_len = h.snap_names_len; 1543 rbd_dev->header.snap_names_len = h.snap_names_len;
1279 rbd_dev->header.snap_sizes = h.snap_sizes; 1544 rbd_dev->header.snap_sizes = h.snap_sizes;
1280 rbd_dev->header.snapc->seq = snap_seq; 1545 if (follow_seq)
1546 rbd_dev->header.snapc->seq = rbd_dev->header.snapc->snaps[0];
1547 else
1548 rbd_dev->header.snapc->seq = snap_seq;
1281 1549
1282 ret = __rbd_init_snaps_header(rbd_dev); 1550 ret = __rbd_init_snaps_header(rbd_dev);
1283 1551
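
The follow_seq logic above keeps the context's seq tracking the head: if seq equalled the newest snapshot before the refresh, it is moved onto the new head; otherwise the previously selected seq is restored. A small sketch of the decision, assuming snaps[] is ordered newest-first as in the rbd snapshot context:

#include <stdio.h>

static unsigned long long pick_seq(const unsigned long long *old_snaps,
				   int total, unsigned long long old_seq,
				   const unsigned long long *new_snaps)
{
	int follow_head = total && old_snaps[0] == old_seq;
	return follow_head ? new_snaps[0] : old_seq;
}

int main(void)
{
	unsigned long long before[] = { 30, 20, 10 };
	unsigned long long after[]  = { 40, 30, 20, 10 };
	/* seq was at the head (30), so it follows the new head (40) */
	printf("%llu\n", pick_seq(before, 3, 30, after));
	/* seq was pinned at 20, so it stays at 20 */
	printf("%llu\n", pick_seq(before, 3, 20, after));
	return 0;
}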
@@ -1699,7 +1967,28 @@ static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
1699 device_unregister(&rbd_dev->dev); 1967 device_unregister(&rbd_dev->dev);
1700} 1968}
1701 1969
1702static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count) 1970static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
1971{
1972 int ret, rc;
1973
1974 do {
1975 ret = rbd_req_sync_watch(rbd_dev, rbd_dev->obj_md_name,
1976 rbd_dev->header.obj_version);
1977 if (ret == -ERANGE) {
1978 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1979 rc = __rbd_update_snaps(rbd_dev);
1980 mutex_unlock(&ctl_mutex);
1981 if (rc < 0)
1982 return rc;
1983 }
1984 } while (ret == -ERANGE);
1985
1986 return ret;
1987}
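
rbd_init_watch_dev() retries on -ERANGE, which here signals that the cached header version went stale between read and watch registration; the loop refreshes the snapshot context and tries again. The same retry shape in self-contained C (try_watch and refresh are invented stand-ins for the driver calls):

#include <stdio.h>
#include <errno.h>

static int version = 1;

static int try_watch(int ver) { return ver < 3 ? -ERANGE : 0; }
static int refresh(void) { version++; return 0; }

static int init_watch(void)
{
	int ret, rc;

	do {
		ret = try_watch(version);
		if (ret == -ERANGE) {
			rc = refresh();  /* pick up the new object version */
			if (rc < 0)
				return rc;
		}
	} while (ret == -ERANGE);

	return ret;
}

int main(void) { printf("init_watch -> %d\n", init_watch()); return 0; }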
1988
1989static ssize_t rbd_add(struct bus_type *bus,
1990 const char *buf,
1991 size_t count)
1703{ 1992{
1704 struct ceph_osd_client *osdc; 1993 struct ceph_osd_client *osdc;
1705 struct rbd_device *rbd_dev; 1994 struct rbd_device *rbd_dev;
@@ -1797,6 +2086,10 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
1797 if (rc) 2086 if (rc)
1798 goto err_out_bus; 2087 goto err_out_bus;
1799 2088
2089 rc = rbd_init_watch_dev(rbd_dev);
2090 if (rc)
2091 goto err_out_bus;
2092
1800 return count; 2093 return count;
1801 2094
1802err_out_bus: 2095err_out_bus:
@@ -1849,6 +2142,12 @@ static void rbd_dev_release(struct device *dev)
1849 struct rbd_device *rbd_dev = 2142 struct rbd_device *rbd_dev =
1850 container_of(dev, struct rbd_device, dev); 2143 container_of(dev, struct rbd_device, dev);
1851 2144
2145 if (rbd_dev->watch_request)
2146 ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc,
2147 rbd_dev->watch_request);
2148 if (rbd_dev->watch_event)
2149 ceph_osdc_cancel_event(rbd_dev->watch_event);
2150
1852 rbd_put_client(rbd_dev); 2151 rbd_put_client(rbd_dev);
1853 2152
1854 /* clean up and free blkdev */ 2153 /* clean up and free blkdev */
@@ -1914,14 +2213,24 @@ static ssize_t rbd_snap_add(struct device *dev,
1914 ret = rbd_header_add_snap(rbd_dev, 2213 ret = rbd_header_add_snap(rbd_dev,
1915 name, GFP_KERNEL); 2214 name, GFP_KERNEL);
1916 if (ret < 0) 2215 if (ret < 0)
1917 goto done_unlock; 2216 goto err_unlock;
1918 2217
1919 ret = __rbd_update_snaps(rbd_dev); 2218 ret = __rbd_update_snaps(rbd_dev);
1920 if (ret < 0) 2219 if (ret < 0)
1921 goto done_unlock; 2220 goto err_unlock;
2221
2222 /* shouldn't hold ctl_mutex when notifying.. notify might
2223 trigger a watch callback that would need to get that mutex */
2224 mutex_unlock(&ctl_mutex);
2225
2226 /* make a best effort, don't error if failed */
2227 rbd_req_sync_notify(rbd_dev, rbd_dev->obj_md_name);
1922 2228
1923 ret = count; 2229 ret = count;
1924done_unlock: 2230 kfree(name);
2231 return ret;
2232
2233err_unlock:
1925 mutex_unlock(&ctl_mutex); 2234 mutex_unlock(&ctl_mutex);
1926 kfree(name); 2235 kfree(name);
1927 return ret; 2236 return ret;
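
The rbd_snap_add() change is a lock-ordering fix: the notify may bounce straight back as a watch callback that itself takes ctl_mutex, so the mutex must be dropped before notifying. A pthreads sketch of the rule (never invoke a callback that may re-acquire a lock you still hold):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t ctl = PTHREAD_MUTEX_INITIALIZER;

static void watch_callback(void)
{
	pthread_mutex_lock(&ctl);    /* would deadlock if caller held ctl */
	puts("callback updated state");
	pthread_mutex_unlock(&ctl);
}

static void snap_add(void)
{
	pthread_mutex_lock(&ctl);
	puts("snapshot recorded");   /* state changes under the lock */
	pthread_mutex_unlock(&ctl);  /* release BEFORE notifying */

	watch_callback();            /* best effort; safe to re-lock now */
}

int main(void) { snap_add(); return 0; }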
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index c461eda62411..4abd089a094f 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -111,10 +111,8 @@ static void unregister_dca_providers(void)
111 /* at this point only one domain in the list is expected */ 111 /* at this point only one domain in the list is expected */
112 domain = list_first_entry(&dca_domains, struct dca_domain, node); 112 domain = list_first_entry(&dca_domains, struct dca_domain, node);
113 113
114 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) { 114 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
115 list_del(&dca->node); 115 list_move(&dca->node, &unregistered_providers);
116 list_add(&dca->node, &unregistered_providers);
117 }
118 116
119 dca_free_domain(domain); 117 dca_free_domain(domain);
120 118
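
The dca-core change is purely mechanical: list_move() is defined as list_del() followed by list_add() onto the new head, so the open-coded pair collapses to one call. A self-contained circular doubly-linked list (simplified from the kernel's list.h) showing the equivalence:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_move(struct list_head *n, struct list_head *head)
{
	list_del(n);          /* unlink from wherever it is... */
	list_add(n, head);    /* ...and push onto the new list */
}

int main(void)
{
	struct list_head a, src, dst;
	list_init(&src);
	list_init(&dst);
	list_add(&a, &src);
	list_move(&a, &dst);
	printf("moved: %s\n", dst.next == &a ? "yes" : "no");
	return 0;
}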
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1c28816152fa..a572600e44eb 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA
82 82
83config DW_DMAC 83config DW_DMAC
84 tristate "Synopsys DesignWare AHB DMA support" 84 tristate "Synopsys DesignWare AHB DMA support"
85 depends on AVR32 85 depends on HAVE_CLK
86 select DMA_ENGINE 86 select DMA_ENGINE
87 default y if CPU_AT32AP7000 87 default y if CPU_AT32AP7000
88 help 88 help
@@ -221,12 +221,20 @@ config IMX_SDMA
221 221
222config IMX_DMA 222config IMX_DMA
223 tristate "i.MX DMA support" 223 tristate "i.MX DMA support"
224 depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27 224 depends on IMX_HAVE_DMA_V1
225 select DMA_ENGINE 225 select DMA_ENGINE
226 help 226 help
227 Support the i.MX DMA engine. This engine is integrated into 227 Support the i.MX DMA engine. This engine is integrated into
228 Freescale i.MX1/21/27 chips. 228 Freescale i.MX1/21/27 chips.
229 229
230config MXS_DMA
231 bool "MXS DMA support"
232 depends on SOC_IMX23 || SOC_IMX28
233 select DMA_ENGINE
234 help
235 Support the MXS DMA engine. This engine including APBH-DMA
236 and APBX-DMA is integrated into Freescale i.MX23/28 chips.
237
230config DMA_ENGINE 238config DMA_ENGINE
231 bool 239 bool
232 240
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 1be065a62f8c..836095ab3c5c 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
19obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 19obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
20obj-$(CONFIG_IMX_SDMA) += imx-sdma.o 20obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
21obj-$(CONFIG_IMX_DMA) += imx-dma.o 21obj-$(CONFIG_IMX_DMA) += imx-dma.o
22obj-$(CONFIG_MXS_DMA) += mxs-dma.o
22obj-$(CONFIG_TIMB_DMA) += timb_dma.o 23obj-$(CONFIG_TIMB_DMA) += timb_dma.o
23obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 24obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
24obj-$(CONFIG_PL330_DMA) += pl330.o 25obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 5589358b684d..e0888cb538d4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO);
54MODULE_PARM_DESC(pq_sources, 54MODULE_PARM_DESC(pq_sources,
55 "Number of p+q source buffers (default: 3)"); 55 "Number of p+q source buffers (default: 3)");
56 56
57static int timeout = 3000;
58module_param(timeout, uint, S_IRUGO);
59MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \
60 Pass -1 for infinite timeout");
61
57/* 62/*
58 * Initialization patterns. All bytes in the source buffer has bit 7 63 * Initialization patterns. All bytes in the source buffer has bit 7
59 * set, all bytes in the destination buffer has bit 7 cleared. 64 * set, all bytes in the destination buffer has bit 7 cleared.
@@ -285,7 +290,12 @@ static int dmatest_func(void *data)
285 290
286 set_user_nice(current, 10); 291 set_user_nice(current, 10);
287 292
288 flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; 293 /*
294 * src buffers are freed by the DMAEngine code with dma_unmap_single()
295 * dst buffers are freed by ourselves below
296 */
297 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
298 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
289 299
290 while (!kthread_should_stop() 300 while (!kthread_should_stop()
291 && !(iterations && total_tests >= iterations)) { 301 && !(iterations && total_tests >= iterations)) {
@@ -294,7 +304,7 @@ static int dmatest_func(void *data)
294 dma_addr_t dma_srcs[src_cnt]; 304 dma_addr_t dma_srcs[src_cnt];
295 dma_addr_t dma_dsts[dst_cnt]; 305 dma_addr_t dma_dsts[dst_cnt];
296 struct completion cmp; 306 struct completion cmp;
297 unsigned long tmo = msecs_to_jiffies(3000); 307 unsigned long tmo = msecs_to_jiffies(timeout);
298 u8 align = 0; 308 u8 align = 0;
299 309
300 total_tests++; 310 total_tests++;
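
dmatest replaces the hard-coded 3000 ms with a module parameter, keeping 3000 as the default and treating a negative value as an effectively infinite timeout. A userspace analogue of that default-with-override behaviour:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	long timeout_ms = 3000;                   /* default, as in dmatest */

	if (argc > 1)
		timeout_ms = strtol(argv[1], NULL, 10);

	if (timeout_ms < 0)
		puts("infinite timeout");
	else
		printf("timeout = %ld ms\n", timeout_ms);
	return 0;
}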
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index a3991ab0d67e..9c25c7d099e4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -32,26 +32,30 @@
32 * which does not support descriptor writeback. 32 * which does not support descriptor writeback.
33 */ 33 */
34 34
35/* NOTE: DMS+SMS is system-specific. We should get this information 35#define DWC_DEFAULT_CTLLO(private) ({ \
36 * from the platform code somehow. 36 struct dw_dma_slave *__slave = (private); \
37 */ 37 int dms = __slave ? __slave->dst_master : 0; \
38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ 38 int sms = __slave ? __slave->src_master : 1; \
39 | DWC_CTLL_SRC_MSIZE(0) \ 39 u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
40 | DWC_CTLL_DMS(0) \ 40 u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
41 | DWC_CTLL_SMS(1) \ 41 \
42 | DWC_CTLL_LLP_D_EN \ 42 (DWC_CTLL_DST_MSIZE(dmsize) \
43 | DWC_CTLL_LLP_S_EN) 43 | DWC_CTLL_SRC_MSIZE(smsize) \
44 | DWC_CTLL_LLP_D_EN \
45 | DWC_CTLL_LLP_S_EN \
46 | DWC_CTLL_DMS(dms) \
47 | DWC_CTLL_SMS(sms)); \
48 })
44 49
45/* 50/*
46 * This is configuration-dependent and usually a funny size like 4095. 51 * This is configuration-dependent and usually a funny size like 4095.
47 * Let's round it down to the nearest power of two.
48 * 52 *
49 * Note that this is a transfer count, i.e. if we transfer 32-bit 53 * Note that this is a transfer count, i.e. if we transfer 32-bit
50 * words, we can do 8192 bytes per descriptor. 54 * words, we can do 16380 bytes per descriptor.
51 * 55 *
52 * This parameter is also system-specific. 56 * This parameter is also system-specific.
53 */ 57 */
54#define DWC_MAX_COUNT 2048U 58#define DWC_MAX_COUNT 4095U
55 59
56/* 60/*
57 * Number of descriptors to allocate for each channel. This should be 61 * Number of descriptors to allocate for each channel. This should be
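
DWC_DEFAULT_CTLLO turns into a GCC statement-expression macro so the control word can be computed per channel from chan->private, with fixed fallbacks when no slave data is attached. A compilable sketch of the construct (the field names and bit layout below are illustrative, not the dw_dmac register format):

#include <stdio.h>

struct slave { int dst_master, src_master; };

#define DEFAULT_CTL(private) ({                                   \
	struct slave *__s = (private);                            \
	int dms = __s ? __s->dst_master : 0; /* fallback masters */ \
	int sms = __s ? __s->src_master : 1;                      \
	(dms << 4) | sms; /* stand-in for the DWC_CTLL_* encoding */ \
})

int main(void)
{
	struct slave s = { .dst_master = 1, .src_master = 0 };
	printf("default=%#x slave=%#x\n",
	       (unsigned)DEFAULT_CTL((struct slave *)0),
	       (unsigned)DEFAULT_CTL(&s));
	return 0;
}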
@@ -84,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
84 return list_entry(dwc->active_list.next, struct dw_desc, desc_node); 88 return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
85} 89}
86 90
87static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
88{
89 return list_entry(dwc->queue.next, struct dw_desc, desc_node);
90}
91
92static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) 91static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
93{ 92{
94 struct dw_desc *desc, *_desc; 93 struct dw_desc *desc, *_desc;
@@ -201,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
201 dma_async_tx_callback callback; 200 dma_async_tx_callback callback;
202 void *param; 201 void *param;
203 struct dma_async_tx_descriptor *txd = &desc->txd; 202 struct dma_async_tx_descriptor *txd = &desc->txd;
203 struct dw_desc *child;
204 204
205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
206 206
@@ -209,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
209 param = txd->callback_param; 209 param = txd->callback_param;
210 210
211 dwc_sync_desc_for_cpu(dwc, desc); 211 dwc_sync_desc_for_cpu(dwc, desc);
212
213 /* async_tx_ack */
214 list_for_each_entry(child, &desc->tx_list, desc_node)
215 async_tx_ack(&child->txd);
216 async_tx_ack(&desc->txd);
217
212 list_splice_init(&desc->tx_list, &dwc->free_list); 218 list_splice_init(&desc->tx_list, &dwc->free_list);
213 list_move(&desc->desc_node, &dwc->free_list); 219 list_move(&desc->desc_node, &dwc->free_list);
214 220
@@ -259,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
259 * Submit queued descriptors ASAP, i.e. before we go through 265 * Submit queued descriptors ASAP, i.e. before we go through
260 * the completed ones. 266 * the completed ones.
261 */ 267 */
262 if (!list_empty(&dwc->queue))
263 dwc_dostart(dwc, dwc_first_queued(dwc));
264 list_splice_init(&dwc->active_list, &list); 268 list_splice_init(&dwc->active_list, &list);
265 list_splice_init(&dwc->queue, &dwc->active_list); 269 if (!list_empty(&dwc->queue)) {
270 list_move(dwc->queue.next, &dwc->active_list);
271 dwc_dostart(dwc, dwc_first_active(dwc));
272 }
266 273
267 list_for_each_entry_safe(desc, _desc, &list, desc_node) 274 list_for_each_entry_safe(desc, _desc, &list, desc_node)
268 dwc_descriptor_complete(dwc, desc); 275 dwc_descriptor_complete(dwc, desc);
@@ -291,6 +298,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
291 return; 298 return;
292 } 299 }
293 300
301 if (list_empty(&dwc->active_list))
302 return;
303
294 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 304 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
295 305
296 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 306 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
@@ -319,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
319 cpu_relax(); 329 cpu_relax();
320 330
321 if (!list_empty(&dwc->queue)) { 331 if (!list_empty(&dwc->queue)) {
322 dwc_dostart(dwc, dwc_first_queued(dwc)); 332 list_move(dwc->queue.next, &dwc->active_list);
323 list_splice_init(&dwc->queue, &dwc->active_list); 333 dwc_dostart(dwc, dwc_first_active(dwc));
324 } 334 }
325} 335}
326 336
@@ -346,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
346 */ 356 */
347 bad_desc = dwc_first_active(dwc); 357 bad_desc = dwc_first_active(dwc);
348 list_del_init(&bad_desc->desc_node); 358 list_del_init(&bad_desc->desc_node);
349 list_splice_init(&dwc->queue, dwc->active_list.prev); 359 list_move(dwc->queue.next, dwc->active_list.prev);
350 360
351 /* Clear the error flag and try to restart the controller */ 361 /* Clear the error flag and try to restart the controller */
352 dma_writel(dw, CLEAR.ERROR, dwc->mask); 362 dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -541,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
541 if (list_empty(&dwc->active_list)) { 551 if (list_empty(&dwc->active_list)) {
542 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 552 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
543 desc->txd.cookie); 553 desc->txd.cookie);
544 dwc_dostart(dwc, desc);
545 list_add_tail(&desc->desc_node, &dwc->active_list); 554 list_add_tail(&desc->desc_node, &dwc->active_list);
555 dwc_dostart(dwc, dwc_first_active(dwc));
546 } else { 556 } else {
547 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 557 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
548 desc->txd.cookie); 558 desc->txd.cookie);
@@ -581,14 +591,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
581 * We can be a lot more clever here, but this should take care 591 * We can be a lot more clever here, but this should take care
582 * of the most common optimization. 592 * of the most common optimization.
583 */ 593 */
584 if (!((src | dest | len) & 3)) 594 if (!((src | dest | len) & 7))
595 src_width = dst_width = 3;
596 else if (!((src | dest | len) & 3))
585 src_width = dst_width = 2; 597 src_width = dst_width = 2;
586 else if (!((src | dest | len) & 1)) 598 else if (!((src | dest | len) & 1))
587 src_width = dst_width = 1; 599 src_width = dst_width = 1;
588 else 600 else
589 src_width = dst_width = 0; 601 src_width = dst_width = 0;
590 602
591 ctllo = DWC_DEFAULT_CTLLO 603 ctllo = DWC_DEFAULT_CTLLO(chan->private)
592 | DWC_CTLL_DST_WIDTH(dst_width) 604 | DWC_CTLL_DST_WIDTH(dst_width)
593 | DWC_CTLL_SRC_WIDTH(src_width) 605 | DWC_CTLL_SRC_WIDTH(src_width)
594 | DWC_CTLL_DST_INC 606 | DWC_CTLL_DST_INC
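
The memcpy prep now also detects 8-byte alignment and selects width 3 (64-bit beats). ORing src, dest and len together and masking finds the largest power-of-two alignment all three share in a single test per width:

#include <stdio.h>

static unsigned pick_width(unsigned long src, unsigned long dest,
			   unsigned long len)
{
	if (!((src | dest | len) & 7))
		return 3;            /* 8-byte aligned -> 64-bit transfers */
	else if (!((src | dest | len) & 3))
		return 2;            /* 4-byte aligned -> 32-bit */
	else if (!((src | dest | len) & 1))
		return 1;            /* 2-byte aligned -> 16-bit */
	return 0;                    /* byte transfers */
}

int main(void)
{
	printf("%u\n", pick_width(0x1000, 0x2000, 64));  /* prints 3 */
	printf("%u\n", pick_width(0x1002, 0x2000, 64));  /* prints 1 */
	return 0;
}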
@@ -669,11 +681,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
669 681
670 switch (direction) { 682 switch (direction) {
671 case DMA_TO_DEVICE: 683 case DMA_TO_DEVICE:
672 ctllo = (DWC_DEFAULT_CTLLO 684 ctllo = (DWC_DEFAULT_CTLLO(chan->private)
673 | DWC_CTLL_DST_WIDTH(reg_width) 685 | DWC_CTLL_DST_WIDTH(reg_width)
674 | DWC_CTLL_DST_FIX 686 | DWC_CTLL_DST_FIX
675 | DWC_CTLL_SRC_INC 687 | DWC_CTLL_SRC_INC
676 | DWC_CTLL_FC_M2P); 688 | DWC_CTLL_FC(dws->fc));
677 reg = dws->tx_reg; 689 reg = dws->tx_reg;
678 for_each_sg(sgl, sg, sg_len, i) { 690 for_each_sg(sgl, sg, sg_len, i) {
679 struct dw_desc *desc; 691 struct dw_desc *desc;
@@ -714,11 +726,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
714 } 726 }
715 break; 727 break;
716 case DMA_FROM_DEVICE: 728 case DMA_FROM_DEVICE:
717 ctllo = (DWC_DEFAULT_CTLLO 729 ctllo = (DWC_DEFAULT_CTLLO(chan->private)
718 | DWC_CTLL_SRC_WIDTH(reg_width) 730 | DWC_CTLL_SRC_WIDTH(reg_width)
719 | DWC_CTLL_DST_INC 731 | DWC_CTLL_DST_INC
720 | DWC_CTLL_SRC_FIX 732 | DWC_CTLL_SRC_FIX
721 | DWC_CTLL_FC_P2M); 733 | DWC_CTLL_FC(dws->fc));
722 734
723 reg = dws->rx_reg; 735 reg = dws->rx_reg;
724 for_each_sg(sgl, sg, sg_len, i) { 736 for_each_sg(sgl, sg, sg_len, i) {
@@ -834,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan,
834 846
835 ret = dma_async_is_complete(cookie, last_complete, last_used); 847 ret = dma_async_is_complete(cookie, last_complete, last_used);
836 if (ret != DMA_SUCCESS) { 848 if (ret != DMA_SUCCESS) {
849 spin_lock_bh(&dwc->lock);
837 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 850 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
851 spin_unlock_bh(&dwc->lock);
838 852
839 last_complete = dwc->completed; 853 last_complete = dwc->completed;
840 last_used = chan->cookie; 854 last_used = chan->cookie;
@@ -889,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
889 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); 903 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
890 904
891 cfghi = dws->cfg_hi; 905 cfghi = dws->cfg_hi;
892 cfglo = dws->cfg_lo; 906 cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
893 } 907 }
908
909 cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
910
894 channel_writel(dwc, CFG_LO, cfglo); 911 channel_writel(dwc, CFG_LO, cfglo);
895 channel_writel(dwc, CFG_HI, cfghi); 912 channel_writel(dwc, CFG_HI, cfghi);
896 913
@@ -1126,23 +1143,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1126 case DMA_TO_DEVICE: 1143 case DMA_TO_DEVICE:
1127 desc->lli.dar = dws->tx_reg; 1144 desc->lli.dar = dws->tx_reg;
1128 desc->lli.sar = buf_addr + (period_len * i); 1145 desc->lli.sar = buf_addr + (period_len * i);
1129 desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1146 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
1130 | DWC_CTLL_DST_WIDTH(reg_width) 1147 | DWC_CTLL_DST_WIDTH(reg_width)
1131 | DWC_CTLL_SRC_WIDTH(reg_width) 1148 | DWC_CTLL_SRC_WIDTH(reg_width)
1132 | DWC_CTLL_DST_FIX 1149 | DWC_CTLL_DST_FIX
1133 | DWC_CTLL_SRC_INC 1150 | DWC_CTLL_SRC_INC
1134 | DWC_CTLL_FC_M2P 1151 | DWC_CTLL_FC(dws->fc)
1135 | DWC_CTLL_INT_EN); 1152 | DWC_CTLL_INT_EN);
1136 break; 1153 break;
1137 case DMA_FROM_DEVICE: 1154 case DMA_FROM_DEVICE:
1138 desc->lli.dar = buf_addr + (period_len * i); 1155 desc->lli.dar = buf_addr + (period_len * i);
1139 desc->lli.sar = dws->rx_reg; 1156 desc->lli.sar = dws->rx_reg;
1140 desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1157 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
1141 | DWC_CTLL_SRC_WIDTH(reg_width) 1158 | DWC_CTLL_SRC_WIDTH(reg_width)
1142 | DWC_CTLL_DST_WIDTH(reg_width) 1159 | DWC_CTLL_DST_WIDTH(reg_width)
1143 | DWC_CTLL_DST_INC 1160 | DWC_CTLL_DST_INC
1144 | DWC_CTLL_SRC_FIX 1161 | DWC_CTLL_SRC_FIX
1145 | DWC_CTLL_FC_P2M 1162 | DWC_CTLL_FC(dws->fc)
1146 | DWC_CTLL_INT_EN); 1163 | DWC_CTLL_INT_EN);
1147 break; 1164 break;
1148 default: 1165 default:
@@ -1307,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev)
1307 dwc->chan.device = &dw->dma; 1324 dwc->chan.device = &dw->dma;
1308 dwc->chan.cookie = dwc->completed = 1; 1325 dwc->chan.cookie = dwc->completed = 1;
1309 dwc->chan.chan_id = i; 1326 dwc->chan.chan_id = i;
1310 list_add_tail(&dwc->chan.device_node, &dw->dma.channels); 1327 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1328 list_add_tail(&dwc->chan.device_node,
1329 &dw->dma.channels);
1330 else
1331 list_add(&dwc->chan.device_node, &dw->dma.channels);
1332
1333 /* 7 is highest priority & 0 is lowest. */
1334 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1335 dwc->priority = 7 - i;
1336 else
1337 dwc->priority = i;
1311 1338
1312 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1339 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1313 spin_lock_init(&dwc->lock); 1340 spin_lock_init(&dwc->lock);
@@ -1335,6 +1362,8 @@ static int __init dw_probe(struct platform_device *pdev)
1335 1362
1336 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1363 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1337 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1364 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1365 if (pdata->is_private)
1366 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1338 dw->dma.dev = &pdev->dev; 1367 dw->dma.dev = &pdev->dev;
1339 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1368 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1340 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1369 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1447,7 +1476,7 @@ static int __init dw_init(void)
1447{ 1476{
1448 return platform_driver_probe(&dw_driver, dw_probe); 1477 return platform_driver_probe(&dw_driver, dw_probe);
1449} 1478}
1450module_init(dw_init); 1479subsys_initcall(dw_init);
1451 1480
1452static void __exit dw_exit(void) 1481static void __exit dw_exit(void)
1453{ 1482{
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index d9a939f67f46..720f821527f8 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -86,6 +86,7 @@ struct dw_dma_regs {
86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14) 86#define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */ 87#define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */ 88#define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89#define DWC_CTLL_FC(n) ((n) << 20)
89#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */ 90#define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */ 91#define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */ 92#define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
@@ -101,6 +102,8 @@ struct dw_dma_regs {
101#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff 102#define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102 103
103/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */ 104/* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
105#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
106#define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
104#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */ 107#define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
105#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */ 108#define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
106#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */ 109#define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
@@ -134,6 +137,7 @@ struct dw_dma_chan {
134 struct dma_chan chan; 137 struct dma_chan chan;
135 void __iomem *ch_regs; 138 void __iomem *ch_regs;
136 u8 mask; 139 u8 mask;
140 u8 priority;
137 141
138 spinlock_t lock; 142 spinlock_t lock;
139 143
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
155} 159}
156 160
157#define channel_readl(dwc, name) \ 161#define channel_readl(dwc, name) \
158 __raw_readl(&(__dwc_regs(dwc)->name)) 162 readl(&(__dwc_regs(dwc)->name))
159#define channel_writel(dwc, name, val) \ 163#define channel_writel(dwc, name, val) \
160 __raw_writel((val), &(__dwc_regs(dwc)->name)) 164 writel((val), &(__dwc_regs(dwc)->name))
161 165
162static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) 166static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
163{ 167{
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
181} 185}
182 186
183#define dma_readl(dw, name) \ 187#define dma_readl(dw, name) \
184 __raw_readl(&(__dw_regs(dw)->name)) 188 readl(&(__dw_regs(dw)->name))
185#define dma_writel(dw, name, val) \ 189#define dma_writel(dw, name, val) \
186 __raw_writel((val), &(__dw_regs(dw)->name)) 190 writel((val), &(__dw_regs(dw)->name))
187 191
188#define channel_set_bit(dw, reg, mask) \ 192#define channel_set_bit(dw, reg, mask) \
189 dma_writel(dw, reg, ((mask) << 8) | (mask)) 193 dma_writel(dw, reg, ((mask) << 8) | (mask))
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e3854a8f0de0..6b396759e7f5 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,35 +37,16 @@
37 37
38#include "fsldma.h" 38#include "fsldma.h"
39 39
40static const char msg_ld_oom[] = "No free memory for link descriptor\n"; 40#define chan_dbg(chan, fmt, arg...) \
41 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
42#define chan_err(chan, fmt, arg...) \
43 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
41 44
42static void dma_init(struct fsldma_chan *chan) 45static const char msg_ld_oom[] = "No free memory for link descriptor";
43{
44 /* Reset the channel */
45 DMA_OUT(chan, &chan->regs->mr, 0, 32);
46 46
47 switch (chan->feature & FSL_DMA_IP_MASK) { 47/*
48 case FSL_DMA_IP_85XX: 48 * Register Helpers
49 /* Set the channel to below modes: 49 */
50 * EIE - Error interrupt enable
51 * EOSIE - End of segments interrupt enable (basic mode)
52 * EOLNIE - End of links interrupt enable
53 * BWC - Bandwidth sharing among channels
54 */
55 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
56 | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
57 | FSL_DMA_MR_EOSIE, 32);
58 break;
59 case FSL_DMA_IP_83XX:
60 /* Set the channel to below modes:
61 * EOTIE - End-of-transfer interrupt enable
62 * PRC_RM - PCI read multiple
63 */
64 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
65 | FSL_DMA_MR_PRC_RM, 32);
66 break;
67 }
68}
69 50
70static void set_sr(struct fsldma_chan *chan, u32 val) 51static void set_sr(struct fsldma_chan *chan, u32 val)
71{ 52{
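
fsldma introduces chan_dbg()/chan_err() wrappers that prefix every message with the channel name; the GNU `arg...`/`##arg` form lets callers pass zero trailing arguments without leaving a dangling comma. A userspace equivalent of the macro shape:

#include <stdio.h>

struct chan { const char *name; };

#define chan_dbg(chan, fmt, arg...) \
	fprintf(stderr, "%s: " fmt, (chan)->name, ##arg)

int main(void)
{
	struct chan c = { .name = "dma0chan1" };
	chan_dbg(&c, "no pending LDs\n");              /* zero extra args */
	chan_dbg(&c, "LD %p allocated\n", (void *)&c); /* with an arg */
	return 0;
}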
@@ -77,14 +58,38 @@ static u32 get_sr(struct fsldma_chan *chan)
77 return DMA_IN(chan, &chan->regs->sr, 32); 58 return DMA_IN(chan, &chan->regs->sr, 32);
78} 59}
79 60
61static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
62{
63 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
64}
65
66static dma_addr_t get_cdar(struct fsldma_chan *chan)
67{
68 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
69}
70
71static u32 get_bcr(struct fsldma_chan *chan)
72{
73 return DMA_IN(chan, &chan->regs->bcr, 32);
74}
75
76/*
77 * Descriptor Helpers
78 */
79
80static void set_desc_cnt(struct fsldma_chan *chan, 80static void set_desc_cnt(struct fsldma_chan *chan,
81 struct fsl_dma_ld_hw *hw, u32 count) 81 struct fsl_dma_ld_hw *hw, u32 count)
82{ 82{
83 hw->count = CPU_TO_DMA(chan, count, 32); 83 hw->count = CPU_TO_DMA(chan, count, 32);
84} 84}
85 85
86static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
87{
88 return DMA_TO_CPU(chan, desc->hw.count, 32);
89}
90
86static void set_desc_src(struct fsldma_chan *chan, 91static void set_desc_src(struct fsldma_chan *chan,
87 struct fsl_dma_ld_hw *hw, dma_addr_t src) 92 struct fsl_dma_ld_hw *hw, dma_addr_t src)
88{ 93{
89 u64 snoop_bits; 94 u64 snoop_bits;
90 95
@@ -93,8 +98,18 @@ static void set_desc_src(struct fsldma_chan *chan,
93 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 98 hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
94} 99}
95 100
101static dma_addr_t get_desc_src(struct fsldma_chan *chan,
102 struct fsl_desc_sw *desc)
103{
104 u64 snoop_bits;
105
106 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
107 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
108 return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
109}
110
96static void set_desc_dst(struct fsldma_chan *chan, 111static void set_desc_dst(struct fsldma_chan *chan,
97 struct fsl_dma_ld_hw *hw, dma_addr_t dst) 112 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
98{ 113{
99 u64 snoop_bits; 114 u64 snoop_bits;
100 115
@@ -103,8 +118,18 @@ static void set_desc_dst(struct fsldma_chan *chan,
103 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 118 hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
104} 119}
105 120
121static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
122 struct fsl_desc_sw *desc)
123{
124 u64 snoop_bits;
125
126 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
127 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
128 return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
129}
130
106static void set_desc_next(struct fsldma_chan *chan, 131static void set_desc_next(struct fsldma_chan *chan,
107 struct fsl_dma_ld_hw *hw, dma_addr_t next) 132 struct fsl_dma_ld_hw *hw, dma_addr_t next)
108{ 133{
109 u64 snoop_bits; 134 u64 snoop_bits;
110 135
@@ -113,24 +138,46 @@ static void set_desc_next(struct fsldma_chan *chan,
113 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); 138 hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
114} 139}
115 140
116static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) 141static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
117{ 142{
118 DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); 143 u64 snoop_bits;
119}
120 144
121static dma_addr_t get_cdar(struct fsldma_chan *chan) 145 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
122{ 146 ? FSL_DMA_SNEN : 0;
123 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
124}
125 147
126static dma_addr_t get_ndar(struct fsldma_chan *chan) 148 desc->hw.next_ln_addr = CPU_TO_DMA(chan,
127{ 149 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
128 return DMA_IN(chan, &chan->regs->ndar, 64); 150 | snoop_bits, 64);
129} 151}
130 152
131static u32 get_bcr(struct fsldma_chan *chan) 153/*
154 * DMA Engine Hardware Control Helpers
155 */
156
157static void dma_init(struct fsldma_chan *chan)
132{ 158{
133 return DMA_IN(chan, &chan->regs->bcr, 32); 159 /* Reset the channel */
160 DMA_OUT(chan, &chan->regs->mr, 0, 32);
161
162 switch (chan->feature & FSL_DMA_IP_MASK) {
163 case FSL_DMA_IP_85XX:
164 /* Set the channel to below modes:
165 * EIE - Error interrupt enable
166 * EOLNIE - End of links interrupt enable
167 * BWC - Bandwidth sharing among channels
168 */
169 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
170 | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
171 break;
172 case FSL_DMA_IP_83XX:
173 /* Set the channel to below modes:
174 * EOTIE - End-of-transfer interrupt enable
175 * PRC_RM - PCI read multiple
176 */
177 DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
178 | FSL_DMA_MR_PRC_RM, 32);
179 break;
180 }
134} 181}
135 182
136static int dma_is_idle(struct fsldma_chan *chan) 183static int dma_is_idle(struct fsldma_chan *chan)
@@ -139,25 +186,32 @@ static int dma_is_idle(struct fsldma_chan *chan)
139 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); 186 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
140} 187}
141 188
189/*
190 * Start the DMA controller
191 *
192 * Preconditions:
193 * - the CDAR register must point to the start descriptor
194 * - the MRn[CS] bit must be cleared
195 */
142static void dma_start(struct fsldma_chan *chan) 196static void dma_start(struct fsldma_chan *chan)
143{ 197{
144 u32 mode; 198 u32 mode;
145 199
146 mode = DMA_IN(chan, &chan->regs->mr, 32); 200 mode = DMA_IN(chan, &chan->regs->mr, 32);
147 201
148 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 202 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
149 if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 203 DMA_OUT(chan, &chan->regs->bcr, 0, 32);
150 DMA_OUT(chan, &chan->regs->bcr, 0, 32); 204 mode |= FSL_DMA_MR_EMP_EN;
151 mode |= FSL_DMA_MR_EMP_EN; 205 } else {
152 } else { 206 mode &= ~FSL_DMA_MR_EMP_EN;
153 mode &= ~FSL_DMA_MR_EMP_EN;
154 }
155 } 207 }
156 208
157 if (chan->feature & FSL_DMA_CHAN_START_EXT) 209 if (chan->feature & FSL_DMA_CHAN_START_EXT) {
158 mode |= FSL_DMA_MR_EMS_EN; 210 mode |= FSL_DMA_MR_EMS_EN;
159 else 211 } else {
212 mode &= ~FSL_DMA_MR_EMS_EN;
160 mode |= FSL_DMA_MR_CS; 213 mode |= FSL_DMA_MR_CS;
214 }
161 215
162 DMA_OUT(chan, &chan->regs->mr, mode, 32); 216 DMA_OUT(chan, &chan->regs->mr, mode, 32);
163} 217}
@@ -167,13 +221,26 @@ static void dma_halt(struct fsldma_chan *chan)
167 u32 mode; 221 u32 mode;
168 int i; 222 int i;
169 223
224 /* read the mode register */
170 mode = DMA_IN(chan, &chan->regs->mr, 32); 225 mode = DMA_IN(chan, &chan->regs->mr, 32);
171 mode |= FSL_DMA_MR_CA;
172 DMA_OUT(chan, &chan->regs->mr, mode, 32);
173 226
174 mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); 227 /*
228 * The 85xx controller supports channel abort, which will stop
229 * the current transfer. On 83xx, this bit is the transfer error
230 * mask bit, which should not be changed.
231 */
232 if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
233 mode |= FSL_DMA_MR_CA;
234 DMA_OUT(chan, &chan->regs->mr, mode, 32);
235
236 mode &= ~FSL_DMA_MR_CA;
237 }
238
239 /* stop the DMA controller */
240 mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
175 DMA_OUT(chan, &chan->regs->mr, mode, 32); 241 DMA_OUT(chan, &chan->regs->mr, mode, 32);
176 242
243 /* wait for the DMA controller to become idle */
177 for (i = 0; i < 100; i++) { 244 for (i = 0; i < 100; i++) {
178 if (dma_is_idle(chan)) 245 if (dma_is_idle(chan))
179 return; 246 return;
@@ -182,20 +249,7 @@ static void dma_halt(struct fsldma_chan *chan)
182 } 249 }
183 250
184 if (!dma_is_idle(chan)) 251 if (!dma_is_idle(chan))
185 dev_err(chan->dev, "DMA halt timeout!\n"); 252 chan_err(chan, "DMA halt timeout!\n");
186}
187
188static void set_ld_eol(struct fsldma_chan *chan,
189 struct fsl_desc_sw *desc)
190{
191 u64 snoop_bits;
192
193 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
194 ? FSL_DMA_SNEN : 0;
195
196 desc->hw.next_ln_addr = CPU_TO_DMA(chan,
197 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
198 | snoop_bits, 64);
199} 253}
200 254
201/** 255/**
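
dma_halt() now reads as three steps: pulse the abort bit only on 85xx (on 83xx that bit is the transfer-error mask and must be left alone), clear the start bits, then poll for idle a bounded number of times before reporting a timeout. The polling skeleton in portable C (usleep stands in for the register-read delay, and the "hardware" is modelled by a flag):

#include <stdio.h>
#include <unistd.h>

static int hw_idle;                  /* pretend status-register bit */

static int is_idle(void) { return hw_idle; }
static void stop_controller(void) { hw_idle = 1; /* model: stops at once */ }

static void halt(void)
{
	int i;

	stop_controller();

	for (i = 0; i < 100; i++) {  /* bounded wait, never spins forever */
		if (is_idle())
			return;
		usleep(10);
	}

	fprintf(stderr, "DMA halt timeout!\n");
}

int main(void) { halt(); puts("halted"); return 0; }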
@@ -321,8 +375,7 @@ static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
321 chan->feature &= ~FSL_DMA_CHAN_START_EXT; 375 chan->feature &= ~FSL_DMA_CHAN_START_EXT;
322} 376}
323 377
324static void append_ld_queue(struct fsldma_chan *chan, 378static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
325 struct fsl_desc_sw *desc)
326{ 379{
327 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); 380 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
328 381
@@ -363,8 +416,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
363 cookie = chan->common.cookie; 416 cookie = chan->common.cookie;
364 list_for_each_entry(child, &desc->tx_list, node) { 417 list_for_each_entry(child, &desc->tx_list, node) {
365 cookie++; 418 cookie++;
366 if (cookie < 0) 419 if (cookie < DMA_MIN_COOKIE)
367 cookie = 1; 420 cookie = DMA_MIN_COOKIE;
368 421
369 child->async_tx.cookie = cookie; 422 child->async_tx.cookie = cookie;
370 } 423 }
@@ -385,15 +438,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
385 * 438 *
386 * Return - The descriptor allocated. NULL for failed. 439 * Return - The descriptor allocated. NULL for failed.
387 */ 440 */
388static struct fsl_desc_sw *fsl_dma_alloc_descriptor( 441static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
389 struct fsldma_chan *chan)
390{ 442{
391 struct fsl_desc_sw *desc; 443 struct fsl_desc_sw *desc;
392 dma_addr_t pdesc; 444 dma_addr_t pdesc;
393 445
394 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); 446 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
395 if (!desc) { 447 if (!desc) {
396 dev_dbg(chan->dev, "out of memory for link desc\n"); 448 chan_dbg(chan, "out of memory for link descriptor\n");
397 return NULL; 449 return NULL;
398 } 450 }
399 451
@@ -403,10 +455,13 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
403 desc->async_tx.tx_submit = fsl_dma_tx_submit; 455 desc->async_tx.tx_submit = fsl_dma_tx_submit;
404 desc->async_tx.phys = pdesc; 456 desc->async_tx.phys = pdesc;
405 457
458#ifdef FSL_DMA_LD_DEBUG
459 chan_dbg(chan, "LD %p allocated\n", desc);
460#endif
461
406 return desc; 462 return desc;
407} 463}
408 464
409
410/** 465/**
411 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. 466 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
412 * @chan : Freescale DMA channel 467 * @chan : Freescale DMA channel
@@ -427,13 +482,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
427 * We need the descriptor to be aligned to 32bytes 482 * We need the descriptor to be aligned to 32bytes
428 * for meeting FSL DMA specification requirement. 483 * for meeting FSL DMA specification requirement.
429 */ 484 */
430 chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", 485 chan->desc_pool = dma_pool_create(chan->name, chan->dev,
431 chan->dev,
432 sizeof(struct fsl_desc_sw), 486 sizeof(struct fsl_desc_sw),
433 __alignof__(struct fsl_desc_sw), 0); 487 __alignof__(struct fsl_desc_sw), 0);
434 if (!chan->desc_pool) { 488 if (!chan->desc_pool) {
435 dev_err(chan->dev, "unable to allocate channel %d " 489 chan_err(chan, "unable to allocate descriptor pool\n");
436 "descriptor pool\n", chan->id);
437 return -ENOMEM; 490 return -ENOMEM;
438 } 491 }
439 492
@@ -455,6 +508,9 @@ static void fsldma_free_desc_list(struct fsldma_chan *chan,
455 508
456 list_for_each_entry_safe(desc, _desc, list, node) { 509 list_for_each_entry_safe(desc, _desc, list, node) {
457 list_del(&desc->node); 510 list_del(&desc->node);
511#ifdef FSL_DMA_LD_DEBUG
512 chan_dbg(chan, "LD %p free\n", desc);
513#endif
458 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 514 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
459 } 515 }
460} 516}
@@ -466,6 +522,9 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
466 522
467 list_for_each_entry_safe_reverse(desc, _desc, list, node) { 523 list_for_each_entry_safe_reverse(desc, _desc, list, node) {
468 list_del(&desc->node); 524 list_del(&desc->node);
525#ifdef FSL_DMA_LD_DEBUG
526 chan_dbg(chan, "LD %p free\n", desc);
527#endif
469 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 528 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
470 } 529 }
471} 530}
@@ -479,7 +538,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
479 struct fsldma_chan *chan = to_fsl_chan(dchan); 538 struct fsldma_chan *chan = to_fsl_chan(dchan);
480 unsigned long flags; 539 unsigned long flags;
481 540
482 dev_dbg(chan->dev, "Free all channel resources.\n"); 541 chan_dbg(chan, "free all channel resources\n");
483 spin_lock_irqsave(&chan->desc_lock, flags); 542 spin_lock_irqsave(&chan->desc_lock, flags);
484 fsldma_free_desc_list(chan, &chan->ld_pending); 543 fsldma_free_desc_list(chan, &chan->ld_pending);
485 fsldma_free_desc_list(chan, &chan->ld_running); 544 fsldma_free_desc_list(chan, &chan->ld_running);
@@ -502,7 +561,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
502 561
503 new = fsl_dma_alloc_descriptor(chan); 562 new = fsl_dma_alloc_descriptor(chan);
504 if (!new) { 563 if (!new) {
505 dev_err(chan->dev, msg_ld_oom); 564 chan_err(chan, "%s\n", msg_ld_oom);
506 return NULL; 565 return NULL;
507 } 566 }
508 567
@@ -512,14 +571,15 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
512 /* Insert the link descriptor to the LD ring */ 571 /* Insert the link descriptor to the LD ring */
513 list_add_tail(&new->node, &new->tx_list); 572 list_add_tail(&new->node, &new->tx_list);
514 573
515 /* Set End-of-link to the last link descriptor of new list*/ 574 /* Set End-of-link to the last link descriptor of new list */
516 set_ld_eol(chan, new); 575 set_ld_eol(chan, new);
517 576
518 return &new->async_tx; 577 return &new->async_tx;
519} 578}
520 579
521static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( 580static struct dma_async_tx_descriptor *
522 struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, 581fsl_dma_prep_memcpy(struct dma_chan *dchan,
582 dma_addr_t dma_dst, dma_addr_t dma_src,
523 size_t len, unsigned long flags) 583 size_t len, unsigned long flags)
524{ 584{
525 struct fsldma_chan *chan; 585 struct fsldma_chan *chan;
@@ -539,12 +599,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
539 /* Allocate the link descriptor from DMA pool */ 599 /* Allocate the link descriptor from DMA pool */
540 new = fsl_dma_alloc_descriptor(chan); 600 new = fsl_dma_alloc_descriptor(chan);
541 if (!new) { 601 if (!new) {
542 dev_err(chan->dev, msg_ld_oom); 602 chan_err(chan, "%s\n", msg_ld_oom);
543 goto fail; 603 goto fail;
544 } 604 }
545#ifdef FSL_DMA_LD_DEBUG
546 dev_dbg(chan->dev, "new link desc alloc %p\n", new);
547#endif
548 605
549 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 606 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
550 607
@@ -572,7 +629,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
572 new->async_tx.flags = flags; /* client is in control of this ack */ 629 new->async_tx.flags = flags; /* client is in control of this ack */
573 new->async_tx.cookie = -EBUSY; 630 new->async_tx.cookie = -EBUSY;
574 631
575 /* Set End-of-link to the last link descriptor of new list*/ 632 /* Set End-of-link to the last link descriptor of new list */
576 set_ld_eol(chan, new); 633 set_ld_eol(chan, new);
577 634
578 return &first->async_tx; 635 return &first->async_tx;
@@ -627,12 +684,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
627 /* allocate and populate the descriptor */ 684 /* allocate and populate the descriptor */
628 new = fsl_dma_alloc_descriptor(chan); 685 new = fsl_dma_alloc_descriptor(chan);
629 if (!new) { 686 if (!new) {
630 dev_err(chan->dev, msg_ld_oom); 687 chan_err(chan, "%s\n", msg_ld_oom);
631 goto fail; 688 goto fail;
632 } 689 }
633#ifdef FSL_DMA_LD_DEBUG
634 dev_dbg(chan->dev, "new link desc alloc %p\n", new);
635#endif
636 690
637 set_desc_cnt(chan, &new->hw, len); 691 set_desc_cnt(chan, &new->hw, len);
638 set_desc_src(chan, &new->hw, src); 692 set_desc_src(chan, &new->hw, src);
@@ -744,14 +798,15 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
744 798
745 switch (cmd) { 799 switch (cmd) {
746 case DMA_TERMINATE_ALL: 800 case DMA_TERMINATE_ALL:
801 spin_lock_irqsave(&chan->desc_lock, flags);
802
747 /* Halt the DMA engine */ 803 /* Halt the DMA engine */
748 dma_halt(chan); 804 dma_halt(chan);
749 805
750 spin_lock_irqsave(&chan->desc_lock, flags);
751
752 /* Remove and free all of the descriptors in the LD queue */ 806 /* Remove and free all of the descriptors in the LD queue */
753 fsldma_free_desc_list(chan, &chan->ld_pending); 807 fsldma_free_desc_list(chan, &chan->ld_pending);
754 fsldma_free_desc_list(chan, &chan->ld_running); 808 fsldma_free_desc_list(chan, &chan->ld_running);
809 chan->idle = true;
755 810
756 spin_unlock_irqrestore(&chan->desc_lock, flags); 811 spin_unlock_irqrestore(&chan->desc_lock, flags);
757 return 0; 812 return 0;
@@ -789,140 +844,87 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
789} 844}
790 845
791/** 846/**
792 * fsl_dma_update_completed_cookie - Update the completed cookie. 847 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
793 * @chan : Freescale DMA channel
794 *
795 * CONTEXT: hardirq
796 */
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
-{
-	struct fsl_desc_sw *desc;
-	unsigned long flags;
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	if (list_empty(&chan->ld_running)) {
-		dev_dbg(chan->dev, "no running descriptors\n");
-		goto out_unlock;
-	}
-
-	/* Get the last descriptor, update the cookie to that */
-	desc = to_fsl_desc(chan->ld_running.prev);
-	if (dma_is_idle(chan))
-		cookie = desc->async_tx.cookie;
-	else {
-		cookie = desc->async_tx.cookie - 1;
-		if (unlikely(cookie < DMA_MIN_COOKIE))
-			cookie = DMA_MAX_COOKIE;
-	}
-
-	chan->completed_cookie = cookie;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-}
-
-/**
- * fsldma_desc_status - Check the status of a descriptor
- * @chan: Freescale DMA channel
- * @desc: DMA SW descriptor
- *
- * This function will return the status of the given descriptor
- */
-static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
-					  struct fsl_desc_sw *desc)
-{
-	return dma_async_is_complete(desc->async_tx.cookie,
-				     chan->completed_cookie,
-				     chan->common.cookie);
-}
-
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function clean up the ld_queue of DMA channel.
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
-	struct fsl_desc_sw *desc, *_desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
-	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
-		dma_async_tx_callback callback;
-		void *callback_param;
-
-		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
-			break;
-
-		/* Remove from the list of running transactions */
-		list_del(&desc->node);
-
-		/* Run the link descriptor callback function */
-		callback = desc->async_tx.callback;
-		callback_param = desc->async_tx.callback_param;
-		if (callback) {
-			spin_unlock_irqrestore(&chan->desc_lock, flags);
-			dev_dbg(chan->dev, "LD %p callback\n", desc);
-			callback(callback_param);
-			spin_lock_irqsave(&chan->desc_lock, flags);
-		}
-
-		/* Run any dependencies, then free the descriptor */
-		dma_run_dependencies(&desc->async_tx);
-		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
-	}
-
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-}
+/**
+ * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, and then
+ * free the descriptor.
+ */
+static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
+				      struct fsl_desc_sw *desc)
+{
+	struct dma_async_tx_descriptor *txd = &desc->async_tx;
+	struct device *dev = chan->common.device->dev;
+	dma_addr_t src = get_desc_src(chan, desc);
+	dma_addr_t dst = get_desc_dst(chan, desc);
+	u32 len = get_desc_cnt(chan, desc);
+
+	/* Run the link descriptor callback function */
+	if (txd->callback) {
+#ifdef FSL_DMA_LD_DEBUG
+		chan_dbg(chan, "LD %p callback\n", desc);
+#endif
+		txd->callback(txd->callback_param);
+	}
+
+	/* Run any dependencies */
+	dma_run_dependencies(txd);
+
+	/* Unmap the dst buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
+		else
+			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
+	}
+
+	/* Unmap the src buffer, if requested */
+	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
+	}
+
+#ifdef FSL_DMA_LD_DEBUG
+	chan_dbg(chan, "LD %p free\n", desc);
+#endif
+	dma_pool_free(chan->desc_pool, desc, txd->phys);
+}
 
 /**
  * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @chan : Freescale DMA channel
  *
- * This will make sure that any pending transactions will be run.
- * If the DMA controller is idle, it will be started. Otherwise,
- * the DMA controller's interrupt handler will start any pending
- * transactions when it becomes idle.
+ * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
  */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
 	struct fsl_desc_sw *desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	/*
 	 * If the list of pending descriptors is empty, then we
 	 * don't need to do any work at all
 	 */
 	if (list_empty(&chan->ld_pending)) {
-		dev_dbg(chan->dev, "no pending LDs\n");
-		goto out_unlock;
+		chan_dbg(chan, "no pending LDs\n");
+		return;
 	}
 
 	/*
-	 * The DMA controller is not idle, which means the interrupt
-	 * handler will start any queued transactions when it runs
-	 * at the end of the current transaction
+	 * The DMA controller is not idle, which means that the interrupt
+	 * handler will start any queued transactions when it runs after
+	 * this transaction finishes
 	 */
-	if (!dma_is_idle(chan)) {
-		dev_dbg(chan->dev, "DMA controller still busy\n");
-		goto out_unlock;
+	if (!chan->idle) {
+		chan_dbg(chan, "DMA controller still busy\n");
+		return;
 	}
 
 	/*
-	 * TODO:
-	 * make sure the dma_halt() function really un-wedges the
-	 * controller as much as possible
-	 */
-	dma_halt(chan);
-
-	/*
 	 * If there are some link descriptors which have not been
 	 * transferred, we need to start the controller
 	 */
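Worth noting for the new cleanup path above: whether fsldma_cleanup_descriptor() unmaps the buffers is driven entirely by the flags the client set on the descriptor. A minimal client-side sketch (illustrative only; the callback and context names are hypothetical, not part of this patch):

	enum dma_ctrl_flags flags = DMA_CTRL_ACK |
				    DMA_COMPL_SKIP_SRC_UNMAP |
				    DMA_COMPL_SKIP_DEST_UNMAP;
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
	if (tx) {
		tx->callback = my_done_callback;	/* hypothetical */
		tx->callback_param = my_ctx;		/* hypothetical */
		cookie = tx->tx_submit(tx);
	}

With the SKIP flags set, the driver leaves both buffers mapped and the client remains responsible for unmapping them.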
@@ -931,18 +933,32 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 * Move all elements from the queue of pending transactions
 	 * onto the list of running transactions
 	 */
+	chan_dbg(chan, "idle, starting controller\n");
 	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
 	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
 
 	/*
+	 * The 85xx DMA controller doesn't clear the channel start bit
+	 * automatically at the end of a transfer. Therefore we must clear
+	 * it in software before starting the transfer.
+	 */
+	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+		u32 mode;
+
+		mode = DMA_IN(chan, &chan->regs->mr, 32);
+		mode &= ~FSL_DMA_MR_CS;
+		DMA_OUT(chan, &chan->regs->mr, mode, 32);
+	}
+
+	/*
 	 * Program the descriptor's address into the DMA controller,
 	 * then start the DMA transaction
 	 */
 	set_cdar(chan, desc->async_tx.phys);
-	dma_start(chan);
+	get_cdar(chan);
 
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	dma_start(chan);
+	chan->idle = false;
 }
 
 /**
@@ -952,7 +968,11 @@ out_unlock:
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
 	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -964,16 +984,18 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 			struct dma_tx_state *txstate)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
+	dma_cookie_t last_used;
+	unsigned long flags;
 
-	fsl_chan_ld_cleanup(chan);
+	spin_lock_irqsave(&chan->desc_lock, flags);
 
-	last_used = dchan->cookie;
 	last_complete = chan->completed_cookie;
+	last_used = dchan->cookie;
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
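The reason both cookies are now read under chan->desc_lock is that dma_async_is_complete() compares them as a pair; a torn snapshot could report completion for a transaction still in flight. The generic helper's logic is roughly equivalent to this (a simplified sketch of the dmaengine core, not fsldma code):

	static enum dma_status is_complete(dma_cookie_t cookie,
					   dma_cookie_t last_complete,
					   dma_cookie_t last_used)
	{
		if (last_complete <= last_used) {
			if ((cookie <= last_complete) || (cookie > last_used))
				return DMA_SUCCESS;
		} else {
			if ((cookie <= last_complete) && (cookie > last_used))
				return DMA_SUCCESS;
		}
		return DMA_IN_PROGRESS;
	}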
@@ -984,21 +1006,20 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 static irqreturn_t fsldma_chan_irq(int irq, void *data)
 {
 	struct fsldma_chan *chan = data;
-	int update_cookie = 0;
-	int xfer_ld_q = 0;
 	u32 stat;
 
 	/* save and clear the status register */
 	stat = get_sr(chan);
 	set_sr(chan, stat);
-	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
+	chan_dbg(chan, "irq: stat = 0x%x\n", stat);
 
+	/* check that this was really our device */
 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
 	if (!stat)
 		return IRQ_NONE;
 
 	if (stat & FSL_DMA_SR_TE)
-		dev_err(chan->dev, "Transfer Error!\n");
+		chan_err(chan, "Transfer Error!\n");
 
 	/*
 	 * Programming Error
@@ -1006,29 +1027,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * trigger a PE interrupt.
 	 */
 	if (stat & FSL_DMA_SR_PE) {
-		dev_dbg(chan->dev, "irq: Programming Error INT\n");
-		if (get_bcr(chan) == 0) {
-			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
-			 * Now, update the completed cookie, and continue the
-			 * next uncompleted transfer.
-			 */
-			update_cookie = 1;
-			xfer_ld_q = 1;
-		}
+		chan_dbg(chan, "irq: Programming Error INT\n");
 		stat &= ~FSL_DMA_SR_PE;
-	}
-
-	/*
-	 * If the link descriptor segment transfer finishes,
-	 * we will recycle the used descriptor.
-	 */
-	if (stat & FSL_DMA_SR_EOSI) {
-		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
-		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
-			(unsigned long long)get_cdar(chan),
-			(unsigned long long)get_ndar(chan));
-		stat &= ~FSL_DMA_SR_EOSI;
-		update_cookie = 1;
+		if (get_bcr(chan) != 0)
+			chan_err(chan, "Programming Error!\n");
 	}
 
 	/*
@@ -1036,10 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * and start the next transfer if it exists.
 	 */
 	if (stat & FSL_DMA_SR_EOCDI) {
-		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
+		chan_dbg(chan, "irq: End-of-Chain link INT\n");
 		stat &= ~FSL_DMA_SR_EOCDI;
-		update_cookie = 1;
-		xfer_ld_q = 1;
 	}
 
 	/*
@@ -1048,27 +1048,79 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 	 * prepare next transfer.
 	 */
 	if (stat & FSL_DMA_SR_EOLNI) {
-		dev_dbg(chan->dev, "irq: End-of-link INT\n");
+		chan_dbg(chan, "irq: End-of-link INT\n");
 		stat &= ~FSL_DMA_SR_EOLNI;
-		xfer_ld_q = 1;
 	}
 
-	if (update_cookie)
-		fsl_dma_update_completed_cookie(chan);
-	if (xfer_ld_q)
-		fsl_chan_xfer_ld_queue(chan);
+	/* check that the DMA controller is really idle */
+	if (!dma_is_idle(chan))
+		chan_err(chan, "irq: controller not idle!\n");
+
+	/* check that we handled all of the bits */
 	if (stat)
-		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
+		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
 
-	dev_dbg(chan->dev, "irq: Exit\n");
+	/*
+	 * Schedule the tasklet to handle all cleanup of the current
+	 * transaction. It will start a new transaction if there is
+	 * one pending.
+	 */
 	tasklet_schedule(&chan->tasklet);
+	chan_dbg(chan, "irq: Exit\n");
 	return IRQ_HANDLED;
 }
 
 static void dma_do_tasklet(unsigned long data)
 {
 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
-	fsl_chan_ld_cleanup(chan);
+	struct fsl_desc_sw *desc, *_desc;
+	LIST_HEAD(ld_cleanup);
+	unsigned long flags;
+
+	chan_dbg(chan, "tasklet entry\n");
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	/* update the cookie if we have some descriptors to cleanup */
+	if (!list_empty(&chan->ld_running)) {
+		dma_cookie_t cookie;
+
+		desc = to_fsl_desc(chan->ld_running.prev);
+		cookie = desc->async_tx.cookie;
+
+		chan->completed_cookie = cookie;
+		chan_dbg(chan, "completed_cookie=%d\n", cookie);
+	}
+
+	/*
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
+	 */
+	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+	/* the hardware is now idle and ready for more */
+	chan->idle = true;
+
+	/*
+	 * Start any pending transactions automatically
+	 *
+	 * In the ideal case, we keep the DMA controller busy while we go
+	 * ahead and free the descriptors below.
+	 */
+	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+		/* Remove from the list of transactions */
+		list_del(&desc->node);
+
+		/* Run all cleanup for this descriptor */
+		fsldma_cleanup_descriptor(chan, desc);
+	}
+
+	chan_dbg(chan, "tasklet exit\n");
 }
 
 static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
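The rewritten tasklet uses a classic pattern: splice the finished descriptors onto a private list under the lock, then run the potentially slow callbacks with the lock dropped. Stripped of driver specifics, the shape is (a generic sketch, assuming a struct with an embedded list_head named node):

	LIST_HEAD(work);

	spin_lock_irqsave(&lock, flags);
	list_splice_tail_init(&pending, &work);	/* pending is now empty */
	spin_unlock_irqrestore(&lock, flags);

	list_for_each_entry_safe(item, tmp, &work, node) {
		list_del(&item->node);
		process(item);		/* callbacks run without the lock held */
	}

Because the lock is released before the callbacks run, a callback may safely submit new work that takes the same lock.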
@@ -1116,7 +1168,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
 		chan = fdev->chan[i];
 		if (chan && chan->irq != NO_IRQ) {
-			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+			chan_dbg(chan, "free per-channel IRQ\n");
 			free_irq(chan->irq, chan);
 		}
 	}
@@ -1143,19 +1195,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
 			continue;
 
 		if (chan->irq == NO_IRQ) {
-			dev_err(fdev->dev, "no interrupts property defined for "
-					   "DMA channel %d. Please fix your "
-					   "device tree\n", chan->id);
+			chan_err(chan, "interrupts property missing in device tree\n");
 			ret = -ENODEV;
 			goto out_unwind;
 		}
 
-		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+		chan_dbg(chan, "request per-channel IRQ\n");
 		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
 				  "fsldma-chan", chan);
 		if (ret) {
-			dev_err(fdev->dev, "unable to request IRQ for DMA "
-					   "channel %d\n", chan->id);
+			chan_err(chan, "unable to request per-channel IRQ\n");
 			goto out_unwind;
 		}
 	}
@@ -1230,6 +1279,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 
 	fdev->chan[chan->id] = chan;
 	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
 
 	/* Initialize the channel */
 	dma_init(chan);
@@ -1250,6 +1300,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
 	spin_lock_init(&chan->desc_lock);
 	INIT_LIST_HEAD(&chan->ld_pending);
 	INIT_LIST_HEAD(&chan->ld_running);
+	chan->idle = true;
 
 	chan->common.device = &fdev->common;
 
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba9f403c0fbe..9cb5aa57c677 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -102,8 +102,8 @@ struct fsl_desc_sw {
 } __attribute__((aligned(32)));
 
 struct fsldma_chan_regs {
-	u32 mr;	/* 0x00 - Mode Register */
-	u32 sr;	/* 0x04 - Status Register */
+	u32 mr;		/* 0x00 - Mode Register */
+	u32 sr;		/* 0x04 - Status Register */
 	u64 cdar;	/* 0x08 - Current descriptor address register */
 	u64 sar;	/* 0x10 - Source Address Register */
 	u64 dar;	/* 0x18 - Destination Address Register */
@@ -135,6 +135,7 @@ struct fsldma_device {
 #define FSL_DMA_CHAN_START_EXT	0x00002000
 
 struct fsldma_chan {
+	char name[8];			/* Channel name */
 	struct fsldma_chan_regs __iomem *regs;
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
@@ -147,6 +148,7 @@ struct fsldma_chan {
 	int id;				/* Raw id of this channel */
 	struct tasklet_struct tasklet;
 	u32 feature;
+	bool idle;			/* DMA controller is idle */
 
 	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
 	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
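The new name field feeds the chan_dbg()/chan_err() wrappers used throughout the rewritten fsldma.c above. Their definitions are not visible in these hunks; a plausible form consistent with the call sites, assuming chan->dev and chan->name, is:

	/* sketch of the per-channel print helpers (assumed, not shown here) */
	#define chan_dbg(chan, fmt, arg...)				\
		dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
	#define chan_err(chan, fmt, arg...)				\
		dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

This avoids repeating "channel %d" in every message while keeping the channel identifiable in the log.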
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
new file mode 100644
index 000000000000..88aad4f54002
--- /dev/null
+++ b/drivers/dma/mxs-dma.c
@@ -0,0 +1,724 @@
1/*
2 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * Refer to drivers/dma/imx-sdma.c
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/types.h>
13#include <linux/mm.h>
14#include <linux/interrupt.h>
15#include <linux/clk.h>
16#include <linux/wait.h>
17#include <linux/sched.h>
18#include <linux/semaphore.h>
19#include <linux/device.h>
20#include <linux/dma-mapping.h>
21#include <linux/slab.h>
22#include <linux/platform_device.h>
23#include <linux/dmaengine.h>
24#include <linux/delay.h>
25
26#include <asm/irq.h>
27#include <mach/mxs.h>
28#include <mach/dma.h>
29#include <mach/common.h>
30
31/*
32 * NOTE: The term "PIO" throughout the mxs-dma implementation means
33 * the PIO mode of mxs apbh-dma and apbx-dma. In this working mode,
34 * the DMA controller can program the control registers of peripheral devices.
35 */
36
37#define MXS_DMA_APBH 0
38#define MXS_DMA_APBX 1
39#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
40
41#define APBH_VERSION_LATEST 3
42#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
43
44#define HW_APBHX_CTRL0 0x000
45#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
46#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
47#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
48#define BP_APBH_CTRL0_RESET_CHANNEL 16
49#define HW_APBHX_CTRL1 0x010
50#define HW_APBHX_CTRL2 0x020
51#define HW_APBHX_CHANNEL_CTRL 0x030
52#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
53#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
54#define HW_APBX_VERSION 0x800
55#define BP_APBHX_VERSION_MAJOR 24
56#define HW_APBHX_CHn_NXTCMDAR(n) \
57 (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
58#define HW_APBHX_CHn_SEMA(n) \
59 (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
60
61/*
62 * ccw bits definitions
63 *
64 * COMMAND: 0..1 (2)
65 * CHAIN: 2 (1)
66 * IRQ: 3 (1)
67 * NAND_LOCK: 4 (1) - not implemented
68 * NAND_WAIT4READY: 5 (1) - not implemented
69 * DEC_SEM: 6 (1)
70 * WAIT4END: 7 (1)
71 * HALT_ON_TERMINATE: 8 (1)
72 * TERMINATE_FLUSH: 9 (1)
73 * RESERVED: 10..11 (2)
74 * PIO_NUM: 12..15 (4)
75 */
76#define BP_CCW_COMMAND 0
77#define BM_CCW_COMMAND (3 << 0)
78#define CCW_CHAIN (1 << 2)
79#define CCW_IRQ (1 << 3)
80#define CCW_DEC_SEM (1 << 6)
81#define CCW_WAIT4END (1 << 7)
82#define CCW_HALT_ON_TERM (1 << 8)
83#define CCW_TERM_FLUSH (1 << 9)
84#define BP_CCW_PIO_NUM 12
85#define BM_CCW_PIO_NUM (0xf << 12)
86
87#define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field)
88
89#define MXS_DMA_CMD_NO_XFER 0
90#define MXS_DMA_CMD_WRITE 1
91#define MXS_DMA_CMD_READ 2
92#define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */
93
94struct mxs_dma_ccw {
95 u32 next;
96 u16 bits;
97 u16 xfer_bytes;
98#define MAX_XFER_BYTES 0xff00
99 u32 bufaddr;
100#define MXS_PIO_WORDS 16
101 u32 pio_words[MXS_PIO_WORDS];
102};
103
104#define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))
105
106struct mxs_dma_chan {
107 struct mxs_dma_engine *mxs_dma;
108 struct dma_chan chan;
109 struct dma_async_tx_descriptor desc;
110 struct tasklet_struct tasklet;
111 int chan_irq;
112 struct mxs_dma_ccw *ccw;
113 dma_addr_t ccw_phys;
114 dma_cookie_t last_completed;
115 enum dma_status status;
116 unsigned int flags;
117#define MXS_DMA_SG_LOOP (1 << 0)
118};
119
120#define MXS_DMA_CHANNELS 16
121#define MXS_DMA_CHANNELS_MASK 0xffff
122
123struct mxs_dma_engine {
124 int dev_id;
125 unsigned int version;
126 void __iomem *base;
127 struct clk *clk;
128 struct dma_device dma_device;
129 struct device_dma_parameters dma_parms;
130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
131};
132
133static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
134{
135 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
136 int chan_id = mxs_chan->chan.chan_id;
137
138 if (dma_is_apbh() && apbh_is_old())
139 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
140 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
141 else
142 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
143 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
144}
145
146static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
147{
148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
149 int chan_id = mxs_chan->chan.chan_id;
150
151 /* set cmd_addr up */
152 writel(mxs_chan->ccw_phys,
153 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
154
155 /* enable apbh channel clock */
156 if (dma_is_apbh()) {
157 if (apbh_is_old())
158 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
159 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
160 else
161 writel(1 << chan_id,
162 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
163 }
164
165 /* write 1 to SEMA to kick off the channel */
166 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
167}
168
169static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
170{
171 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
172 int chan_id = mxs_chan->chan.chan_id;
173
174 /* disable apbh channel clock */
175 if (dma_is_apbh()) {
176 if (apbh_is_old())
177 writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
178 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
179 else
180 writel(1 << chan_id,
181 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
182 }
183
184 mxs_chan->status = DMA_SUCCESS;
185}
186
187static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
188{
189 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
190 int chan_id = mxs_chan->chan.chan_id;
191
192 /* freeze the channel */
193 if (dma_is_apbh() && apbh_is_old())
194 writel(1 << chan_id,
195 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
196 else
197 writel(1 << chan_id,
198 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
199
200 mxs_chan->status = DMA_PAUSED;
201}
202
203static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
204{
205 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
206 int chan_id = mxs_chan->chan.chan_id;
207
208 /* unfreeze the channel */
209 if (dma_is_apbh() && apbh_is_old())
210 writel(1 << chan_id,
211 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
212 else
213 writel(1 << chan_id,
214 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
215
216 mxs_chan->status = DMA_IN_PROGRESS;
217}
218
219static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
220{
221 dma_cookie_t cookie = mxs_chan->chan.cookie;
222
223 if (++cookie < 0)
224 cookie = 1;
225
226 mxs_chan->chan.cookie = cookie;
227 mxs_chan->desc.cookie = cookie;
228
229 return cookie;
230}
231
232static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
233{
234 return container_of(chan, struct mxs_dma_chan, chan);
235}
236
237static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
238{
239 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
240
241 mxs_dma_enable_chan(mxs_chan);
242
243 return mxs_dma_assign_cookie(mxs_chan);
244}
245
246static void mxs_dma_tasklet(unsigned long data)
247{
248 struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
249
250 if (mxs_chan->desc.callback)
251 mxs_chan->desc.callback(mxs_chan->desc.callback_param);
252}
253
254static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
255{
256 struct mxs_dma_engine *mxs_dma = dev_id;
257 u32 stat1, stat2;
258
259 /* completion status */
260 stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
261 stat1 &= MXS_DMA_CHANNELS_MASK;
262 writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
263
264 /* error status */
265 stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
266 writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
267
268 /*
269 * When both the completion and error-of-termination bits are set at
270 * the same time, we do not take it as an error. IOW, it only becomes
271 * an error we need to handle here in case it is either (1) a bus
272 * error or (2) a termination error with no completion.
273 */
274 stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
275 (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
276
277 /* combine error and completion status for checking */
278 stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
279 while (stat1) {
280 int channel = fls(stat1) - 1;
281 struct mxs_dma_chan *mxs_chan =
282 &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
283
284 if (channel >= MXS_DMA_CHANNELS) {
285 dev_dbg(mxs_dma->dma_device.dev,
286 "%s: error in channel %d\n", __func__,
287 channel - MXS_DMA_CHANNELS);
288 mxs_chan->status = DMA_ERROR;
289 mxs_dma_reset_chan(mxs_chan);
290 } else {
291 if (mxs_chan->flags & MXS_DMA_SG_LOOP)
292 mxs_chan->status = DMA_IN_PROGRESS;
293 else
294 mxs_chan->status = DMA_SUCCESS;
295 }
296
297 stat1 &= ~(1 << channel);
298
299 if (mxs_chan->status == DMA_SUCCESS)
300 mxs_chan->last_completed = mxs_chan->desc.cookie;
301
302 /* schedule tasklet on this channel */
303 tasklet_schedule(&mxs_chan->tasklet);
304 }
305
306 return IRQ_HANDLED;
307}
308
309static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
310{
311 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
312 struct mxs_dma_data *data = chan->private;
313 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
314 int ret;
315
316 if (!data)
317 return -EINVAL;
318
319 mxs_chan->chan_irq = data->chan_irq;
320
321 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
322 &mxs_chan->ccw_phys, GFP_KERNEL);
323 if (!mxs_chan->ccw) {
324 ret = -ENOMEM;
325 goto err_alloc;
326 }
327
328 memset(mxs_chan->ccw, 0, PAGE_SIZE);
329
330 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
331 0, "mxs-dma", mxs_dma);
332 if (ret)
333 goto err_irq;
334
335 ret = clk_enable(mxs_dma->clk);
336 if (ret)
337 goto err_clk;
338
339 mxs_dma_reset_chan(mxs_chan);
340
341 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
342 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
343
344 /* the descriptor is ready */
345 async_tx_ack(&mxs_chan->desc);
346
347 return 0;
348
349err_clk:
350 free_irq(mxs_chan->chan_irq, mxs_dma);
351err_irq:
352 dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
353 mxs_chan->ccw, mxs_chan->ccw_phys);
354err_alloc:
355 return ret;
356}
357
358static void mxs_dma_free_chan_resources(struct dma_chan *chan)
359{
360 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
361 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
362
363 mxs_dma_disable_chan(mxs_chan);
364
365 free_irq(mxs_chan->chan_irq, mxs_dma);
366
367 dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
368 mxs_chan->ccw, mxs_chan->ccw_phys);
369
370 clk_disable(mxs_dma->clk);
371}
372
373static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
374 struct dma_chan *chan, struct scatterlist *sgl,
375 unsigned int sg_len, enum dma_data_direction direction,
376 unsigned long append)
377{
378 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
379 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
380 struct mxs_dma_ccw *ccw;
381 struct scatterlist *sg;
382 int i, j;
383 u32 *pio;
384 static int idx;
385
386 if (mxs_chan->status == DMA_IN_PROGRESS && !append)
387 return NULL;
388
389 if (sg_len + (append ? idx : 0) > NUM_CCW) {
390 dev_err(mxs_dma->dma_device.dev,
391 "maximum number of sg exceeded: %d > %d\n",
392 sg_len, NUM_CCW);
393 goto err_out;
394 }
395
396 mxs_chan->status = DMA_IN_PROGRESS;
397 mxs_chan->flags = 0;
398
399 /*
400 * If the sg is prepared with append flag set, the sg
401 * will be appended to the last prepared sg.
402 */
403 if (append) {
404 BUG_ON(idx < 1);
405 ccw = &mxs_chan->ccw[idx - 1];
406 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
407 ccw->bits |= CCW_CHAIN;
408 ccw->bits &= ~CCW_IRQ;
409 ccw->bits &= ~CCW_DEC_SEM;
410 ccw->bits &= ~CCW_WAIT4END;
411 } else {
412 idx = 0;
413 }
414
415 if (direction == DMA_NONE) {
416 ccw = &mxs_chan->ccw[idx++];
417 pio = (u32 *) sgl;
418
419 for (j = 0; j < sg_len;)
420 ccw->pio_words[j++] = *pio++;
421
422 ccw->bits = 0;
423 ccw->bits |= CCW_IRQ;
424 ccw->bits |= CCW_DEC_SEM;
425 ccw->bits |= CCW_WAIT4END;
426 ccw->bits |= CCW_HALT_ON_TERM;
427 ccw->bits |= CCW_TERM_FLUSH;
428 ccw->bits |= BF_CCW(sg_len, PIO_NUM);
429 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
430 } else {
431 for_each_sg(sgl, sg, sg_len, i) {
432 if (sg->length > MAX_XFER_BYTES) {
433 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
434 sg->length, MAX_XFER_BYTES);
435 goto err_out;
436 }
437
438 ccw = &mxs_chan->ccw[idx++];
439
440 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
441 ccw->bufaddr = sg->dma_address;
442 ccw->xfer_bytes = sg->length;
443
444 ccw->bits = 0;
445 ccw->bits |= CCW_CHAIN;
446 ccw->bits |= CCW_HALT_ON_TERM;
447 ccw->bits |= CCW_TERM_FLUSH;
448 ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
449 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
450 COMMAND);
451
452 if (i + 1 == sg_len) {
453 ccw->bits &= ~CCW_CHAIN;
454 ccw->bits |= CCW_IRQ;
455 ccw->bits |= CCW_DEC_SEM;
456 ccw->bits |= CCW_WAIT4END;
457 }
458 }
459 }
460
461 return &mxs_chan->desc;
462
463err_out:
464 mxs_chan->status = DMA_ERROR;
465 return NULL;
466}
467
468static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
469 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
470 size_t period_len, enum dma_data_direction direction)
471{
472 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
473 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
474 int num_periods = buf_len / period_len;
475 int i = 0, buf = 0;
476
477 if (mxs_chan->status == DMA_IN_PROGRESS)
478 return NULL;
479
480 mxs_chan->status = DMA_IN_PROGRESS;
481 mxs_chan->flags |= MXS_DMA_SG_LOOP;
482
483 if (num_periods > NUM_CCW) {
484 dev_err(mxs_dma->dma_device.dev,
485 "maximum number of sg exceeded: %d > %d\n",
486 num_periods, NUM_CCW);
487 goto err_out;
488 }
489
490 if (period_len > MAX_XFER_BYTES) {
491 dev_err(mxs_dma->dma_device.dev,
492 "maximum period size exceeded: %d > %d\n",
493 period_len, MAX_XFER_BYTES);
494 goto err_out;
495 }
496
497 while (buf < buf_len) {
498 struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];
499
500 if (i + 1 == num_periods)
501 ccw->next = mxs_chan->ccw_phys;
502 else
503 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);
504
505 ccw->bufaddr = dma_addr;
506 ccw->xfer_bytes = period_len;
507
508 ccw->bits = 0;
509 ccw->bits |= CCW_CHAIN;
510 ccw->bits |= CCW_IRQ;
511 ccw->bits |= CCW_HALT_ON_TERM;
512 ccw->bits |= CCW_TERM_FLUSH;
513 ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
514 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
515
516 dma_addr += period_len;
517 buf += period_len;
518
519 i++;
520 }
521
522 return &mxs_chan->desc;
523
524err_out:
525 mxs_chan->status = DMA_ERROR;
526 return NULL;
527}
528
529static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
530 unsigned long arg)
531{
532 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
533 int ret = 0;
534
535 switch (cmd) {
536 case DMA_TERMINATE_ALL:
537 mxs_dma_disable_chan(mxs_chan);
538 break;
539 case DMA_PAUSE:
540 mxs_dma_pause_chan(mxs_chan);
541 break;
542 case DMA_RESUME:
543 mxs_dma_resume_chan(mxs_chan);
544 break;
545 default:
546 ret = -ENOSYS;
547 }
548
549 return ret;
550}
551
552static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
553 dma_cookie_t cookie, struct dma_tx_state *txstate)
554{
555 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
556 dma_cookie_t last_used;
557
558 last_used = chan->cookie;
559 dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0);
560
561 return mxs_chan->status;
562}
563
564static void mxs_dma_issue_pending(struct dma_chan *chan)
565{
566 /*
567 * Nothing to do. We only have a single descriptor.
568 */
569}
570
571static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
572{
573 int ret;
574
575 ret = clk_enable(mxs_dma->clk);
576 if (ret)
577 goto err_out;
578
579 ret = mxs_reset_block(mxs_dma->base);
580 if (ret)
581 goto err_out;
582
583 /* only major version matters */
584 mxs_dma->version = readl(mxs_dma->base +
585 ((mxs_dma->dev_id == MXS_DMA_APBX) ?
586 HW_APBX_VERSION : HW_APBH_VERSION)) >>
587 BP_APBHX_VERSION_MAJOR;
588
589 /* enable apbh burst */
590 if (dma_is_apbh()) {
591 writel(BM_APBH_CTRL0_APB_BURST_EN,
592 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
593 writel(BM_APBH_CTRL0_APB_BURST8_EN,
594 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
595 }
596
597 /* enable irq for all the channels */
598 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
599 mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
600
601 clk_disable(mxs_dma->clk);
602
603 return 0;
604
605err_out:
606 return ret;
607}
608
609static int __init mxs_dma_probe(struct platform_device *pdev)
610{
611 const struct platform_device_id *id_entry =
612 platform_get_device_id(pdev);
613 struct mxs_dma_engine *mxs_dma;
614 struct resource *iores;
615 int ret, i;
616
617 mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
618 if (!mxs_dma)
619 return -ENOMEM;
620
621 mxs_dma->dev_id = id_entry->driver_data;
622
623 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
624
625 if (!request_mem_region(iores->start, resource_size(iores),
626 pdev->name)) {
627 ret = -EBUSY;
628 goto err_request_region;
629 }
630
631 mxs_dma->base = ioremap(iores->start, resource_size(iores));
632 if (!mxs_dma->base) {
633 ret = -ENOMEM;
634 goto err_ioremap;
635 }
636
637 mxs_dma->clk = clk_get(&pdev->dev, NULL);
638 if (IS_ERR(mxs_dma->clk)) {
639 ret = PTR_ERR(mxs_dma->clk);
640 goto err_clk;
641 }
642
643 dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
644 dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);
645
646 INIT_LIST_HEAD(&mxs_dma->dma_device.channels);
647
648 /* Initialize channel parameters */
649 for (i = 0; i < MXS_DMA_CHANNELS; i++) {
650 struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];
651
652 mxs_chan->mxs_dma = mxs_dma;
653 mxs_chan->chan.device = &mxs_dma->dma_device;
654
655 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
656 (unsigned long) mxs_chan);
657
658
659 /* Add the channel to mxs_chan list */
660 list_add_tail(&mxs_chan->chan.device_node,
661 &mxs_dma->dma_device.channels);
662 }
663
664 ret = mxs_dma_init(mxs_dma);
665 if (ret)
666 goto err_init;
667
668 mxs_dma->dma_device.dev = &pdev->dev;
669
670 /* mxs_dma gets 65535 bytes maximum sg size */
671 mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
672 dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);
673
674 mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
675 mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
676 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
677 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
678 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
679 mxs_dma->dma_device.device_control = mxs_dma_control;
680 mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;
681
682 ret = dma_async_device_register(&mxs_dma->dma_device);
683 if (ret) {
684 dev_err(mxs_dma->dma_device.dev, "unable to register\n");
685 goto err_init;
686 }
687
688 dev_info(mxs_dma->dma_device.dev, "initialized\n");
689
690 return 0;
691
692err_init:
693 clk_put(mxs_dma->clk);
694err_clk:
695 iounmap(mxs_dma->base);
696err_ioremap:
697 release_mem_region(iores->start, resource_size(iores));
698err_request_region:
699 kfree(mxs_dma);
700 return ret;
701}
702
703static struct platform_device_id mxs_dma_type[] = {
704 {
705 .name = "mxs-dma-apbh",
706 .driver_data = MXS_DMA_APBH,
707 }, {
708 .name = "mxs-dma-apbx",
709 .driver_data = MXS_DMA_APBX,
710 }
711};
712
713static struct platform_driver mxs_dma_driver = {
714 .driver = {
715 .name = "mxs-dma",
716 },
717 .id_table = mxs_dma_type,
718};
719
720static int __init mxs_dma_module_init(void)
721{
722 return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
723}
724subsys_initcall(mxs_dma_module_init);
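As a rough orientation for how a peripheral driver of this era would drive the new controller: it requests a channel with its struct mxs_dma_data passed via chan->private, then calls device_prep_slave_sg(), where a DMA_NONE "scatterlist" is really an array of PIO words (see the NOTE near the top of the file). A hedged sketch; every client-side name here is hypothetical:

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, &my_mxs_dma_data);
	if (!chan)
		return -ENODEV;

	/* pio[0..n-1] hold peripheral register values, not a real sgl */
	desc = chan->device->device_prep_slave_sg(chan,
			(struct scatterlist *)pio, pio_words, DMA_NONE, 0);
	if (desc)
		desc->tx_submit(desc);

The cast of the PIO word array to a scatterlist pointer mirrors what mxs_dma_prep_slave_sg() expects for the DMA_NONE case above.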
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1c38418ae61f..8d8fef1480a9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -82,7 +82,7 @@ struct pch_dma_regs {
 	u32	dma_sts1;
 	u32	reserved2;
 	u32	reserved3;
-	struct pch_dma_desc_regs desc[0];
+	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
 };
 
 struct pch_dma_desc {
@@ -124,7 +124,7 @@ struct pch_dma {
 	struct pci_pool		*pool;
 	struct pch_dma_regs	regs;
 	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
-	struct pch_dma_chan	channels[0];
+	struct pch_dma_chan	channels[MAX_CHAN_NR];
 };
 
 #define PCH_DMA_CTL0	0x00
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
 	dma_cookie_t cookie;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock(&pd_chan->lock);
 	cookie = pdc_assign_cookie(pd_chan, desc);
 
 	if (list_empty(&pd_chan->active_list)) {
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 		list_add_tail(&desc->desc_node, &pd_chan->queue);
 	}
 
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock(&pd_chan->lock);
 	return 0;
 }
 
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
 	struct pch_dma *pd = to_pd(chan->device);
 	dma_addr_t addr;
 
-	desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
+	desc = pci_pool_alloc(pd->pool, flags, &addr);
 	if (desc) {
 		memset(desc, 0, sizeof(struct pch_dma_desc));
 		INIT_LIST_HEAD(&desc->tx_list);
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 	struct pch_dma_desc *ret = NULL;
 	int i;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock(&pd_chan->lock);
 	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 		}
 		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
 	}
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock(&pd_chan->lock);
 	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
 
 	if (!ret) {
 		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
 		if (ret) {
-			spin_lock_bh(&pd_chan->lock);
+			spin_lock(&pd_chan->lock);
 			pd_chan->descs_allocated++;
-			spin_unlock_bh(&pd_chan->lock);
+			spin_unlock(&pd_chan->lock);
 		} else {
 			dev_err(chan2dev(&pd_chan->chan),
 				"failed to alloc desc\n");
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan,
 			 struct pch_dma_desc *desc)
 {
 	if (desc) {
-		spin_lock_bh(&pd_chan->lock);
+		spin_lock(&pd_chan->lock);
 		list_splice_init(&desc->tx_list, &pd_chan->free_list);
 		list_add(&desc->desc_node, &pd_chan->free_list);
-		spin_unlock_bh(&pd_chan->lock);
+		spin_unlock(&pd_chan->lock);
 	}
 }
 
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan)
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 
 	if (pdc_is_idle(pd_chan)) {
-		spin_lock_bh(&pd_chan->lock);
+		spin_lock(&pd_chan->lock);
 		pdc_advance_work(pd_chan);
-		spin_unlock_bh(&pd_chan->lock);
+		spin_unlock(&pd_chan->lock);
 	}
 }
 
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 			goto err_desc_get;
 		}
 
-
 		if (!first) {
 			first = desc;
 		} else {
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	spin_unlock_bh(&pd_chan->lock);
 
-
 	return 0;
 }
 
 static void pdc_tasklet(unsigned long data)
 {
 	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+	unsigned long flags;
 
 	if (!pdc_is_idle(pd_chan)) {
 		dev_err(chan2dev(&pd_chan->chan),
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data)
 		return;
 	}
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irqsave(&pd_chan->lock, flags);
 	if (test_and_clear_bit(0, &pd_chan->err_status))
 		pdc_handle_error(pd_chan);
 	else
 		pdc_advance_work(pd_chan);
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irqrestore(&pd_chan->lock, flags);
 }
 
 static irqreturn_t pd_irq(int irq, void *devid)
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
 	return ret;
 }
 
+#ifdef CONFIG_PM
 static void pch_dma_save_regs(struct pch_dma *pd)
 {
 	struct pch_dma_chan *pd_chan;
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
 
 	return 0;
 }
+#endif
 
 static int __devinit pch_dma_probe(struct pci_dev *pdev,
 				   const struct pci_device_id *id)
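The pch_dma hunks above replace spin_lock_bh() with plain spin_lock() on paths that are already atomic, and with spin_lock_irqsave() in the tasklet, which can now race with the hard-IRQ handler. The distinction in miniature (a generic sketch, struct foo and its field are hypothetical):

	struct foo {
		spinlock_t lock;
		int count;
	};

	static void update_from_task(struct foo *f)
	{
		unsigned long flags;

		spin_lock_irqsave(&f->lock, flags);	/* safe vs. hardirq */
		f->count++;
		spin_unlock_irqrestore(&f->lock, flags);
	}

	static void update_from_irq(struct foo *f)
	{
		spin_lock(&f->lock);	/* irqs already disabled in hardirq */
		f->count++;
		spin_unlock(&f->lock);
	}

The _bh variants only exclude softirqs, which is insufficient once the same lock is taken from the interrupt handler itself.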
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6e1d46a65d0e..af955de035f4 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -68,6 +68,7 @@ enum d40_command {
  * @base: Pointer to memory area when the pre_alloc_lli's are not large
  * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
  * pre_alloc_lli is used.
+ * @dma_addr: DMA address, if mapped
  * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
  * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
  * one buffer to one buffer.
@@ -75,6 +76,7 @@ enum d40_command {
 struct d40_lli_pool {
 	void	*base;
 	int	 size;
+	dma_addr_t	dma_addr;
 	/* Space for dst and src, plus an extra for padding */
 	u8	 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
 };
@@ -94,7 +96,6 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * @is_hw_linked: true if this job will automatically be continued for
  * the previous one.
  *
  * This descriptor is used for both logical and physical transfers.
@@ -114,7 +115,7 @@ struct d40_desc {
 	struct list_head		 node;
 
 	bool				 is_in_client_list;
-	bool				 is_hw_linked;
+	bool				 cyclic;
 };
 
 /**
@@ -130,6 +131,7 @@ struct d40_desc {
  */
 struct d40_lcla_pool {
 	void		*base;
+	dma_addr_t	 dma_addr;
 	void		*base_unaligned;
 	int		 pages;
 	spinlock_t	 lock;
@@ -303,9 +305,37 @@ struct d40_reg_val {
 	unsigned int val;
 };
 
-static int d40_pool_lli_alloc(struct d40_desc *d40d,
-			      int lli_len, bool is_log)
+static struct device *chan2dev(struct d40_chan *d40c)
 {
+	return &d40c->chan.dev->device;
+}
+
+static bool chan_is_physical(struct d40_chan *chan)
+{
+	return chan->log_num == D40_PHY_CHAN;
+}
+
+static bool chan_is_logical(struct d40_chan *chan)
+{
+	return !chan_is_physical(chan);
+}
+
+static void __iomem *chan_base(struct d40_chan *chan)
+{
+	return chan->base->virtbase + D40_DREG_PCBASE +
+	       chan->phy_chan->num * D40_DREG_PCDELTA;
+}
+
+#define d40_err(dev, format, arg...)		\
+	dev_err(dev, "[%s] " format, __func__, ## arg)
+
+#define chan_err(d40c, format, arg...)		\
+	d40_err(chan2dev(d40c), format, ## arg)
+
+static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
+			      int lli_len)
+{
+	bool is_log = chan_is_logical(d40c);
 	u32 align;
 	void *base;
 
@@ -319,7 +349,7 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
 		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
 		d40d->lli_pool.base = NULL;
 	} else {
-		d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
+		d40d->lli_pool.size = lli_len * 2 * align;
 
 		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
 		d40d->lli_pool.base = base;
@@ -329,22 +359,37 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d,
 	}
 
 	if (is_log) {
-		d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
-					      align);
-		d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
-					      align);
+		d40d->lli_log.src = PTR_ALIGN(base, align);
+		d40d->lli_log.dst = d40d->lli_log.src + lli_len;
+
+		d40d->lli_pool.dma_addr = 0;
 	} else {
-		d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
-					      align);
-		d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
-					      align);
+		d40d->lli_phy.src = PTR_ALIGN(base, align);
+		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;
+
+		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
+							 d40d->lli_phy.src,
+							 d40d->lli_pool.size,
+							 DMA_TO_DEVICE);
+
+		if (dma_mapping_error(d40c->base->dev,
+				      d40d->lli_pool.dma_addr)) {
+			kfree(d40d->lli_pool.base);
+			d40d->lli_pool.base = NULL;
+			d40d->lli_pool.dma_addr = 0;
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
 }
 
-static void d40_pool_lli_free(struct d40_desc *d40d)
+static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
 {
+	if (d40d->lli_pool.dma_addr)
+		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
+				 d40d->lli_pool.size, DMA_TO_DEVICE);
+
 	kfree(d40d->lli_pool.base);
 	d40d->lli_pool.base = NULL;
 	d40d->lli_pool.size = 0;
@@ -391,7 +436,7 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
 	int i;
 	int ret = -EINVAL;
 
-	if (d40c->log_num == D40_PHY_CHAN)
+	if (chan_is_physical(d40c))
 		return 0;
 
 	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
@@ -430,7 +475,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
 		list_for_each_entry_safe(d, _d, &d40c->client, node)
 			if (async_tx_test_ack(&d->txd)) {
-				d40_pool_lli_free(d);
+				d40_pool_lli_free(d40c, d);
 				d40_desc_remove(d);
 				desc = d;
 				memset(desc, 0, sizeof(*desc));
@@ -450,6 +495,7 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
 {
 
+	d40_pool_lli_free(d40c, d40d);
 	d40_lcla_free_all(d40c, d40d);
 	kmem_cache_free(d40c->base->desc_slab, d40d);
 }
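The new dma_addr bookkeeping means d40_pool_lli_free() now pairs the dma_map_single() done at allocation with a dma_unmap_single(). The general streaming-DMA contract the pool code follows is (a generic sketch):

	dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hardware reads descriptors from 'handle' ... */
	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);

This replaces the earlier fire-and-forget mapping whose handle was discarded, which leaked mappings on some IOMMU configurations.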
@@ -459,57 +505,128 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
 	list_add_tail(&desc->node, &d40c->active);
 }
 
-static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
 {
-	int curr_lcla = -EINVAL, next_lcla;
+	struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
+	struct d40_phy_lli *lli_src = desc->lli_phy.src;
+	void __iomem *base = chan_base(chan);
+
+	writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
+	writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
+	writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
+	writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
+
+	writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
+	writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
+	writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
+	writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
+}
 
-	if (d40c->log_num == D40_PHY_CHAN) {
-		d40_phy_lli_write(d40c->base->virtbase,
-				  d40c->phy_chan->num,
-				  d40d->lli_phy.dst,
-				  d40d->lli_phy.src);
-		d40d->lli_current = d40d->lli_len;
-	} else {
+static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
+{
+	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
+	struct d40_log_lli_bidir *lli = &desc->lli_log;
+	int lli_current = desc->lli_current;
+	int lli_len = desc->lli_len;
+	bool cyclic = desc->cyclic;
+	int curr_lcla = -EINVAL;
+	int first_lcla = 0;
+	bool linkback;
 
-		if ((d40d->lli_len - d40d->lli_current) > 1)
-			curr_lcla = d40_lcla_alloc_one(d40c, d40d);
+	/*
+	 * We may have partially running cyclic transfers, in case we didn't get
+	 * enough LCLA entries.
+	 */
+	linkback = cyclic && lli_current == 0;
 
-		d40_log_lli_lcpa_write(d40c->lcpa,
-				       &d40d->lli_log.dst[d40d->lli_current],
-				       &d40d->lli_log.src[d40d->lli_current],
-				       curr_lcla);
+	/*
+	 * For linkback, we need one LCLA even with only one link, because we
+	 * can't link back to the one in LCPA space
+	 */
+	if (linkback || (lli_len - lli_current > 1)) {
+		curr_lcla = d40_lcla_alloc_one(chan, desc);
+		first_lcla = curr_lcla;
+	}
 
-		d40d->lli_current++;
-		for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) {
-			struct d40_log_lli *lcla;
+	/*
+	 * For linkback, we normally load the LCPA in the loop since we need to
+	 * link it to the second LCLA and not the first. However, if we
+	 * couldn't even get a first LCLA, then we have to run in LCPA and
+	 * reload manually.
+	 */
+	if (!linkback || curr_lcla == -EINVAL) {
+		unsigned int flags = 0;
 
-			if (d40d->lli_current + 1 < d40d->lli_len)
-				next_lcla = d40_lcla_alloc_one(d40c, d40d);
-			else
-				next_lcla = -EINVAL;
+		if (curr_lcla == -EINVAL)
+			flags |= LLI_TERM_INT;
 
-			lcla = d40c->base->lcla_pool.base +
-				d40c->phy_chan->num * 1024 +
-				8 * curr_lcla * 2;
+		d40_log_lli_lcpa_write(chan->lcpa,
+				       &lli->dst[lli_current],
+				       &lli->src[lli_current],
+				       curr_lcla,
+				       flags);
+		lli_current++;
+	}
 
-			d40_log_lli_lcla_write(lcla,
-					       &d40d->lli_log.dst[d40d->lli_current],
-					       &d40d->lli_log.src[d40d->lli_current],
-					       next_lcla);
+	if (curr_lcla < 0)
+		goto out;
 
-			(void) dma_map_single(d40c->base->dev, lcla,
-					      2 * sizeof(struct d40_log_lli),
-					      DMA_TO_DEVICE);
+	for (; lli_current < lli_len; lli_current++) {
+		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
+					   8 * curr_lcla * 2;
+		struct d40_log_lli *lcla = pool->base + lcla_offset;
+		unsigned int flags = 0;
+		int next_lcla;
 
-			curr_lcla = next_lcla;
+		if (lli_current + 1 < lli_len)
+			next_lcla = d40_lcla_alloc_one(chan, desc);
+		else
+			next_lcla = linkback ? first_lcla : -EINVAL;
 
-			if (curr_lcla == -EINVAL) {
-				d40d->lli_current++;
-				break;
-			}
+		if (cyclic || next_lcla == -EINVAL)
+			flags |= LLI_TERM_INT;
 
+		if (linkback && curr_lcla == first_lcla) {
+			/* First link goes in both LCPA and LCLA */
+			d40_log_lli_lcpa_write(chan->lcpa,
+					       &lli->dst[lli_current],
+					       &lli->src[lli_current],
+					       next_lcla, flags);
+		}
+
+		/*
+		 * One unused LCLA in the cyclic case if the very first
+		 * next_lcla fails...
+		 */
+		d40_log_lli_lcla_write(lcla,
+				       &lli->dst[lli_current],
+				       &lli->src[lli_current],
+				       next_lcla, flags);
+
+		dma_sync_single_range_for_device(chan->base->dev,
+					pool->dma_addr, lcla_offset,
+					2 * sizeof(struct d40_log_lli),
+					DMA_TO_DEVICE);
+
+		curr_lcla = next_lcla;
+
+		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
+			lli_current++;
+			break;
 		}
 	}
+
+out:
+	desc->lli_current = lli_current;
+}
+
+static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
+{
+	if (chan_is_physical(d40c)) {
+		d40_phy_lli_load(d40c, d40d);
+		d40d->lli_current = d40d->lli_len;
+	} else
+		d40_log_lli_to_lcxa(d40c, d40d);
 }
 
 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
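Because the LCLA pool is now mapped once up front, each link the CPU rewrites must be flushed back to the device's view before the hardware can follow it, which is what the dma_sync_single_range_for_device() call above does. The general pattern for descriptors modified in place on an existing streaming mapping is (a generic sketch):

	/* hand the range back to the CPU before touching it */
	dma_sync_single_range_for_cpu(dev, handle, offset, len, DMA_TO_DEVICE);
	/* ... CPU updates the descriptor words ... */
	/* then flush the range back to the device */
	dma_sync_single_range_for_device(dev, handle, offset, len, DMA_TO_DEVICE);

For a write-only producer like this driver, only the for_device half is strictly needed, which matches the code above.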
@@ -543,18 +660,6 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
 	return d;
 }
 
-static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
-{
-	struct d40_desc *d;
-
-	if (list_empty(&d40c->queue))
-		return NULL;
-	list_for_each_entry(d, &d40c->queue, node)
-		if (list_is_last(&d->node, &d40c->queue))
-			break;
-	return d;
-}
-
 static int d40_psize_2_burst_size(bool is_log, int psize)
 {
 	if (is_log) {
@@ -666,9 +771,9 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
 	}
 
 	if (i == D40_SUSPEND_MAX_IT) {
-		dev_err(&d40c->chan.dev->device,
-			"[%s]: unable to suspend the chl %d (log: %d) status %x\n",
-			__func__, d40c->phy_chan->num, d40c->log_num,
+		chan_err(d40c,
+			"unable to suspend the chl %d (log: %d) status %x\n",
+			d40c->phy_chan->num, d40c->log_num,
 			status);
 		dump_stack();
 		ret = -EBUSY;
@@ -701,17 +806,45 @@ static void d40_term_all(struct d40_chan *d40c)
701 d40c->busy = false; 806 d40c->busy = false;
702} 807}
703 808
809static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
810 u32 event, int reg)
811{
812 void __iomem *addr = chan_base(d40c) + reg;
813 int tries;
814
815 if (!enable) {
816 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
817 | ~D40_EVENTLINE_MASK(event), addr);
818 return;
819 }
820
821 /*
822 * The hardware sometimes doesn't register the enable when src and dst
823 * event lines are active on the same logical channel. Retry to ensure
824 * it does. Usually only one retry is sufficient.
825 */
826 tries = 100;
827 while (--tries) {
828 writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
829 | ~D40_EVENTLINE_MASK(event), addr);
830
831 if (readl(addr) & D40_EVENTLINE_MASK(event))
832 break;
833 }
834
835 if (tries != 99)
836 dev_dbg(chan2dev(d40c),
837 "[%s] workaround enable S%cLNK (%d tries)\n",
838 __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
839 100 - tries);
840
841 WARN_ON(!tries);
842}
843
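The retry loop in __d40_config_set_event() is a plain write-then-verify idiom: write the activate pattern, read the register back, and repeat until the event field sticks. A minimal sketch of just that idiom, with the D40-specific bit packing left to the caller:

	#include <linux/io.h>

	/*
	 * Write 'val' to 'addr' and retry until the bits in 'check_mask'
	 * read back non-zero. Returns false if the write never stuck.
	 */
	static bool write_and_verify(void __iomem *addr, u32 val, u32 check_mask)
	{
		int tries = 100;

		while (--tries) {
			writel(val, addr);
			if (readl(addr) & check_mask)
				return true;
		}
		return false;
	}

In the function above, val corresponds to (D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ~D40_EVENTLINE_MASK(event) and check_mask to D40_EVENTLINE_MASK(event).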
704static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 844static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
705{ 845{
706 u32 val;
707 unsigned long flags; 846 unsigned long flags;
708 847
709 /* Notice, that disable requires the physical channel to be stopped */
710 if (do_enable)
711 val = D40_ACTIVATE_EVENTLINE;
712 else
713 val = D40_DEACTIVATE_EVENTLINE;
714
715 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 848 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
716 849
717 /* Enable event line connected to device (or memcpy) */ 850 /* Enable event line connected to device (or memcpy) */
@@ -719,20 +852,15 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
719 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 852 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
720 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 853 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
721 854
722 writel((val << D40_EVENTLINE_POS(event)) | 855 __d40_config_set_event(d40c, do_enable, event,
723 ~D40_EVENTLINE_MASK(event), 856 D40_CHAN_REG_SSLNK);
724 d40c->base->virtbase + D40_DREG_PCBASE +
725 d40c->phy_chan->num * D40_DREG_PCDELTA +
726 D40_CHAN_REG_SSLNK);
727 } 857 }
858
728 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 859 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
729 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 860 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
730 861
731 writel((val << D40_EVENTLINE_POS(event)) | 862 __d40_config_set_event(d40c, do_enable, event,
732 ~D40_EVENTLINE_MASK(event), 863 D40_CHAN_REG_SDLNK);
733 d40c->base->virtbase + D40_DREG_PCBASE +
734 d40c->phy_chan->num * D40_DREG_PCDELTA +
735 D40_CHAN_REG_SDLNK);
736 } 864 }
737 865
738 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 866 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
@@ -740,15 +868,12 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
740 868
741static u32 d40_chan_has_events(struct d40_chan *d40c) 869static u32 d40_chan_has_events(struct d40_chan *d40c)
742{ 870{
871 void __iomem *chanbase = chan_base(d40c);
743 u32 val; 872 u32 val;
744 873
745 val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 874 val = readl(chanbase + D40_CHAN_REG_SSLNK);
746 d40c->phy_chan->num * D40_DREG_PCDELTA + 875 val |= readl(chanbase + D40_CHAN_REG_SDLNK);
747 D40_CHAN_REG_SSLNK);
748 876
749 val |= readl(d40c->base->virtbase + D40_DREG_PCBASE +
750 d40c->phy_chan->num * D40_DREG_PCDELTA +
751 D40_CHAN_REG_SDLNK);
752 return val; 877 return val;
753} 878}
754 879
@@ -771,7 +896,7 @@ static u32 d40_get_prmo(struct d40_chan *d40c)
771 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 896 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
772 }; 897 };
773 898
774 if (d40c->log_num == D40_PHY_CHAN) 899 if (chan_is_physical(d40c))
775 return phy_map[d40c->dma_cfg.mode_opt]; 900 return phy_map[d40c->dma_cfg.mode_opt];
776 else 901 else
777 return log_map[d40c->dma_cfg.mode_opt]; 902 return log_map[d40c->dma_cfg.mode_opt];
@@ -785,7 +910,7 @@ static void d40_config_write(struct d40_chan *d40c)
785 /* Odd addresses are even addresses + 4 */ 910 /* Odd addresses are even addresses + 4 */
786 addr_base = (d40c->phy_chan->num % 2) * 4; 911 addr_base = (d40c->phy_chan->num % 2) * 4;
787 /* Setup channel mode to logical or physical */ 912 /* Setup channel mode to logical or physical */
788 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << 913 var = ((u32)(chan_is_logical(d40c)) + 1) <<
789 D40_CHAN_POS(d40c->phy_chan->num); 914 D40_CHAN_POS(d40c->phy_chan->num);
790 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 915 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
791 916
@@ -794,30 +919,18 @@ static void d40_config_write(struct d40_chan *d40c)
794 919
795 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 920 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
796 921
797 if (d40c->log_num != D40_PHY_CHAN) { 922 if (chan_is_logical(d40c)) {
923 int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
924 & D40_SREG_ELEM_LOG_LIDX_MASK;
925 void __iomem *chanbase = chan_base(d40c);
926
798 /* Set default config for CFG reg */ 927 /* Set default config for CFG reg */
799 writel(d40c->src_def_cfg, 928 writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
800 d40c->base->virtbase + D40_DREG_PCBASE + 929 writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);
801 d40c->phy_chan->num * D40_DREG_PCDELTA +
802 D40_CHAN_REG_SSCFG);
803 writel(d40c->dst_def_cfg,
804 d40c->base->virtbase + D40_DREG_PCBASE +
805 d40c->phy_chan->num * D40_DREG_PCDELTA +
806 D40_CHAN_REG_SDCFG);
807 930
808 /* Set LIDX for lcla */ 931 /* Set LIDX for lcla */
809 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 932 writel(lidx, chanbase + D40_CHAN_REG_SSELT);
810 D40_SREG_ELEM_LOG_LIDX_MASK, 933 writel(lidx, chanbase + D40_CHAN_REG_SDELT);
811 d40c->base->virtbase + D40_DREG_PCBASE +
812 d40c->phy_chan->num * D40_DREG_PCDELTA +
813 D40_CHAN_REG_SDELT);
814
815 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
816 D40_SREG_ELEM_LOG_LIDX_MASK,
817 d40c->base->virtbase + D40_DREG_PCBASE +
818 d40c->phy_chan->num * D40_DREG_PCDELTA +
819 D40_CHAN_REG_SSELT);
820
821 } 934 }
822} 935}
823 936
@@ -825,15 +938,15 @@ static u32 d40_residue(struct d40_chan *d40c)
825{ 938{
826 u32 num_elt; 939 u32 num_elt;
827 940
828 if (d40c->log_num != D40_PHY_CHAN) 941 if (chan_is_logical(d40c))
829 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 942 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
830 >> D40_MEM_LCSP2_ECNT_POS; 943 >> D40_MEM_LCSP2_ECNT_POS;
831 else 944 else {
832 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 945 u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
833 d40c->phy_chan->num * D40_DREG_PCDELTA + 946 num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
834 D40_CHAN_REG_SDELT) & 947 >> D40_SREG_ELEM_PHY_ECNT_POS;
835 D40_SREG_ELEM_PHY_ECNT_MASK) >> 948 }
836 D40_SREG_ELEM_PHY_ECNT_POS; 949
837 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 950 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
838} 951}
839 952
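The final multiply works because data_width is stored as the log2 of the element size in bytes (0 = byte up to 3 = doubleword), so the remaining byte count is a shift. A worked example under that assumption:

	u32 num_elt = 42;		/* elements still queued in hardware */
	u32 data_width = 2;		/* 32-bit (word) channel: log2(4) */
	u32 bytes_left = num_elt * (1 << data_width);	/* 42 * 4 = 168 */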
@@ -841,20 +954,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
841{ 954{
842 bool is_link; 955 bool is_link;
843 956
844 if (d40c->log_num != D40_PHY_CHAN) 957 if (chan_is_logical(d40c))
845 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 958 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
846 else 959 else
847 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 960 is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
848 d40c->phy_chan->num * D40_DREG_PCDELTA + 961 & D40_SREG_LNK_PHYS_LNK_MASK;
849 D40_CHAN_REG_SDLNK) & 962
850 D40_SREG_LNK_PHYS_LNK_MASK;
851 return is_link; 963 return is_link;
852} 964}
853 965
854static int d40_pause(struct dma_chan *chan) 966static int d40_pause(struct d40_chan *d40c)
855{ 967{
856 struct d40_chan *d40c =
857 container_of(chan, struct d40_chan, chan);
858 int res = 0; 968 int res = 0;
859 unsigned long flags; 969 unsigned long flags;
860 970
@@ -865,7 +975,7 @@ static int d40_pause(struct dma_chan *chan)
865 975
866 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 976 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
867 if (res == 0) { 977 if (res == 0) {
868 if (d40c->log_num != D40_PHY_CHAN) { 978 if (chan_is_logical(d40c)) {
869 d40_config_set_event(d40c, false); 979 d40_config_set_event(d40c, false);
870 /* Resume the other logical channels if any */ 980 /* Resume the other logical channels if any */
871 if (d40_chan_has_events(d40c)) 981 if (d40_chan_has_events(d40c))
@@ -878,10 +988,8 @@ static int d40_pause(struct dma_chan *chan)
878 return res; 988 return res;
879} 989}
880 990
881static int d40_resume(struct dma_chan *chan) 991static int d40_resume(struct d40_chan *d40c)
882{ 992{
883 struct d40_chan *d40c =
884 container_of(chan, struct d40_chan, chan);
885 int res = 0; 993 int res = 0;
886 unsigned long flags; 994 unsigned long flags;
887 995
@@ -891,7 +999,7 @@ static int d40_resume(struct dma_chan *chan)
891 spin_lock_irqsave(&d40c->lock, flags); 999 spin_lock_irqsave(&d40c->lock, flags);
892 1000
893 if (d40c->base->rev == 0) 1001 if (d40c->base->rev == 0)
894 if (d40c->log_num != D40_PHY_CHAN) { 1002 if (chan_is_logical(d40c)) {
895 res = d40_channel_execute_command(d40c, 1003 res = d40_channel_execute_command(d40c,
896 D40_DMA_SUSPEND_REQ); 1004 D40_DMA_SUSPEND_REQ);
897 goto no_suspend; 1005 goto no_suspend;
@@ -900,7 +1008,7 @@ static int d40_resume(struct dma_chan *chan)
900 /* If bytes left to transfer or linked tx resume job */ 1008 /* If bytes left to transfer or linked tx resume job */
901 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1009 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
902 1010
903 if (d40c->log_num != D40_PHY_CHAN) 1011 if (chan_is_logical(d40c))
904 d40_config_set_event(d40c, true); 1012 d40_config_set_event(d40c, true);
905 1013
906 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1014 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -911,75 +1019,20 @@ no_suspend:
911 return res; 1019 return res;
912} 1020}
913 1021
914static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) 1022static int d40_terminate_all(struct d40_chan *chan)
915{ 1023{
916 /* TODO: Write */ 1024 unsigned long flags;
917} 1025 int ret = 0;
918
919static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d)
920{
921 struct d40_desc *d40d_prev = NULL;
922 int i;
923 u32 val;
924
925 if (!list_empty(&d40c->queue))
926 d40d_prev = d40_last_queued(d40c);
927 else if (!list_empty(&d40c->active))
928 d40d_prev = d40_first_active_get(d40c);
929
930 if (!d40d_prev)
931 return;
932
933 /* Here we try to join this job with previous jobs */
934 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
935 d40c->phy_chan->num * D40_DREG_PCDELTA +
936 D40_CHAN_REG_SSLNK);
937
938 /* Figure out which link we're currently transmitting */
939 for (i = 0; i < d40d_prev->lli_len; i++)
940 if (val == d40d_prev->lli_phy.src[i].reg_lnk)
941 break;
942
943 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
944 d40c->phy_chan->num * D40_DREG_PCDELTA +
945 D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS;
946
947 if (i == (d40d_prev->lli_len - 1) && val > 0) {
948 /* Change the current one */
949 writel(virt_to_phys(d40d->lli_phy.src),
950 d40c->base->virtbase + D40_DREG_PCBASE +
951 d40c->phy_chan->num * D40_DREG_PCDELTA +
952 D40_CHAN_REG_SSLNK);
953 writel(virt_to_phys(d40d->lli_phy.dst),
954 d40c->base->virtbase + D40_DREG_PCBASE +
955 d40c->phy_chan->num * D40_DREG_PCDELTA +
956 D40_CHAN_REG_SDLNK);
957
958 d40d->is_hw_linked = true;
959
960 } else if (i < d40d_prev->lli_len) {
961 (void) dma_unmap_single(d40c->base->dev,
962 virt_to_phys(d40d_prev->lli_phy.src),
963 d40d_prev->lli_pool.size,
964 DMA_TO_DEVICE);
965 1026
966 /* Keep the settings */ 1027 ret = d40_pause(chan);
967 val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & 1028 if (!ret && chan_is_physical(chan))
968 ~D40_SREG_LNK_PHYS_LNK_MASK; 1029 ret = d40_channel_execute_command(chan, D40_DMA_STOP);
969 d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk =
970 val | virt_to_phys(d40d->lli_phy.src);
971 1030
972 val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & 1031 spin_lock_irqsave(&chan->lock, flags);
973 ~D40_SREG_LNK_PHYS_LNK_MASK; 1032 d40_term_all(chan);
974 d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = 1033 spin_unlock_irqrestore(&chan->lock, flags);
975 val | virt_to_phys(d40d->lli_phy.dst);
976 1034
977 (void) dma_map_single(d40c->base->dev, 1035 return ret;
978 d40d_prev->lli_phy.src,
979 d40d_prev->lli_pool.size,
980 DMA_TO_DEVICE);
981 d40d->is_hw_linked = true;
982 }
983} 1036}
984 1037
985static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 1038static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -990,8 +1043,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
990 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 1043 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
991 unsigned long flags; 1044 unsigned long flags;
992 1045
993 (void) d40_pause(&d40c->chan);
994
995 spin_lock_irqsave(&d40c->lock, flags); 1046 spin_lock_irqsave(&d40c->lock, flags);
996 1047
997 d40c->chan.cookie++; 1048 d40c->chan.cookie++;
@@ -1001,17 +1052,10 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1001 1052
1002 d40d->txd.cookie = d40c->chan.cookie; 1053 d40d->txd.cookie = d40c->chan.cookie;
1003 1054
1004 if (d40c->log_num == D40_PHY_CHAN)
1005 d40_tx_submit_phy(d40c, d40d);
1006 else
1007 d40_tx_submit_log(d40c, d40d);
1008
1009 d40_desc_queue(d40c, d40d); 1055 d40_desc_queue(d40c, d40d);
1010 1056
1011 spin_unlock_irqrestore(&d40c->lock, flags); 1057 spin_unlock_irqrestore(&d40c->lock, flags);
1012 1058
1013 (void) d40_resume(&d40c->chan);
1014
1015 return tx->cookie; 1059 return tx->cookie;
1016} 1060}
1017 1061
@@ -1020,7 +1064,7 @@ static int d40_start(struct d40_chan *d40c)
1020 if (d40c->base->rev == 0) { 1064 if (d40c->base->rev == 0) {
1021 int err; 1065 int err;
1022 1066
1023 if (d40c->log_num != D40_PHY_CHAN) { 1067 if (chan_is_logical(d40c)) {
1024 err = d40_channel_execute_command(d40c, 1068 err = d40_channel_execute_command(d40c,
1025 D40_DMA_SUSPEND_REQ); 1069 D40_DMA_SUSPEND_REQ);
1026 if (err) 1070 if (err)
@@ -1028,7 +1072,7 @@ static int d40_start(struct d40_chan *d40c)
1028 } 1072 }
1029 } 1073 }
1030 1074
1031 if (d40c->log_num != D40_PHY_CHAN) 1075 if (chan_is_logical(d40c))
1032 d40_config_set_event(d40c, true); 1076 d40_config_set_event(d40c, true);
1033 1077
1034 return d40_channel_execute_command(d40c, D40_DMA_RUN); 1078 return d40_channel_execute_command(d40c, D40_DMA_RUN);
@@ -1051,21 +1095,14 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
1051 /* Add to active queue */ 1095 /* Add to active queue */
1052 d40_desc_submit(d40c, d40d); 1096 d40_desc_submit(d40c, d40d);
1053 1097
1054 /* 1098 /* Initiate DMA job */
1055 * If this job is already linked in hw, 1099 d40_desc_load(d40c, d40d);
1056 * do not submit it.
1057 */
1058
1059 if (!d40d->is_hw_linked) {
1060 /* Initiate DMA job */
1061 d40_desc_load(d40c, d40d);
1062 1100
1063 /* Start dma job */ 1101 /* Start dma job */
1064 err = d40_start(d40c); 1102 err = d40_start(d40c);
1065 1103
1066 if (err) 1104 if (err)
1067 return NULL; 1105 return NULL;
1068 }
1069 } 1106 }
1070 1107
1071 return d40d; 1108 return d40d;
@@ -1082,17 +1119,36 @@ static void dma_tc_handle(struct d40_chan *d40c)
1082 if (d40d == NULL) 1119 if (d40d == NULL)
1083 return; 1120 return;
1084 1121
1085 d40_lcla_free_all(d40c, d40d); 1122 if (d40d->cyclic) {
1123 /*
1124 * If this was a partially loaded list, we need to reload
1125 * it, but only once the whole list has completed. We need
1126 * to check for done because the interrupt will hit for
1127 * every link, not just the last one.
1128 */
1129 if (d40d->lli_current < d40d->lli_len
1130 && !d40_tx_is_linked(d40c)
1131 && !d40_residue(d40c)) {
1132 d40_lcla_free_all(d40c, d40d);
1133 d40_desc_load(d40c, d40d);
1134 (void) d40_start(d40c);
1086 1135
1087 if (d40d->lli_current < d40d->lli_len) { 1136 if (d40d->lli_current == d40d->lli_len)
1088 d40_desc_load(d40c, d40d); 1137 d40d->lli_current = 0;
1089 /* Start dma job */ 1138 }
1090 (void) d40_start(d40c); 1139 } else {
1091 return; 1140 d40_lcla_free_all(d40c, d40d);
1092 }
1093 1141
1094 if (d40_queue_start(d40c) == NULL) 1142 if (d40d->lli_current < d40d->lli_len) {
1095 d40c->busy = false; 1143 d40_desc_load(d40c, d40d);
1144 /* Start dma job */
1145 (void) d40_start(d40c);
1146 return;
1147 }
1148
1149 if (d40_queue_start(d40c) == NULL)
1150 d40c->busy = false;
1151 }
1096 1152
1097 d40c->pending_tx++; 1153 d40c->pending_tx++;
1098 tasklet_schedule(&d40c->tasklet); 1154 tasklet_schedule(&d40c->tasklet);
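The cyclic branch above gates the reload on three conditions at once. Folded into one hypothetical predicate (a sketch built only from helpers already shown in this diff):

	/*
	 * Reload a partially loaded cyclic list only when the current load
	 * has fully drained: more llis remain, nothing is still linked in
	 * hardware, and there is no residue. The TC interrupt fires for
	 * every link, so without the last two checks the list would be
	 * reloaded too early.
	 */
	static bool d40_cyclic_reload_due(struct d40_chan *d40c,
					  struct d40_desc *d40d)
	{
		return d40d->lli_current < d40d->lli_len &&
		       !d40_tx_is_linked(d40c) &&
		       !d40_residue(d40c);
	}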
@@ -1111,11 +1167,11 @@ static void dma_tasklet(unsigned long data)
1111 1167
1112 /* Get first active entry from list */ 1168 /* Get first active entry from list */
1113 d40d = d40_first_active_get(d40c); 1169 d40d = d40_first_active_get(d40c);
1114
1115 if (d40d == NULL) 1170 if (d40d == NULL)
1116 goto err; 1171 goto err;
1117 1172
1118 d40c->completed = d40d->txd.cookie; 1173 if (!d40d->cyclic)
1174 d40c->completed = d40d->txd.cookie;
1119 1175
1120 /* 1176 /*
1121 * If terminating a channel pending_tx is set to zero. 1177 * If terminating a channel pending_tx is set to zero.
@@ -1130,16 +1186,18 @@ static void dma_tasklet(unsigned long data)
1130 callback = d40d->txd.callback; 1186 callback = d40d->txd.callback;
1131 callback_param = d40d->txd.callback_param; 1187 callback_param = d40d->txd.callback_param;
1132 1188
1133 if (async_tx_test_ack(&d40d->txd)) { 1189 if (!d40d->cyclic) {
1134 d40_pool_lli_free(d40d); 1190 if (async_tx_test_ack(&d40d->txd)) {
1135 d40_desc_remove(d40d); 1191 d40_pool_lli_free(d40c, d40d);
1136 d40_desc_free(d40c, d40d);
1137 } else {
1138 if (!d40d->is_in_client_list) {
1139 d40_desc_remove(d40d); 1192 d40_desc_remove(d40d);
1140 d40_lcla_free_all(d40c, d40d); 1193 d40_desc_free(d40c, d40d);
1141 list_add_tail(&d40d->node, &d40c->client); 1194 } else {
1142 d40d->is_in_client_list = true; 1195 if (!d40d->is_in_client_list) {
1196 d40_desc_remove(d40d);
1197 d40_lcla_free_all(d40c, d40d);
1198 list_add_tail(&d40d->node, &d40c->client);
1199 d40d->is_in_client_list = true;
1200 }
1143 } 1201 }
1144 } 1202 }
1145 1203
@@ -1216,9 +1274,8 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
1216 if (!il[row].is_error) 1274 if (!il[row].is_error)
1217 dma_tc_handle(d40c); 1275 dma_tc_handle(d40c);
1218 else 1276 else
1219 dev_err(base->dev, 1277 d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
1220 "[%s] IRQ chan: %ld offset %d idx %d\n", 1278 chan, il[row].offset, idx);
1221 __func__, chan, il[row].offset, idx);
1222 1279
1223 spin_unlock(&d40c->lock); 1280 spin_unlock(&d40c->lock);
1224 } 1281 }
@@ -1237,8 +1294,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
1237 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1294 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
1238 1295
1239 if (!conf->dir) { 1296 if (!conf->dir) {
1240 dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", 1297 chan_err(d40c, "Invalid direction.\n");
1241 __func__);
1242 res = -EINVAL; 1298 res = -EINVAL;
1243 } 1299 }
1244 1300
@@ -1246,46 +1302,40 @@ static int d40_validate_conf(struct d40_chan *d40c,
1246 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && 1302 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
1247 d40c->runtime_addr == 0) { 1303 d40c->runtime_addr == 0) {
1248 1304
1249 dev_err(&d40c->chan.dev->device, 1305 chan_err(d40c, "Invalid TX channel address (%d)\n",
1250 "[%s] Invalid TX channel address (%d)\n", 1306 conf->dst_dev_type);
1251 __func__, conf->dst_dev_type);
1252 res = -EINVAL; 1307 res = -EINVAL;
1253 } 1308 }
1254 1309
1255 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && 1310 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
1256 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && 1311 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
1257 d40c->runtime_addr == 0) { 1312 d40c->runtime_addr == 0) {
1258 dev_err(&d40c->chan.dev->device, 1313 chan_err(d40c, "Invalid RX channel address (%d)\n",
1259 "[%s] Invalid RX channel address (%d)\n", 1314 conf->src_dev_type);
1260 __func__, conf->src_dev_type);
1261 res = -EINVAL; 1315 res = -EINVAL;
1262 } 1316 }
1263 1317
1264 if (conf->dir == STEDMA40_MEM_TO_PERIPH && 1318 if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
1265 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 1319 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
1266 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 1320 chan_err(d40c, "Invalid dst\n");
1267 __func__);
1268 res = -EINVAL; 1321 res = -EINVAL;
1269 } 1322 }
1270 1323
1271 if (conf->dir == STEDMA40_PERIPH_TO_MEM && 1324 if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
1272 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 1325 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
1273 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 1326 chan_err(d40c, "Invalid src\n");
1274 __func__);
1275 res = -EINVAL; 1327 res = -EINVAL;
1276 } 1328 }
1277 1329
1278 if (src_event_group == STEDMA40_DEV_SRC_MEMORY && 1330 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
1279 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { 1331 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
1280 dev_err(&d40c->chan.dev->device, 1332 chan_err(d40c, "No event line\n");
1281 "[%s] No event line\n", __func__);
1282 res = -EINVAL; 1333 res = -EINVAL;
1283 } 1334 }
1284 1335
1285 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && 1336 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
1286 (src_event_group != dst_event_group)) { 1337 (src_event_group != dst_event_group)) {
1287 dev_err(&d40c->chan.dev->device, 1338 chan_err(d40c, "Invalid event group\n");
1288 "[%s] Invalid event group\n", __func__);
1289 res = -EINVAL; 1339 res = -EINVAL;
1290 } 1340 }
1291 1341
@@ -1294,9 +1344,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
1294 * DMAC HW supports it. Will be added to this driver, 1344 * DMAC HW supports it. Will be added to this driver,
1295 * in case any dma client requires it. 1345 * in case any dma client requires it.
1296 */ 1346 */
1297 dev_err(&d40c->chan.dev->device, 1347 chan_err(d40c, "periph to periph not supported\n");
1298 "[%s] periph to periph not supported\n",
1299 __func__);
1300 res = -EINVAL; 1348 res = -EINVAL;
1301 } 1349 }
1302 1350
@@ -1309,9 +1357,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
1309 * src (burst x width) == dst (burst x width) 1357 * src (burst x width) == dst (burst x width)
1310 */ 1358 */
1311 1359
1312 dev_err(&d40c->chan.dev->device, 1360 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
1313 "[%s] src (burst x width) != dst (burst x width)\n",
1314 __func__);
1315 res = -EINVAL; 1361 res = -EINVAL;
1316 } 1362 }
1317 1363
@@ -1514,8 +1560,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
1514 dma_has_cap(DMA_SLAVE, cap)) { 1560 dma_has_cap(DMA_SLAVE, cap)) {
1515 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 1561 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1516 } else { 1562 } else {
1517 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", 1563 chan_err(d40c, "No memcpy\n");
1518 __func__);
1519 return -EINVAL; 1564 return -EINVAL;
1520 } 1565 }
1521 1566
@@ -1540,21 +1585,19 @@ static int d40_free_dma(struct d40_chan *d40c)
1540 /* Release client owned descriptors */ 1585 /* Release client owned descriptors */
1541 if (!list_empty(&d40c->client)) 1586 if (!list_empty(&d40c->client))
1542 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1587 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1543 d40_pool_lli_free(d); 1588 d40_pool_lli_free(d40c, d);
1544 d40_desc_remove(d); 1589 d40_desc_remove(d);
1545 d40_desc_free(d40c, d); 1590 d40_desc_free(d40c, d);
1546 } 1591 }
1547 1592
1548 if (phy == NULL) { 1593 if (phy == NULL) {
1549 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", 1594 chan_err(d40c, "phy == null\n");
1550 __func__);
1551 return -EINVAL; 1595 return -EINVAL;
1552 } 1596 }
1553 1597
1554 if (phy->allocated_src == D40_ALLOC_FREE && 1598 if (phy->allocated_src == D40_ALLOC_FREE &&
1555 phy->allocated_dst == D40_ALLOC_FREE) { 1599 phy->allocated_dst == D40_ALLOC_FREE) {
1556 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", 1600 chan_err(d40c, "channel already free\n");
1557 __func__);
1558 return -EINVAL; 1601 return -EINVAL;
1559 } 1602 }
1560 1603
@@ -1566,19 +1609,17 @@ static int d40_free_dma(struct d40_chan *d40c)
1566 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1609 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1567 is_src = true; 1610 is_src = true;
1568 } else { 1611 } else {
1569 dev_err(&d40c->chan.dev->device, 1612 chan_err(d40c, "Unknown direction\n");
1570 "[%s] Unknown direction\n", __func__);
1571 return -EINVAL; 1613 return -EINVAL;
1572 } 1614 }
1573 1615
1574 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1616 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1575 if (res) { 1617 if (res) {
1576 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", 1618 chan_err(d40c, "suspend failed\n");
1577 __func__);
1578 return res; 1619 return res;
1579 } 1620 }
1580 1621
1581 if (d40c->log_num != D40_PHY_CHAN) { 1622 if (chan_is_logical(d40c)) {
1582 /* Release logical channel, deactivate the event line */ 1623 /* Release logical channel, deactivate the event line */
1583 1624
1584 d40_config_set_event(d40c, false); 1625 d40_config_set_event(d40c, false);
@@ -1594,9 +1635,8 @@ static int d40_free_dma(struct d40_chan *d40c)
1594 res = d40_channel_execute_command(d40c, 1635 res = d40_channel_execute_command(d40c,
1595 D40_DMA_RUN); 1636 D40_DMA_RUN);
1596 if (res) { 1637 if (res) {
1597 dev_err(&d40c->chan.dev->device, 1638 chan_err(d40c,
1598 "[%s] Executing RUN command\n", 1639 "Executing RUN command\n");
1599 __func__);
1600 return res; 1640 return res;
1601 } 1641 }
1602 } 1642 }
@@ -1609,8 +1649,7 @@ static int d40_free_dma(struct d40_chan *d40c)
1609 /* Release physical channel */ 1649 /* Release physical channel */
1610 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1650 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1611 if (res) { 1651 if (res) {
1612 dev_err(&d40c->chan.dev->device, 1652 chan_err(d40c, "Failed to stop channel\n");
1613 "[%s] Failed to stop channel\n", __func__);
1614 return res; 1653 return res;
1615 } 1654 }
1616 d40c->phy_chan = NULL; 1655 d40c->phy_chan = NULL;
@@ -1622,6 +1661,7 @@ static int d40_free_dma(struct d40_chan *d40c)
1622 1661
1623static bool d40_is_paused(struct d40_chan *d40c) 1662static bool d40_is_paused(struct d40_chan *d40c)
1624{ 1663{
1664 void __iomem *chanbase = chan_base(d40c);
1625 bool is_paused = false; 1665 bool is_paused = false;
1626 unsigned long flags; 1666 unsigned long flags;
1627 void __iomem *active_reg; 1667 void __iomem *active_reg;
@@ -1630,7 +1670,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
1630 1670
1631 spin_lock_irqsave(&d40c->lock, flags); 1671 spin_lock_irqsave(&d40c->lock, flags);
1632 1672
1633 if (d40c->log_num == D40_PHY_CHAN) { 1673 if (chan_is_physical(d40c)) {
1634 if (d40c->phy_chan->num % 2 == 0) 1674 if (d40c->phy_chan->num % 2 == 0)
1635 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1675 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1636 else 1676 else
@@ -1648,17 +1688,12 @@ static bool d40_is_paused(struct d40_chan *d40c)
1648 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1688 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1649 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1689 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1650 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1690 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1651 status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1691 status = readl(chanbase + D40_CHAN_REG_SDLNK);
1652 d40c->phy_chan->num * D40_DREG_PCDELTA +
1653 D40_CHAN_REG_SDLNK);
1654 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1692 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1655 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1693 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1656 status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1694 status = readl(chanbase + D40_CHAN_REG_SSLNK);
1657 d40c->phy_chan->num * D40_DREG_PCDELTA +
1658 D40_CHAN_REG_SSLNK);
1659 } else { 1695 } else {
1660 dev_err(&d40c->chan.dev->device, 1696 chan_err(d40c, "Unknown direction\n");
1661 "[%s] Unknown direction\n", __func__);
1662 goto _exit; 1697 goto _exit;
1663 } 1698 }
1664 1699
@@ -1688,114 +1723,184 @@ static u32 stedma40_residue(struct dma_chan *chan)
1688 return bytes_left; 1723 return bytes_left;
1689} 1724}
1690 1725
1691struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1726static int
1692 struct scatterlist *sgl_dst, 1727d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
1693 struct scatterlist *sgl_src, 1728 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1694 unsigned int sgl_len, 1729 unsigned int sg_len, dma_addr_t src_dev_addr,
1695 unsigned long dma_flags) 1730 dma_addr_t dst_dev_addr)
1696{ 1731{
1697 int res; 1732 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1698 struct d40_desc *d40d; 1733 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1699 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1734 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1700 chan); 1735 int ret;
1701 unsigned long flags;
1702 1736
1703 if (d40c->phy_chan == NULL) { 1737 ret = d40_log_sg_to_lli(sg_src, sg_len,
1704 dev_err(&d40c->chan.dev->device, 1738 src_dev_addr,
1705 "[%s] Unallocated channel.\n", __func__); 1739 desc->lli_log.src,
1706 return ERR_PTR(-EINVAL); 1740 chan->log_def.lcsp1,
1707 } 1741 src_info->data_width,
1742 dst_info->data_width);
1708 1743
1709 spin_lock_irqsave(&d40c->lock, flags); 1744 ret = d40_log_sg_to_lli(sg_dst, sg_len,
1710 d40d = d40_desc_get(d40c); 1745 dst_dev_addr,
1746 desc->lli_log.dst,
1747 chan->log_def.lcsp3,
1748 dst_info->data_width,
1749 src_info->data_width);
1711 1750
1712 if (d40d == NULL) 1751 return ret < 0 ? ret : 0;
1752}
1753
1754static int
1755d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
1756 struct scatterlist *sg_src, struct scatterlist *sg_dst,
1757 unsigned int sg_len, dma_addr_t src_dev_addr,
1758 dma_addr_t dst_dev_addr)
1759{
1760 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1761 struct stedma40_half_channel_info *src_info = &cfg->src_info;
1762 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
1763 unsigned long flags = 0;
1764 int ret;
1765
1766 if (desc->cyclic)
1767 flags |= LLI_CYCLIC | LLI_TERM_INT;
1768
1769 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
1770 desc->lli_phy.src,
1771 virt_to_phys(desc->lli_phy.src),
1772 chan->src_def_cfg,
1773 src_info, dst_info, flags);
1774
1775 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
1776 desc->lli_phy.dst,
1777 virt_to_phys(desc->lli_phy.dst),
1778 chan->dst_def_cfg,
1779 dst_info, src_info, flags);
1780
1781 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
1782 desc->lli_pool.size, DMA_TO_DEVICE);
1783
1784 return ret < 0 ? ret : 0;
1785}
1786
1787
1788static struct d40_desc *
1789d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
1790 unsigned int sg_len, unsigned long dma_flags)
1791{
1792 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1793 struct d40_desc *desc;
1794 int ret;
1795
1796 desc = d40_desc_get(chan);
1797 if (!desc)
1798 return NULL;
1799
1800 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
1801 cfg->dst_info.data_width);
1802 if (desc->lli_len < 0) {
1803 chan_err(chan, "Unaligned size\n");
1713 goto err; 1804 goto err;
1805 }
1714 1806
1715 d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, 1807 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
1716 d40c->dma_cfg.src_info.data_width, 1808 if (ret < 0) {
1717 d40c->dma_cfg.dst_info.data_width); 1809 chan_err(chan, "Could not allocate lli\n");
1718 if (d40d->lli_len < 0) {
1719 dev_err(&d40c->chan.dev->device,
1720 "[%s] Unaligned size\n", __func__);
1721 goto err; 1810 goto err;
1722 } 1811 }
1723 1812
1724 d40d->lli_current = 0;
1725 d40d->txd.flags = dma_flags;
1726 1813
1727 if (d40c->log_num != D40_PHY_CHAN) { 1814 desc->lli_current = 0;
1815 desc->txd.flags = dma_flags;
1816 desc->txd.tx_submit = d40_tx_submit;
1728 1817
1729 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 1818 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
1730 dev_err(&d40c->chan.dev->device,
1731 "[%s] Out of memory\n", __func__);
1732 goto err;
1733 }
1734 1819
1735 (void) d40_log_sg_to_lli(sgl_src, 1820 return desc;
1736 sgl_len, 1821
1737 d40d->lli_log.src, 1822err:
1738 d40c->log_def.lcsp1, 1823 d40_desc_free(chan, desc);
1739 d40c->dma_cfg.src_info.data_width, 1824 return NULL;
1740 d40c->dma_cfg.dst_info.data_width); 1825}
1741 1826
1742 (void) d40_log_sg_to_lli(sgl_dst, 1827static dma_addr_t
1743 sgl_len, 1828d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
1744 d40d->lli_log.dst, 1829{
1745 d40c->log_def.lcsp3, 1830 struct stedma40_platform_data *plat = chan->base->plat_data;
1746 d40c->dma_cfg.dst_info.data_width, 1831 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
1747 d40c->dma_cfg.src_info.data_width); 1832 dma_addr_t addr;
1748 } else {
1749 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1750 dev_err(&d40c->chan.dev->device,
1751 "[%s] Out of memory\n", __func__);
1752 goto err;
1753 }
1754 1833
1755 res = d40_phy_sg_to_lli(sgl_src, 1834 if (chan->runtime_addr)
1756 sgl_len, 1835 return chan->runtime_addr;
1757 0,
1758 d40d->lli_phy.src,
1759 virt_to_phys(d40d->lli_phy.src),
1760 d40c->src_def_cfg,
1761 d40c->dma_cfg.src_info.data_width,
1762 d40c->dma_cfg.dst_info.data_width,
1763 d40c->dma_cfg.src_info.psize);
1764 1836
1765 if (res < 0) 1837 if (direction == DMA_FROM_DEVICE)
1766 goto err; 1838 addr = plat->dev_rx[cfg->src_dev_type];
1839 else if (direction == DMA_TO_DEVICE)
1840 addr = plat->dev_tx[cfg->dst_dev_type];
1767 1841
1768 res = d40_phy_sg_to_lli(sgl_dst, 1842 return addr;
1769 sgl_len, 1843}
1770 0,
1771 d40d->lli_phy.dst,
1772 virt_to_phys(d40d->lli_phy.dst),
1773 d40c->dst_def_cfg,
1774 d40c->dma_cfg.dst_info.data_width,
1775 d40c->dma_cfg.src_info.data_width,
1776 d40c->dma_cfg.dst_info.psize);
1777 1844
1778 if (res < 0) 1845static struct dma_async_tx_descriptor *
1779 goto err; 1846d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
1847 struct scatterlist *sg_dst, unsigned int sg_len,
1848 enum dma_data_direction direction, unsigned long dma_flags)
1849{
1850 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
1851 dma_addr_t src_dev_addr = 0;
1852 dma_addr_t dst_dev_addr = 0;
1853 struct d40_desc *desc;
1854 unsigned long flags;
1855 int ret;
1780 1856
1781 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1857 if (!chan->phy_chan) {
1782 d40d->lli_pool.size, DMA_TO_DEVICE); 1858 chan_err(chan, "Cannot prepare unallocated channel\n");
1859 return NULL;
1783 } 1860 }
1784 1861
1785 dma_async_tx_descriptor_init(&d40d->txd, chan);
1786 1862
1787 d40d->txd.tx_submit = d40_tx_submit; 1863 spin_lock_irqsave(&chan->lock, flags);
1788 1864
1789 spin_unlock_irqrestore(&d40c->lock, flags); 1865 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
1866 if (desc == NULL)
1867 goto err;
1868
1869 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
1870 desc->cyclic = true;
1871
1872 if (direction != DMA_NONE) {
1873 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
1874
1875 if (direction == DMA_FROM_DEVICE)
1876 src_dev_addr = dev_addr;
1877 else if (direction == DMA_TO_DEVICE)
1878 dst_dev_addr = dev_addr;
1879 }
1880
1881 if (chan_is_logical(chan))
1882 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
1883 sg_len, src_dev_addr, dst_dev_addr);
1884 else
1885 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
1886 sg_len, src_dev_addr, dst_dev_addr);
1887
1888 if (ret) {
1889 chan_err(chan, "Failed to prepare %s sg job: %d\n",
1890 chan_is_logical(chan) ? "log" : "phy", ret);
1891 goto err;
1892 }
1893
1894 spin_unlock_irqrestore(&chan->lock, flags);
1895
1896 return &desc->txd;
1790 1897
1791 return &d40d->txd;
1792err: 1898err:
1793 if (d40d) 1899 if (desc)
1794 d40_desc_free(d40c, d40d); 1900 d40_desc_free(chan, desc);
1795 spin_unlock_irqrestore(&d40c->lock, flags); 1901 spin_unlock_irqrestore(&chan->lock, flags);
1796 return NULL; 1902 return NULL;
1797} 1903}
1798EXPORT_SYMBOL(stedma40_memcpy_sg);
1799 1904
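The desc->cyclic test above works because dma40_prep_dma_cyclic() (further down in this diff) terminates its scatterlist with a chain entry looping back to entry 0, so sg_next() of the last real entry returns the first one again. A self-contained sketch of building and detecting such a ring, assuming the standard scatterlist encoding (bit 0 of page_link marks a chain pointer, bit 1 the end marker):

	#include <linux/scatterlist.h>

	static bool sg_ring_demo(void)
	{
		struct scatterlist sg[3];	/* 2 periods + 1 chain slot */

		sg_init_table(sg, 3);

		/* Turn the terminator into a chain entry pointing at sg[0]. */
		sg[2].offset = 0;
		sg[2].length = 0;
		sg[2].page_link = ((unsigned long)sg | 0x01) & ~0x02;

		/* Walking past the last real entry lands on sg[0] again. */
		return sg_next(&sg[1]) == sg;	/* true */
	}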
1800bool stedma40_filter(struct dma_chan *chan, void *data) 1905bool stedma40_filter(struct dma_chan *chan, void *data)
1801{ 1906{
@@ -1818,6 +1923,38 @@ bool stedma40_filter(struct dma_chan *chan, void *data)
1818} 1923}
1819EXPORT_SYMBOL(stedma40_filter); 1924EXPORT_SYMBOL(stedma40_filter);
1820 1925
1926static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
1927{
1928 bool realtime = d40c->dma_cfg.realtime;
1929 bool highprio = d40c->dma_cfg.high_priority;
1930 u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1;
1931 u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
1932 u32 event = D40_TYPE_TO_EVENT(dev_type);
1933 u32 group = D40_TYPE_TO_GROUP(dev_type);
1934 u32 bit = 1 << event;
1935
1936 /* Destination event lines are stored in the upper halfword */
1937 if (!src)
1938 bit <<= 16;
1939
1940 writel(bit, d40c->base->virtbase + prioreg + group * 4);
1941 writel(bit, d40c->base->virtbase + rtreg + group * 4);
1942}
1943
1944static void d40_set_prio_realtime(struct d40_chan *d40c)
1945{
1946 if (d40c->base->rev < 3)
1947 return;
1948
1949 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
1950 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1951 __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
1952
1953 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
1954 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
1955 __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
1956}
1957
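The priority (PSEG/PCEG) and realtime (RSEG/RCEG) registers written above are arrays indexed by event group, with source event lines in the low halfword and destination event lines in the high halfword. A worked sketch of the bit placement, assuming D40_TYPE_TO_EVENT() and D40_TYPE_TO_GROUP() split dev_type into remainder and quotient modulo 16:

	/* dev_type 38: event = 38 % 16 = 6, group = 38 / 16 = 2 */
	u32 event   = 38 % 16;
	u32 group   = 38 / 16;
	u32 src_bit = 1 << event;		/* bit 6: low halfword */
	u32 dst_bit = (1 << event) << 16;	/* bit 22: high halfword */
	/* written to virtbase + prioreg + group * 4, likewise rtreg */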
1821/* DMA ENGINE functions */ 1958/* DMA ENGINE functions */
1822static int d40_alloc_chan_resources(struct dma_chan *chan) 1959static int d40_alloc_chan_resources(struct dma_chan *chan)
1823{ 1960{
@@ -1834,9 +1971,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
1834 if (!d40c->configured) { 1971 if (!d40c->configured) {
1835 err = d40_config_memcpy(d40c); 1972 err = d40_config_memcpy(d40c);
1836 if (err) { 1973 if (err) {
1837 dev_err(&d40c->chan.dev->device, 1974 chan_err(d40c, "Failed to configure memcpy channel\n");
1838 "[%s] Failed to configure memcpy channel\n",
1839 __func__);
1840 goto fail; 1975 goto fail;
1841 } 1976 }
1842 } 1977 }
@@ -1844,16 +1979,17 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
1844 1979
1845 err = d40_allocate_channel(d40c); 1980 err = d40_allocate_channel(d40c);
1846 if (err) { 1981 if (err) {
1847 dev_err(&d40c->chan.dev->device, 1982 chan_err(d40c, "Failed to allocate channel\n");
1848 "[%s] Failed to allocate channel\n", __func__);
1849 goto fail; 1983 goto fail;
1850 } 1984 }
1851 1985
1852 /* Fill in basic CFG register values */ 1986 /* Fill in basic CFG register values */
1853 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 1987 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1854 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); 1988 &d40c->dst_def_cfg, chan_is_logical(d40c));
1855 1989
1856 if (d40c->log_num != D40_PHY_CHAN) { 1990 d40_set_prio_realtime(d40c);
1991
1992 if (chan_is_logical(d40c)) {
1857 d40_log_cfg(&d40c->dma_cfg, 1993 d40_log_cfg(&d40c->dma_cfg,
1858 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1994 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1859 1995
@@ -1886,8 +2022,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
1886 unsigned long flags; 2022 unsigned long flags;
1887 2023
1888 if (d40c->phy_chan == NULL) { 2024 if (d40c->phy_chan == NULL) {
1889 dev_err(&d40c->chan.dev->device, 2025 chan_err(d40c, "Cannot free unallocated channel\n");
1890 "[%s] Cannot free unallocated channel\n", __func__);
1891 return; 2026 return;
1892 } 2027 }
1893 2028
@@ -1897,8 +2032,7 @@ static void d40_free_chan_resources(struct dma_chan *chan)
1897 err = d40_free_dma(d40c); 2032 err = d40_free_dma(d40c);
1898 2033
1899 if (err) 2034 if (err)
1900 dev_err(&d40c->chan.dev->device, 2035 chan_err(d40c, "Failed to free channel\n");
1901 "[%s] Failed to free channel\n", __func__);
1902 spin_unlock_irqrestore(&d40c->lock, flags); 2036 spin_unlock_irqrestore(&d40c->lock, flags);
1903} 2037}
1904 2038
@@ -1908,251 +2042,31 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1908 size_t size, 2042 size_t size,
1909 unsigned long dma_flags) 2043 unsigned long dma_flags)
1910{ 2044{
1911 struct d40_desc *d40d; 2045 struct scatterlist dst_sg;
1912 struct d40_chan *d40c = container_of(chan, struct d40_chan, 2046 struct scatterlist src_sg;
1913 chan);
1914 unsigned long flags;
1915
1916 if (d40c->phy_chan == NULL) {
1917 dev_err(&d40c->chan.dev->device,
1918 "[%s] Channel is not allocated.\n", __func__);
1919 return ERR_PTR(-EINVAL);
1920 }
1921
1922 spin_lock_irqsave(&d40c->lock, flags);
1923 d40d = d40_desc_get(d40c);
1924
1925 if (d40d == NULL) {
1926 dev_err(&d40c->chan.dev->device,
1927 "[%s] Descriptor is NULL\n", __func__);
1928 goto err;
1929 }
1930 2047
1931 d40d->txd.flags = dma_flags; 2048 sg_init_table(&dst_sg, 1);
1932 d40d->lli_len = d40_size_2_dmalen(size, 2049 sg_init_table(&src_sg, 1);
1933 d40c->dma_cfg.src_info.data_width,
1934 d40c->dma_cfg.dst_info.data_width);
1935 if (d40d->lli_len < 0) {
1936 dev_err(&d40c->chan.dev->device,
1937 "[%s] Unaligned size\n", __func__);
1938 goto err;
1939 }
1940 2050
2051 sg_dma_address(&dst_sg) = dst;
2052 sg_dma_address(&src_sg) = src;
1941 2053
1942 dma_async_tx_descriptor_init(&d40d->txd, chan); 2054 sg_dma_len(&dst_sg) = size;
2055 sg_dma_len(&src_sg) = size;
1943 2056
1944 d40d->txd.tx_submit = d40_tx_submit; 2057 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
1945
1946 if (d40c->log_num != D40_PHY_CHAN) {
1947
1948 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
1949 dev_err(&d40c->chan.dev->device,
1950 "[%s] Out of memory\n", __func__);
1951 goto err;
1952 }
1953 d40d->lli_current = 0;
1954
1955 if (d40_log_buf_to_lli(d40d->lli_log.src,
1956 src,
1957 size,
1958 d40c->log_def.lcsp1,
1959 d40c->dma_cfg.src_info.data_width,
1960 d40c->dma_cfg.dst_info.data_width,
1961 true) == NULL)
1962 goto err;
1963
1964 if (d40_log_buf_to_lli(d40d->lli_log.dst,
1965 dst,
1966 size,
1967 d40c->log_def.lcsp3,
1968 d40c->dma_cfg.dst_info.data_width,
1969 d40c->dma_cfg.src_info.data_width,
1970 true) == NULL)
1971 goto err;
1972
1973 } else {
1974
1975 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
1976 dev_err(&d40c->chan.dev->device,
1977 "[%s] Out of memory\n", __func__);
1978 goto err;
1979 }
1980
1981 if (d40_phy_buf_to_lli(d40d->lli_phy.src,
1982 src,
1983 size,
1984 d40c->dma_cfg.src_info.psize,
1985 0,
1986 d40c->src_def_cfg,
1987 true,
1988 d40c->dma_cfg.src_info.data_width,
1989 d40c->dma_cfg.dst_info.data_width,
1990 false) == NULL)
1991 goto err;
1992
1993 if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
1994 dst,
1995 size,
1996 d40c->dma_cfg.dst_info.psize,
1997 0,
1998 d40c->dst_def_cfg,
1999 true,
2000 d40c->dma_cfg.dst_info.data_width,
2001 d40c->dma_cfg.src_info.data_width,
2002 false) == NULL)
2003 goto err;
2004
2005 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2006 d40d->lli_pool.size, DMA_TO_DEVICE);
2007 }
2008
2009 spin_unlock_irqrestore(&d40c->lock, flags);
2010 return &d40d->txd;
2011
2012err:
2013 if (d40d)
2014 d40_desc_free(d40c, d40d);
2015 spin_unlock_irqrestore(&d40c->lock, flags);
2016 return NULL;
2017} 2058}
2018 2059
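With memcpy folded into the common sg path, a plain buffer copy is just a pair of one-entry scatterlists internally; clients keep using the standard dmaengine entry point. A minimal client-side sketch with a hypothetical caller, assuming dst and src are already bus addresses:

	#include <linux/dmaengine.h>

	static dma_cookie_t foo_memcpy(struct dma_chan *chan, dma_addr_t dst,
				       dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *txd;

		/* Lands in d40_prep_memcpy() above. */
		txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							   DMA_PREP_INTERRUPT);
		if (!txd)
			return -EBUSY;

		return txd->tx_submit(txd);
	}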
2019static struct dma_async_tx_descriptor * 2060static struct dma_async_tx_descriptor *
2020d40_prep_sg(struct dma_chan *chan, 2061d40_prep_memcpy_sg(struct dma_chan *chan,
2021 struct scatterlist *dst_sg, unsigned int dst_nents, 2062 struct scatterlist *dst_sg, unsigned int dst_nents,
2022 struct scatterlist *src_sg, unsigned int src_nents, 2063 struct scatterlist *src_sg, unsigned int src_nents,
2023 unsigned long dma_flags) 2064 unsigned long dma_flags)
2024{ 2065{
2025 if (dst_nents != src_nents) 2066 if (dst_nents != src_nents)
2026 return NULL; 2067 return NULL;
2027 2068
2028 return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); 2069 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
2029}
2030
2031static int d40_prep_slave_sg_log(struct d40_desc *d40d,
2032 struct d40_chan *d40c,
2033 struct scatterlist *sgl,
2034 unsigned int sg_len,
2035 enum dma_data_direction direction,
2036 unsigned long dma_flags)
2037{
2038 dma_addr_t dev_addr = 0;
2039 int total_size;
2040
2041 d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
2042 d40c->dma_cfg.src_info.data_width,
2043 d40c->dma_cfg.dst_info.data_width);
2044 if (d40d->lli_len < 0) {
2045 dev_err(&d40c->chan.dev->device,
2046 "[%s] Unaligned size\n", __func__);
2047 return -EINVAL;
2048 }
2049
2050 if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
2051 dev_err(&d40c->chan.dev->device,
2052 "[%s] Out of memory\n", __func__);
2053 return -ENOMEM;
2054 }
2055
2056 d40d->lli_current = 0;
2057
2058 if (direction == DMA_FROM_DEVICE)
2059 if (d40c->runtime_addr)
2060 dev_addr = d40c->runtime_addr;
2061 else
2062 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2063 else if (direction == DMA_TO_DEVICE)
2064 if (d40c->runtime_addr)
2065 dev_addr = d40c->runtime_addr;
2066 else
2067 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2068
2069 else
2070 return -EINVAL;
2071
2072 total_size = d40_log_sg_to_dev(sgl, sg_len,
2073 &d40d->lli_log,
2074 &d40c->log_def,
2075 d40c->dma_cfg.src_info.data_width,
2076 d40c->dma_cfg.dst_info.data_width,
2077 direction,
2078 dev_addr);
2079
2080 if (total_size < 0)
2081 return -EINVAL;
2082
2083 return 0;
2084}
2085
2086static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
2087 struct d40_chan *d40c,
2088 struct scatterlist *sgl,
2089 unsigned int sgl_len,
2090 enum dma_data_direction direction,
2091 unsigned long dma_flags)
2092{
2093 dma_addr_t src_dev_addr;
2094 dma_addr_t dst_dev_addr;
2095 int res;
2096
2097 d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
2098 d40c->dma_cfg.src_info.data_width,
2099 d40c->dma_cfg.dst_info.data_width);
2100 if (d40d->lli_len < 0) {
2101 dev_err(&d40c->chan.dev->device,
2102 "[%s] Unaligned size\n", __func__);
2103 return -EINVAL;
2104 }
2105
2106 if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
2107 dev_err(&d40c->chan.dev->device,
2108 "[%s] Out of memory\n", __func__);
2109 return -ENOMEM;
2110 }
2111
2112 d40d->lli_current = 0;
2113
2114 if (direction == DMA_FROM_DEVICE) {
2115 dst_dev_addr = 0;
2116 if (d40c->runtime_addr)
2117 src_dev_addr = d40c->runtime_addr;
2118 else
2119 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
2120 } else if (direction == DMA_TO_DEVICE) {
2121 if (d40c->runtime_addr)
2122 dst_dev_addr = d40c->runtime_addr;
2123 else
2124 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
2125 src_dev_addr = 0;
2126 } else
2127 return -EINVAL;
2128
2129 res = d40_phy_sg_to_lli(sgl,
2130 sgl_len,
2131 src_dev_addr,
2132 d40d->lli_phy.src,
2133 virt_to_phys(d40d->lli_phy.src),
2134 d40c->src_def_cfg,
2135 d40c->dma_cfg.src_info.data_width,
2136 d40c->dma_cfg.dst_info.data_width,
2137 d40c->dma_cfg.src_info.psize);
2138 if (res < 0)
2139 return res;
2140
2141 res = d40_phy_sg_to_lli(sgl,
2142 sgl_len,
2143 dst_dev_addr,
2144 d40d->lli_phy.dst,
2145 virt_to_phys(d40d->lli_phy.dst),
2146 d40c->dst_def_cfg,
2147 d40c->dma_cfg.dst_info.data_width,
2148 d40c->dma_cfg.src_info.data_width,
2149 d40c->dma_cfg.dst_info.psize);
2150 if (res < 0)
2151 return res;
2152
2153 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
2154 d40d->lli_pool.size, DMA_TO_DEVICE);
2155 return 0;
2156} 2070}
2157 2071
2158static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, 2072static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
@@ -2161,52 +2075,40 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2161 enum dma_data_direction direction, 2075 enum dma_data_direction direction,
2162 unsigned long dma_flags) 2076 unsigned long dma_flags)
2163{ 2077{
2164 struct d40_desc *d40d; 2078 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
2165 struct d40_chan *d40c = container_of(chan, struct d40_chan, 2079 return NULL;
2166 chan);
2167 unsigned long flags;
2168 int err;
2169
2170 if (d40c->phy_chan == NULL) {
2171 dev_err(&d40c->chan.dev->device,
2172 "[%s] Cannot prepare unallocated channel\n", __func__);
2173 return ERR_PTR(-EINVAL);
2174 }
2175 2080
2176 spin_lock_irqsave(&d40c->lock, flags); 2081 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
2177 d40d = d40_desc_get(d40c); 2082}
2178 2083
2179 if (d40d == NULL) 2084static struct dma_async_tx_descriptor *
2180 goto err; 2085dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2086 size_t buf_len, size_t period_len,
2087 enum dma_data_direction direction)
2088{
2089 unsigned int periods = buf_len / period_len;
2090 struct dma_async_tx_descriptor *txd;
2091 struct scatterlist *sg;
2092 int i;
2181 2093
2182 if (d40c->log_num != D40_PHY_CHAN) 2094 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
2183 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 2095 for (i = 0; i < periods; i++) {
2184 direction, dma_flags); 2096 sg_dma_address(&sg[i]) = dma_addr;
2185 else 2097 sg_dma_len(&sg[i]) = period_len;
2186 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, 2098 dma_addr += period_len;
2187 direction, dma_flags);
2188 if (err) {
2189 dev_err(&d40c->chan.dev->device,
2190 "[%s] Failed to prepare %s slave sg job: %d\n",
2191 __func__,
2192 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2193 goto err;
2194 } 2099 }
2195 2100
2196 d40d->txd.flags = dma_flags; 2101 sg[periods].offset = 0;
2102 sg[periods].length = 0;
2103 sg[periods].page_link =
2104 ((unsigned long)sg | 0x01) & ~0x02;
2197 2105
2198 dma_async_tx_descriptor_init(&d40d->txd, chan); 2106 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2107 DMA_PREP_INTERRUPT);
2199 2108
2200 d40d->txd.tx_submit = d40_tx_submit; 2109 kfree(sg);
2201 2110
2202 spin_unlock_irqrestore(&d40c->lock, flags); 2111 return txd;
2203 return &d40d->txd;
2204
2205err:
2206 if (d40d)
2207 d40_desc_free(d40c, d40d);
2208 spin_unlock_irqrestore(&d40c->lock, flags);
2209 return NULL;
2210} 2112}
2211 2113
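One caveat in dma40_prep_dma_cyclic() above: the kcalloc() return value is used without a NULL check, so an allocation failure would dereference a NULL scatterlist. A hardened version of that allocation step, as a sketch:

	sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;	/* no descriptor prepared on OOM */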
2212static enum dma_status d40_tx_status(struct dma_chan *chan, 2114static enum dma_status d40_tx_status(struct dma_chan *chan,
@@ -2219,9 +2121,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
2219 int ret; 2121 int ret;
2220 2122
2221 if (d40c->phy_chan == NULL) { 2123 if (d40c->phy_chan == NULL) {
2222 dev_err(&d40c->chan.dev->device, 2124 chan_err(d40c, "Cannot read status of unallocated channel\n");
2223 "[%s] Cannot read status of unallocated channel\n",
2224 __func__);
2225 return -EINVAL; 2125 return -EINVAL;
2226 } 2126 }
2227 2127
@@ -2245,8 +2145,7 @@ static void d40_issue_pending(struct dma_chan *chan)
2245 unsigned long flags; 2145 unsigned long flags;
2246 2146
2247 if (d40c->phy_chan == NULL) { 2147 if (d40c->phy_chan == NULL) {
2248 dev_err(&d40c->chan.dev->device, 2148 chan_err(d40c, "Channel is not allocated!\n");
2249 "[%s] Channel is not allocated!\n", __func__);
2250 return; 2149 return;
2251 } 2150 }
2252 2151
@@ -2339,7 +2238,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2339 return; 2238 return;
2340 } 2239 }
2341 2240
2342 if (d40c->log_num != D40_PHY_CHAN) { 2241 if (chan_is_logical(d40c)) {
2343 if (config_maxburst >= 16) 2242 if (config_maxburst >= 16)
2344 psize = STEDMA40_PSIZE_LOG_16; 2243 psize = STEDMA40_PSIZE_LOG_16;
2345 else if (config_maxburst >= 8) 2244 else if (config_maxburst >= 8)
@@ -2372,7 +2271,7 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2372 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2271 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2373 2272
2374 /* Fill in register values */ 2273 /* Fill in register values */
2375 if (d40c->log_num != D40_PHY_CHAN) 2274 if (chan_is_logical(d40c))
2376 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2275 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2377 else 2276 else
2378 d40_phy_cfg(cfg, &d40c->src_def_cfg, 2277 d40_phy_cfg(cfg, &d40c->src_def_cfg,
@@ -2393,25 +2292,20 @@ static void d40_set_runtime_config(struct dma_chan *chan,
2393static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2292static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2394 unsigned long arg) 2293 unsigned long arg)
2395{ 2294{
2396 unsigned long flags;
2397 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2295 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2398 2296
2399 if (d40c->phy_chan == NULL) { 2297 if (d40c->phy_chan == NULL) {
2400 dev_err(&d40c->chan.dev->device, 2298 chan_err(d40c, "Channel is not allocated!\n");
2401 "[%s] Channel is not allocated!\n", __func__);
2402 return -EINVAL; 2299 return -EINVAL;
2403 } 2300 }
2404 2301
2405 switch (cmd) { 2302 switch (cmd) {
2406 case DMA_TERMINATE_ALL: 2303 case DMA_TERMINATE_ALL:
2407 spin_lock_irqsave(&d40c->lock, flags); 2304 return d40_terminate_all(d40c);
2408 d40_term_all(d40c);
2409 spin_unlock_irqrestore(&d40c->lock, flags);
2410 return 0;
2411 case DMA_PAUSE: 2305 case DMA_PAUSE:
2412 return d40_pause(chan); 2306 return d40_pause(d40c);
2413 case DMA_RESUME: 2307 case DMA_RESUME:
2414 return d40_resume(chan); 2308 return d40_resume(d40c);
2415 case DMA_SLAVE_CONFIG: 2309 case DMA_SLAVE_CONFIG:
2416 d40_set_runtime_config(chan, 2310 d40_set_runtime_config(chan,
2417 (struct dma_slave_config *) arg); 2311 (struct dma_slave_config *) arg);
@@ -2456,6 +2350,35 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2456 } 2350 }
2457} 2351}
2458 2352
2353static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2354{
2355 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2356 dev->device_prep_slave_sg = d40_prep_slave_sg;
2357
2358 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2359 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2360
2361 /*
2362 * This controller can only access addresses at even
2363 * 32-bit boundaries, i.e. an alignment of 2^2 bytes.
2364 */
2365 dev->copy_align = 2;
2366 }
2367
2368 if (dma_has_cap(DMA_SG, dev->cap_mask))
2369 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2370
2371 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2372 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2373
2374 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2375 dev->device_free_chan_resources = d40_free_chan_resources;
2376 dev->device_issue_pending = d40_issue_pending;
2377 dev->device_tx_status = d40_tx_status;
2378 dev->device_control = d40_control;
2379 dev->dev = base->dev;
2380}
2381
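d40_ops_init() derives each exported dma_device's prep hooks from its cap_mask, keeping the slave, memcpy and combined instances consistent. Clients reach them through the usual capability-based request; a minimal sketch:

	#include <linux/dmaengine.h>

	static struct dma_chan *foo_get_cyclic_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_CYCLIC, mask);

		/* stedma40_filter() could narrow this to one event line. */
		return dma_request_channel(mask, NULL, NULL);
	}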
2459static int __init d40_dmaengine_init(struct d40_base *base, 2382static int __init d40_dmaengine_init(struct d40_base *base,
2460 int num_reserved_chans) 2383 int num_reserved_chans)
2461{ 2384{
@@ -2466,23 +2389,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2466 2389
2467 dma_cap_zero(base->dma_slave.cap_mask); 2390 dma_cap_zero(base->dma_slave.cap_mask);
2468 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2391 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2392 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2469 2393
2470 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2394 d40_ops_init(base, &base->dma_slave);
2471 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2472 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2473 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2474 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2475 base->dma_slave.device_tx_status = d40_tx_status;
2476 base->dma_slave.device_issue_pending = d40_issue_pending;
2477 base->dma_slave.device_control = d40_control;
2478 base->dma_slave.dev = base->dev;
2479 2395
2480 err = dma_async_device_register(&base->dma_slave); 2396 err = dma_async_device_register(&base->dma_slave);
2481 2397
2482 if (err) { 2398 if (err) {
2483 dev_err(base->dev, 2399 d40_err(base->dev, "Failed to register slave channels\n");
2484 "[%s] Failed to register slave channels\n",
2485 __func__);
2486 goto failure1; 2400 goto failure1;
2487 } 2401 }
2488 2402
@@ -2491,29 +2405,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2491 2405
2492 dma_cap_zero(base->dma_memcpy.cap_mask); 2406 dma_cap_zero(base->dma_memcpy.cap_mask);
2493 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2407 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2494 dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2408 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2495 2409
2496 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2410 d40_ops_init(base, &base->dma_memcpy);
2497 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2498 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2499 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2500 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2501 base->dma_memcpy.device_tx_status = d40_tx_status;
2502 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2503 base->dma_memcpy.device_control = d40_control;
2504 base->dma_memcpy.dev = base->dev;
2505 /*
2506 * This controller can only access addresses at even
2507 * 32-bit boundaries, i.e. 2^2 bytes.
2508 */
2509 base->dma_memcpy.copy_align = 2;
2510 2411
2511 err = dma_async_device_register(&base->dma_memcpy); 2412 err = dma_async_device_register(&base->dma_memcpy);
2512 2413
2513 if (err) { 2414 if (err) {
2514 dev_err(base->dev, 2415 d40_err(base->dev,
2515 "[%s] Failed to regsiter memcpy only channels\n", 2416 "Failed to regsiter memcpy only channels\n");
2516 __func__);
2517 goto failure2; 2417 goto failure2;
2518 } 2418 }
2519 2419
@@ -2523,24 +2423,15 @@ static int __init d40_dmaengine_init(struct d40_base *base,
2523 dma_cap_zero(base->dma_both.cap_mask); 2423 dma_cap_zero(base->dma_both.cap_mask);
2524 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2424 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2525 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2425 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2526 dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2426 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
2527 2427 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
2528 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2428
2529 base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2429 d40_ops_init(base, &base->dma_both);
2530 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2531 base->dma_slave.device_prep_dma_sg = d40_prep_sg;
2532 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2533 base->dma_both.device_tx_status = d40_tx_status;
2534 base->dma_both.device_issue_pending = d40_issue_pending;
2535 base->dma_both.device_control = d40_control;
2536 base->dma_both.dev = base->dev;
2537 base->dma_both.copy_align = 2;
2538 err = dma_async_device_register(&base->dma_both); 2430 err = dma_async_device_register(&base->dma_both);
2539 2431
2540 if (err) { 2432 if (err) {
2541 dev_err(base->dev, 2433 d40_err(base->dev,
2542 "[%s] Failed to register logical and physical capable channels\n", 2434 "Failed to register logical and physical capable channels\n");
2543 __func__);
2544 goto failure3; 2435 goto failure3;
2545 } 2436 }
2546 return 0; 2437 return 0;
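With the three dma_device instances above sharing one d40_ops_init() setup, consumers pick a channel through the usual dmaengine capability mask. A sketch, assuming a driver-specific filter (my_filter() and its parameter are illustrative, not part of this patch):

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* e.g. match on the providing device */
	return chan->device->dev == param;
}

static struct dma_chan *my_get_memcpy_chan(struct device *provider)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	return dma_request_channel(mask, my_filter, provider);
}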
@@ -2616,9 +2507,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2616 { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, 2507 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2617 /* 2508 /*
2618 * D40_DREG_PERIPHID2 Depends on HW revision: 2509 * D40_DREG_PERIPHID2 Depends on HW revision:
2619 * MOP500/HREF ED has 0x0008, 2510 * DB8500ed has 0x0008,
2620 * ? has 0x0018, 2511 * ? has 0x0018,
2621 * HREF V1 has 0x0028 2512 * DB8500v1 has 0x0028
2513 * DB8500v2 has 0x0038
2622 */ 2514 */
2623 { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, 2515 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2624 2516
@@ -2642,8 +2534,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2642 clk = clk_get(&pdev->dev, NULL); 2534 clk = clk_get(&pdev->dev, NULL);
2643 2535
2644 if (IS_ERR(clk)) { 2536 if (IS_ERR(clk)) {
2645 dev_err(&pdev->dev, "[%s] No matching clock found\n", 2537 d40_err(&pdev->dev, "No matching clock found\n");
2646 __func__);
2647 goto failure; 2538 goto failure;
2648 } 2539 }
2649 2540
@@ -2666,9 +2557,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2666 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2557 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2667 if (dma_id_regs[i].val != 2558 if (dma_id_regs[i].val !=
2668 readl(virtbase + dma_id_regs[i].reg)) { 2559 readl(virtbase + dma_id_regs[i].reg)) {
2669 dev_err(&pdev->dev, 2560 d40_err(&pdev->dev,
2670 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2561 "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2671 __func__,
2672 dma_id_regs[i].val, 2562 dma_id_regs[i].val,
2673 dma_id_regs[i].reg, 2563 dma_id_regs[i].reg,
2674 readl(virtbase + dma_id_regs[i].reg)); 2564 readl(virtbase + dma_id_regs[i].reg));
@@ -2681,9 +2571,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2681 2571
2682 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != 2572 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
2683 D40_HW_DESIGNER) { 2573 D40_HW_DESIGNER) {
2684 dev_err(&pdev->dev, 2574 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
2685 "[%s] Unknown designer! Got %x wanted %x\n", 2575 val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2686 __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK,
2687 D40_HW_DESIGNER); 2576 D40_HW_DESIGNER);
2688 goto failure; 2577 goto failure;
2689 } 2578 }
@@ -2713,7 +2602,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2713 sizeof(struct d40_chan), GFP_KERNEL); 2602 sizeof(struct d40_chan), GFP_KERNEL);
2714 2603
2715 if (base == NULL) { 2604 if (base == NULL) {
2716 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); 2605 d40_err(&pdev->dev, "Out of memory\n");
2717 goto failure; 2606 goto failure;
2718 } 2607 }
2719 2608
@@ -2860,6 +2749,7 @@ static void __init d40_hw_init(struct d40_base *base)
2860 2749
2861static int __init d40_lcla_allocate(struct d40_base *base) 2750static int __init d40_lcla_allocate(struct d40_base *base)
2862{ 2751{
2752 struct d40_lcla_pool *pool = &base->lcla_pool;
2863 unsigned long *page_list; 2753 unsigned long *page_list;
2864 int i, j; 2754 int i, j;
2865 int ret = 0; 2755 int ret = 0;
@@ -2885,9 +2775,8 @@ static int __init d40_lcla_allocate(struct d40_base *base)
2885 base->lcla_pool.pages); 2775 base->lcla_pool.pages);
2886 if (!page_list[i]) { 2776 if (!page_list[i]) {
2887 2777
2888 dev_err(base->dev, 2778 d40_err(base->dev, "Failed to allocate %d pages.\n",
2889 "[%s] Failed to allocate %d pages.\n", 2779 base->lcla_pool.pages);
2890 __func__, base->lcla_pool.pages);
2891 2780
2892 for (j = 0; j < i; j++) 2781 for (j = 0; j < i; j++)
2893 free_pages(page_list[j], base->lcla_pool.pages); 2782 free_pages(page_list[j], base->lcla_pool.pages);
@@ -2925,6 +2814,15 @@ static int __init d40_lcla_allocate(struct d40_base *base)
2925 LCLA_ALIGNMENT); 2814 LCLA_ALIGNMENT);
2926 } 2815 }
2927 2816
2817 pool->dma_addr = dma_map_single(base->dev, pool->base,
2818 SZ_1K * base->num_phy_chans,
2819 DMA_TO_DEVICE);
2820 if (dma_mapping_error(base->dev, pool->dma_addr)) {
2821 pool->dma_addr = 0;
2822 ret = -ENOMEM;
2823 goto failure;
2824 }
2825
2928 writel(virt_to_phys(base->lcla_pool.base), 2826 writel(virt_to_phys(base->lcla_pool.base),
2929 base->virtbase + D40_DREG_LCLA); 2827 base->virtbase + D40_DREG_LCLA);
2930failure: 2828failure:
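The dma_map_single() added above is validated with dma_mapping_error() and must eventually be undone with the same size and direction; the probe failure path later in this patch does exactly that. A sketch of the matching teardown under that assumption (the function name is illustrative):

static void d40_lcla_unmap(struct d40_base *base)
{
	struct d40_lcla_pool *pool = &base->lcla_pool;

	if (pool->dma_addr)
		dma_unmap_single(base->dev, pool->dma_addr,
				 SZ_1K * base->num_phy_chans,
				 DMA_TO_DEVICE);
}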
@@ -2957,9 +2855,7 @@ static int __init d40_probe(struct platform_device *pdev)
2957 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 2855 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2958 if (!res) { 2856 if (!res) {
2959 ret = -ENOENT; 2857 ret = -ENOENT;
2960 dev_err(&pdev->dev, 2858 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
2961 "[%s] No \"lcpa\" memory resource\n",
2962 __func__);
2963 goto failure; 2859 goto failure;
2964 } 2860 }
2965 base->lcpa_size = resource_size(res); 2861 base->lcpa_size = resource_size(res);
@@ -2968,9 +2864,9 @@ static int __init d40_probe(struct platform_device *pdev)
2968 if (request_mem_region(res->start, resource_size(res), 2864 if (request_mem_region(res->start, resource_size(res),
2969 D40_NAME " I/O lcpa") == NULL) { 2865 D40_NAME " I/O lcpa") == NULL) {
2970 ret = -EBUSY; 2866 ret = -EBUSY;
2971 dev_err(&pdev->dev, 2867 d40_err(&pdev->dev,
2972 "[%s] Failed to request LCPA region 0x%x-0x%x\n", 2868 "Failed to request LCPA region 0x%x-0x%x\n",
2973 __func__, res->start, res->end); 2869 res->start, res->end);
2974 goto failure; 2870 goto failure;
2975 } 2871 }
2976 2872
@@ -2986,16 +2882,13 @@ static int __init d40_probe(struct platform_device *pdev)
2986 base->lcpa_base = ioremap(res->start, resource_size(res)); 2882 base->lcpa_base = ioremap(res->start, resource_size(res));
2987 if (!base->lcpa_base) { 2883 if (!base->lcpa_base) {
2988 ret = -ENOMEM; 2884 ret = -ENOMEM;
2989 dev_err(&pdev->dev, 2885 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
2990 "[%s] Failed to ioremap LCPA region\n",
2991 __func__);
2992 goto failure; 2886 goto failure;
2993 } 2887 }
2994 2888
2995 ret = d40_lcla_allocate(base); 2889 ret = d40_lcla_allocate(base);
2996 if (ret) { 2890 if (ret) {
2997 dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", 2891 d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
2998 __func__);
2999 goto failure; 2892 goto failure;
3000 } 2893 }
3001 2894
@@ -3004,9 +2897,8 @@ static int __init d40_probe(struct platform_device *pdev)
3004 base->irq = platform_get_irq(pdev, 0); 2897 base->irq = platform_get_irq(pdev, 0);
3005 2898
3006 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 2899 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
3007
3008 if (ret) { 2900 if (ret) {
3009 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); 2901 d40_err(&pdev->dev, "No IRQ defined\n");
3010 goto failure; 2902 goto failure;
3011 } 2903 }
3012 2904
@@ -3025,6 +2917,12 @@ failure:
3025 kmem_cache_destroy(base->desc_slab); 2917 kmem_cache_destroy(base->desc_slab);
3026 if (base->virtbase) 2918 if (base->virtbase)
3027 iounmap(base->virtbase); 2919 iounmap(base->virtbase);
2920
2921 if (base->lcla_pool.dma_addr)
2922 dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
2923 SZ_1K * base->num_phy_chans,
2924 DMA_TO_DEVICE);
2925
3028 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 2926 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
3029 free_pages((unsigned long)base->lcla_pool.base, 2927 free_pages((unsigned long)base->lcla_pool.base,
3030 base->lcla_pool.pages); 2928 base->lcla_pool.pages);
@@ -3049,7 +2947,7 @@ failure:
3049 kfree(base); 2947 kfree(base);
3050 } 2948 }
3051 2949
3052 dev_err(&pdev->dev, "[%s] probe failed\n", __func__); 2950 d40_err(&pdev->dev, "probe failed\n");
3053 return ret; 2951 return ret;
3054} 2952}
3055 2953
@@ -3060,7 +2958,7 @@ static struct platform_driver d40_driver = {
3060 }, 2958 },
3061}; 2959};
3062 2960
3063int __init stedma40_init(void) 2961static int __init stedma40_init(void)
3064{ 2962{
3065 return platform_driver_probe(&d40_driver, d40_probe); 2963 return platform_driver_probe(&d40_driver, d40_probe);
3066} 2964}
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 0b096a38322d..cad9e1daedff 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -125,13 +125,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
125static int d40_phy_fill_lli(struct d40_phy_lli *lli, 125static int d40_phy_fill_lli(struct d40_phy_lli *lli,
126 dma_addr_t data, 126 dma_addr_t data,
127 u32 data_size, 127 u32 data_size,
128 int psize,
129 dma_addr_t next_lli, 128 dma_addr_t next_lli,
130 u32 reg_cfg, 129 u32 reg_cfg,
131 bool term_int, 130 struct stedma40_half_channel_info *info,
132 u32 data_width, 131 unsigned int flags)
133 bool is_device)
134{ 132{
133 bool addr_inc = flags & LLI_ADDR_INC;
134 bool term_int = flags & LLI_TERM_INT;
135 unsigned int data_width = info->data_width;
136 int psize = info->psize;
135 int num_elems; 137 int num_elems;
136 138
137 if (psize == STEDMA40_PSIZE_PHY_1) 139 if (psize == STEDMA40_PSIZE_PHY_1)
@@ -154,7 +156,7 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
154 * Distance to next element sized entry. 156 * Distance to next element sized entry.
155 * Usually the size of the element unless you want gaps. 157 * Usually the size of the element unless you want gaps.
156 */ 158 */
157 if (!is_device) 159 if (addr_inc)
158 lli->reg_elt |= (0x1 << data_width) << 160 lli->reg_elt |= (0x1 << data_width) <<
159 D40_SREG_ELEM_PHY_EIDX_POS; 161 D40_SREG_ELEM_PHY_EIDX_POS;
160 162
@@ -198,47 +200,51 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
198 return seg_max; 200 return seg_max;
199} 201}
200 202
201struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, 203static struct d40_phy_lli *
202 dma_addr_t addr, 204d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
203 u32 size, 205 dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
204 int psize, 206 struct stedma40_half_channel_info *info,
205 dma_addr_t lli_phys, 207 struct stedma40_half_channel_info *otherinfo,
206 u32 reg_cfg, 208 unsigned long flags)
207 bool term_int,
208 u32 data_width1,
209 u32 data_width2,
210 bool is_device)
211{ 209{
210 bool lastlink = flags & LLI_LAST_LINK;
211 bool addr_inc = flags & LLI_ADDR_INC;
212 bool term_int = flags & LLI_TERM_INT;
213 bool cyclic = flags & LLI_CYCLIC;
212 int err; 214 int err;
213 dma_addr_t next = lli_phys; 215 dma_addr_t next = lli_phys;
214 int size_rest = size; 216 int size_rest = size;
215 int size_seg = 0; 217 int size_seg = 0;
216 218
219 /*
220 * This transfer may be split up by d40_seg_size(); we only want the
221 * terminating interrupt on the last part.
222 */
223 if (term_int)
224 flags &= ~LLI_TERM_INT;
225
217 do { 226 do {
218 size_seg = d40_seg_size(size_rest, data_width1, data_width2); 227 size_seg = d40_seg_size(size_rest, info->data_width,
228 otherinfo->data_width);
219 size_rest -= size_seg; 229 size_rest -= size_seg;
220 230
221 if (term_int && size_rest == 0) 231 if (size_rest == 0 && term_int)
222 next = 0; 232 flags |= LLI_TERM_INT;
233
234 if (size_rest == 0 && lastlink)
235 next = cyclic ? first_phys : 0;
223 else 236 else
224 next = ALIGN(next + sizeof(struct d40_phy_lli), 237 next = ALIGN(next + sizeof(struct d40_phy_lli),
225 D40_LLI_ALIGN); 238 D40_LLI_ALIGN);
226 239
227 err = d40_phy_fill_lli(lli, 240 err = d40_phy_fill_lli(lli, addr, size_seg, next,
228 addr, 241 reg_cfg, info, flags);
229 size_seg,
230 psize,
231 next,
232 reg_cfg,
233 !next,
234 data_width1,
235 is_device);
236 242
237 if (err) 243 if (err)
238 goto err; 244 goto err;
239 245
240 lli++; 246 lli++;
241 if (!is_device) 247 if (addr_inc)
242 addr += size_seg; 248 addr += size_seg;
243 } while (size_rest); 249 } while (size_rest);
244 250
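The loop above strips LLI_TERM_INT before iterating and re-applies it only once size_rest hits zero, so a transfer split by d40_seg_size() raises its interrupt exactly once, on the final link. The same pattern in isolation (a generic sketch; chunk_max stands in for whatever d40_seg_size() computes):

static void emit_chunks(unsigned int total, unsigned int chunk_max,
			unsigned long flags)
{
	bool want_int = flags & LLI_TERM_INT;

	flags &= ~LLI_TERM_INT;
	while (total) {
		unsigned int chunk = min(total, chunk_max);

		total -= chunk;
		if (!total && want_int)
			flags |= LLI_TERM_INT;
		/* ... emit one LLI covering 'chunk' bytes with 'flags' ... */
	}
}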
@@ -254,39 +260,35 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
254 struct d40_phy_lli *lli_sg, 260 struct d40_phy_lli *lli_sg,
255 dma_addr_t lli_phys, 261 dma_addr_t lli_phys,
256 u32 reg_cfg, 262 u32 reg_cfg,
257 u32 data_width1, 263 struct stedma40_half_channel_info *info,
258 u32 data_width2, 264 struct stedma40_half_channel_info *otherinfo,
259 int psize) 265 unsigned long flags)
260{ 266{
261 int total_size = 0; 267 int total_size = 0;
262 int i; 268 int i;
263 struct scatterlist *current_sg = sg; 269 struct scatterlist *current_sg = sg;
264 dma_addr_t dst;
265 struct d40_phy_lli *lli = lli_sg; 270 struct d40_phy_lli *lli = lli_sg;
266 dma_addr_t l_phys = lli_phys; 271 dma_addr_t l_phys = lli_phys;
267 272
273 if (!target)
274 flags |= LLI_ADDR_INC;
275
268 for_each_sg(sg, current_sg, sg_len, i) { 276 for_each_sg(sg, current_sg, sg_len, i) {
277 dma_addr_t sg_addr = sg_dma_address(current_sg);
278 unsigned int len = sg_dma_len(current_sg);
279 dma_addr_t dst = target ?: sg_addr;
269 280
270 total_size += sg_dma_len(current_sg); 281 total_size += sg_dma_len(current_sg);
271 282
272 if (target) 283 if (i == sg_len - 1)
273 dst = target; 284 flags |= LLI_TERM_INT | LLI_LAST_LINK;
274 else
275 dst = sg_phys(current_sg);
276 285
277 l_phys = ALIGN(lli_phys + (lli - lli_sg) * 286 l_phys = ALIGN(lli_phys + (lli - lli_sg) *
278 sizeof(struct d40_phy_lli), D40_LLI_ALIGN); 287 sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
279 288
280 lli = d40_phy_buf_to_lli(lli, 289 lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
281 dst, 290 reg_cfg, info, otherinfo, flags);
282 sg_dma_len(current_sg), 291
283 psize,
284 l_phys,
285 reg_cfg,
286 sg_len - 1 == i,
287 data_width1,
288 data_width2,
289 target == dst);
290 if (lli == NULL) 292 if (lli == NULL)
291 return -EINVAL; 293 return -EINVAL;
292 } 294 }
@@ -295,45 +297,22 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
295} 297}
296 298
297 299
298void d40_phy_lli_write(void __iomem *virtbase,
299 u32 phy_chan_num,
300 struct d40_phy_lli *lli_dst,
301 struct d40_phy_lli *lli_src)
302{
303
304 writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE +
305 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG);
306 writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE +
307 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
308 writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE +
309 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR);
310 writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE +
311 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK);
312
313 writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE +
314 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG);
315 writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE +
316 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
317 writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE +
318 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR);
319 writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE +
320 phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK);
321
322}
323
324/* DMA logical lli operations */ 300/* DMA logical lli operations */
325 301
326static void d40_log_lli_link(struct d40_log_lli *lli_dst, 302static void d40_log_lli_link(struct d40_log_lli *lli_dst,
327 struct d40_log_lli *lli_src, 303 struct d40_log_lli *lli_src,
328 int next) 304 int next, unsigned int flags)
329{ 305{
306 bool interrupt = flags & LLI_TERM_INT;
330 u32 slos = 0; 307 u32 slos = 0;
331 u32 dlos = 0; 308 u32 dlos = 0;
332 309
333 if (next != -EINVAL) { 310 if (next != -EINVAL) {
334 slos = next * 2; 311 slos = next * 2;
335 dlos = next * 2 + 1; 312 dlos = next * 2 + 1;
336 } else { 313 }
314
315 if (interrupt) {
337 lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; 316 lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
338 lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; 317 lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
339 } 318 }
@@ -348,9 +327,9 @@ static void d40_log_lli_link(struct d40_log_lli *lli_dst,
348void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, 327void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
349 struct d40_log_lli *lli_dst, 328 struct d40_log_lli *lli_dst,
350 struct d40_log_lli *lli_src, 329 struct d40_log_lli *lli_src,
351 int next) 330 int next, unsigned int flags)
352{ 331{
353 d40_log_lli_link(lli_dst, lli_src, next); 332 d40_log_lli_link(lli_dst, lli_src, next, flags);
354 333
355 writel(lli_src->lcsp02, &lcpa[0].lcsp0); 334 writel(lli_src->lcsp02, &lcpa[0].lcsp0);
356 writel(lli_src->lcsp13, &lcpa[0].lcsp1); 335 writel(lli_src->lcsp13, &lcpa[0].lcsp1);
@@ -361,9 +340,9 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
361void d40_log_lli_lcla_write(struct d40_log_lli *lcla, 340void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
362 struct d40_log_lli *lli_dst, 341 struct d40_log_lli *lli_dst,
363 struct d40_log_lli *lli_src, 342 struct d40_log_lli *lli_src,
364 int next) 343 int next, unsigned int flags)
365{ 344{
366 d40_log_lli_link(lli_dst, lli_src, next); 345 d40_log_lli_link(lli_dst, lli_src, next, flags);
367 346
368 writel(lli_src->lcsp02, &lcla[0].lcsp02); 347 writel(lli_src->lcsp02, &lcla[0].lcsp02);
369 writel(lli_src->lcsp13, &lcla[0].lcsp13); 348 writel(lli_src->lcsp13, &lcla[0].lcsp13);
@@ -375,8 +354,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
375 dma_addr_t data, u32 data_size, 354 dma_addr_t data, u32 data_size,
376 u32 reg_cfg, 355 u32 reg_cfg,
377 u32 data_width, 356 u32 data_width,
378 bool addr_inc) 357 unsigned int flags)
379{ 358{
359 bool addr_inc = flags & LLI_ADDR_INC;
360
380 lli->lcsp13 = reg_cfg; 361 lli->lcsp13 = reg_cfg;
381 362
382 /* The number of elements to transfer */ 363 /* The number of elements to transfer */
@@ -395,67 +376,15 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
395 376
396} 377}
397 378
398int d40_log_sg_to_dev(struct scatterlist *sg, 379static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
399 int sg_len,
400 struct d40_log_lli_bidir *lli,
401 struct d40_def_lcsp *lcsp,
402 u32 src_data_width,
403 u32 dst_data_width,
404 enum dma_data_direction direction,
405 dma_addr_t dev_addr)
406{
407 int total_size = 0;
408 struct scatterlist *current_sg = sg;
409 int i;
410 struct d40_log_lli *lli_src = lli->src;
411 struct d40_log_lli *lli_dst = lli->dst;
412
413 for_each_sg(sg, current_sg, sg_len, i) {
414 total_size += sg_dma_len(current_sg);
415
416 if (direction == DMA_TO_DEVICE) {
417 lli_src =
418 d40_log_buf_to_lli(lli_src,
419 sg_phys(current_sg),
420 sg_dma_len(current_sg),
421 lcsp->lcsp1, src_data_width,
422 dst_data_width,
423 true);
424 lli_dst =
425 d40_log_buf_to_lli(lli_dst,
426 dev_addr,
427 sg_dma_len(current_sg),
428 lcsp->lcsp3, dst_data_width,
429 src_data_width,
430 false);
431 } else {
432 lli_dst =
433 d40_log_buf_to_lli(lli_dst,
434 sg_phys(current_sg),
435 sg_dma_len(current_sg),
436 lcsp->lcsp3, dst_data_width,
437 src_data_width,
438 true);
439 lli_src =
440 d40_log_buf_to_lli(lli_src,
441 dev_addr,
442 sg_dma_len(current_sg),
443 lcsp->lcsp1, src_data_width,
444 dst_data_width,
445 false);
446 }
447 }
448 return total_size;
449}
450
451struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
452 dma_addr_t addr, 380 dma_addr_t addr,
453 int size, 381 int size,
454 u32 lcsp13, /* src or dst*/ 382 u32 lcsp13, /* src or dst*/
455 u32 data_width1, 383 u32 data_width1,
456 u32 data_width2, 384 u32 data_width2,
457 bool addr_inc) 385 unsigned int flags)
458{ 386{
387 bool addr_inc = flags & LLI_ADDR_INC;
459 struct d40_log_lli *lli = lli_sg; 388 struct d40_log_lli *lli = lli_sg;
460 int size_rest = size; 389 int size_rest = size;
461 int size_seg = 0; 390 int size_seg = 0;
@@ -468,7 +397,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
468 addr, 397 addr,
469 size_seg, 398 size_seg,
470 lcsp13, data_width1, 399 lcsp13, data_width1,
471 addr_inc); 400 flags);
472 if (addr_inc) 401 if (addr_inc)
473 addr += size_seg; 402 addr += size_seg;
474 lli++; 403 lli++;
@@ -479,6 +408,7 @@ struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
479 408
480int d40_log_sg_to_lli(struct scatterlist *sg, 409int d40_log_sg_to_lli(struct scatterlist *sg,
481 int sg_len, 410 int sg_len,
411 dma_addr_t dev_addr,
482 struct d40_log_lli *lli_sg, 412 struct d40_log_lli *lli_sg,
483 u32 lcsp13, /* src or dst*/ 413 u32 lcsp13, /* src or dst*/
484 u32 data_width1, u32 data_width2) 414 u32 data_width1, u32 data_width2)
@@ -487,14 +417,24 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
487 struct scatterlist *current_sg = sg; 417 struct scatterlist *current_sg = sg;
488 int i; 418 int i;
489 struct d40_log_lli *lli = lli_sg; 419 struct d40_log_lli *lli = lli_sg;
420 unsigned long flags = 0;
421
422 if (!dev_addr)
423 flags |= LLI_ADDR_INC;
490 424
491 for_each_sg(sg, current_sg, sg_len, i) { 425 for_each_sg(sg, current_sg, sg_len, i) {
426 dma_addr_t sg_addr = sg_dma_address(current_sg);
427 unsigned int len = sg_dma_len(current_sg);
428 dma_addr_t addr = dev_addr ?: sg_addr;
429
492 total_size += sg_dma_len(current_sg); 430 total_size += sg_dma_len(current_sg);
493 lli = d40_log_buf_to_lli(lli, 431
494 sg_phys(current_sg), 432 lli = d40_log_buf_to_lli(lli, addr, len,
495 sg_dma_len(current_sg),
496 lcsp13, 433 lcsp13,
497 data_width1, data_width2, true); 434 data_width1,
435 data_width2,
436 flags);
498 } 437 }
438
499 return total_size; 439 return total_size;
500} 440}
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 9cc43495bea2..195ee65ee7f3 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -163,6 +163,22 @@
163#define D40_DREG_LCEIS1 0x0B4 163#define D40_DREG_LCEIS1 0x0B4
164#define D40_DREG_LCEIS2 0x0B8 164#define D40_DREG_LCEIS2 0x0B8
165#define D40_DREG_LCEIS3 0x0BC 165#define D40_DREG_LCEIS3 0x0BC
166#define D40_DREG_PSEG1 0x110
167#define D40_DREG_PSEG2 0x114
168#define D40_DREG_PSEG3 0x118
169#define D40_DREG_PSEG4 0x11C
170#define D40_DREG_PCEG1 0x120
171#define D40_DREG_PCEG2 0x124
172#define D40_DREG_PCEG3 0x128
173#define D40_DREG_PCEG4 0x12C
174#define D40_DREG_RSEG1 0x130
175#define D40_DREG_RSEG2 0x134
176#define D40_DREG_RSEG3 0x138
177#define D40_DREG_RSEG4 0x13C
178#define D40_DREG_RCEG1 0x140
179#define D40_DREG_RCEG2 0x144
180#define D40_DREG_RCEG3 0x148
181#define D40_DREG_RCEG4 0x14C
166#define D40_DREG_STFU 0xFC8 182#define D40_DREG_STFU 0xFC8
167#define D40_DREG_ICFG 0xFCC 183#define D40_DREG_ICFG 0xFCC
168#define D40_DREG_PERIPHID0 0xFE0 184#define D40_DREG_PERIPHID0 0xFE0
@@ -277,6 +293,13 @@ struct d40_def_lcsp {
277 293
278/* Physical channels */ 294/* Physical channels */
279 295
296enum d40_lli_flags {
297 LLI_ADDR_INC = 1 << 0,
298 LLI_TERM_INT = 1 << 1,
299 LLI_CYCLIC = 1 << 2,
300 LLI_LAST_LINK = 1 << 3,
301};
302
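For illustration, the combination a cyclic device-to-memory period list would carry under this scheme (an inference from the flag names; the driver composes these internally):

unsigned long flags = LLI_ADDR_INC  |	/* memory address advances    */
		      LLI_TERM_INT  |	/* interrupt per period       */
		      LLI_CYCLIC    |	/* final link wraps to first  */
		      LLI_LAST_LINK;	/* marks the final descriptor */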
280void d40_phy_cfg(struct stedma40_chan_cfg *cfg, 303void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
281 u32 *src_cfg, 304 u32 *src_cfg,
282 u32 *dst_cfg, 305 u32 *dst_cfg,
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
292 struct d40_phy_lli *lli, 315 struct d40_phy_lli *lli,
293 dma_addr_t lli_phys, 316 dma_addr_t lli_phys,
294 u32 reg_cfg, 317 u32 reg_cfg,
295 u32 data_width1, 318 struct stedma40_half_channel_info *info,
296 u32 data_width2, 319 struct stedma40_half_channel_info *otherinfo,
297 int psize); 320 unsigned long flags);
298
299struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
300 dma_addr_t data,
301 u32 data_size,
302 int psize,
303 dma_addr_t next_lli,
304 u32 reg_cfg,
305 bool term_int,
306 u32 data_width1,
307 u32 data_width2,
308 bool is_device);
309
310void d40_phy_lli_write(void __iomem *virtbase,
311 u32 phy_chan_num,
312 struct d40_phy_lli *lli_dst,
313 struct d40_phy_lli *lli_src);
314 321
315/* Logical channels */ 322/* Logical channels */
316 323
317struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
318 dma_addr_t addr,
319 int size,
320 u32 lcsp13, /* src or dst*/
321 u32 data_width1, u32 data_width2,
322 bool addr_inc);
323
324int d40_log_sg_to_dev(struct scatterlist *sg,
325 int sg_len,
326 struct d40_log_lli_bidir *lli,
327 struct d40_def_lcsp *lcsp,
328 u32 src_data_width,
329 u32 dst_data_width,
330 enum dma_data_direction direction,
331 dma_addr_t dev_addr);
332
333int d40_log_sg_to_lli(struct scatterlist *sg, 324int d40_log_sg_to_lli(struct scatterlist *sg,
334 int sg_len, 325 int sg_len,
326 dma_addr_t dev_addr,
335 struct d40_log_lli *lli_sg, 327 struct d40_log_lli *lli_sg,
336 u32 lcsp13, /* src or dst*/ 328 u32 lcsp13, /* src or dst*/
337 u32 data_width1, u32 data_width2); 329 u32 data_width1, u32 data_width2);
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
339void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, 331void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
340 struct d40_log_lli *lli_dst, 332 struct d40_log_lli *lli_dst,
341 struct d40_log_lli *lli_src, 333 struct d40_log_lli *lli_src,
342 int next); 334 int next, unsigned int flags);
343 335
344void d40_log_lli_lcla_write(struct d40_log_lli *lcla, 336void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
345 struct d40_log_lli *lli_dst, 337 struct d40_log_lli *lli_dst,
346 struct d40_log_lli *lli_src, 338 struct d40_log_lli *lli_src,
347 int next); 339 int next, unsigned int flags);
348 340
349#endif /* STE_DMA40_LLI_H */ 341#endif /* STE_DMA40_LLI_H */
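The dev_addr parameter added to d40_log_sg_to_lli() above absorbs the removed d40_log_sg_to_dev(): zero means the address walks the scatterlist (LLI_ADDR_INC gets set), non-zero pins every element to a device register. An illustrative pair of calls for a memory-to-device transfer, with the local variable names assumed:

/* memory side: addresses advance through the scatterlist */
total = d40_log_sg_to_lli(sgl, sg_len, 0, lli_src,
			  lcsp->lcsp1, src_width, dst_width);

/* device side: every element targets the peripheral FIFO */
total = d40_log_sg_to_lli(sgl, sg_len, dev_addr, lli_dst,
			  lcsp->lcsp3, dst_width, src_width);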
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 3c56afc5eb1b..b3a25a55ba23 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -145,4 +145,16 @@ config ISCSI_IBFT
145 detect iSCSI boot parameters dynamically during system boot, say Y. 145 detect iSCSI boot parameters dynamically during system boot, say Y.
146 Otherwise, say N. 146 Otherwise, say N.
147 147
148config SIGMA
149 tristate "SigmaStudio firmware loader"
150 depends on I2C
151 select CRC32
152 default n
153 help
154 Enable helper functions for working with Analog Devices SigmaDSP
155 parts and binary firmware files produced by Analog Devices SigmaStudio.
156
157 If unsure, say N here. Drivers that need these helpers will select
158 this option automatically.
159
148endmenu 160endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 20c17fca1232..00bb0b80a79f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_DMIID) += dmi-id.o
12obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o 12obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o 13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o 14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
15obj-$(CONFIG_SIGMA) += sigma.o
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
new file mode 100644
index 000000000000..c19cd2c39fa6
--- /dev/null
+++ b/drivers/firmware/sigma.c
@@ -0,0 +1,115 @@
1/*
2 * Load Analog Devices SigmaStudio firmware files
3 *
4 * Copyright 2009-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/crc32.h>
10#include <linux/delay.h>
11#include <linux/firmware.h>
12#include <linux/kernel.h>
13#include <linux/i2c.h>
14#include <linux/sigma.h>
15
16/* Return: 0 == OK, < 0 == error, 1 == no more actions */
17static int
18process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
19{
20 struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
21 size_t len = sigma_action_len(sa);
22 int ret = 0;
23
24 pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
25 sa->instr, sa->addr, len);
26
27 switch (sa->instr) {
28 case SIGMA_ACTION_WRITEXBYTES:
29 case SIGMA_ACTION_WRITESINGLE:
30 case SIGMA_ACTION_WRITESAFELOAD:
31 if (ssfw->fw->size < ssfw->pos + len)
32 return -EINVAL;
33 ret = i2c_master_send(client, (void *)&sa->addr, len);
34 if (ret < 0)
35 return -EINVAL;
36 break;
37
38 case SIGMA_ACTION_DELAY:
39 ret = 0;
40 udelay(len);
41 len = 0;
42 break;
43
44 case SIGMA_ACTION_END:
45 return 1;
46
47 default:
48 return -EINVAL;
49 }
50
51 /* when we get here, ret is 0 or the number of bytes sent */
52 ssfw->pos += sigma_action_size(sa, len);
53 return ssfw->pos == ssfw->fw->size;
54}
55
56static int
57process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
58{
59 pr_debug("%s: processing %p\n", __func__, ssfw);
60
61 while (1) {
62 int ret = process_sigma_action(client, ssfw);
63 pr_debug("%s: action returned %i\n", __func__, ret);
64 if (ret == 1)
65 return 0;
66 else if (ret)
67 return ret;
68 }
69}
70
71int process_sigma_firmware(struct i2c_client *client, const char *name)
72{
73 int ret;
74 struct sigma_firmware_header *ssfw_head;
75 struct sigma_firmware ssfw;
76 const struct firmware *fw;
77 u32 crc;
78
79 pr_debug("%s: loading firmware %s\n", __func__, name);
80
81 /* first load the blob */
82 ret = request_firmware(&fw, name, &client->dev);
83 if (ret) {
84 pr_debug("%s: request_firmware() failed with %i\n", __func__, ret);
85 return ret;
86 }
87 ssfw.fw = fw;
88
89 /* then verify the header */
90 ret = -EINVAL;
91 if (fw->size < sizeof(*ssfw_head))
92 goto done;
93
94 ssfw_head = (void *)fw->data;
95 if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
96 goto done;
97
98 crc = crc32(0, fw->data, fw->size);
99 pr_debug("%s: crc=%x\n", __func__, crc);
100 if (crc != ssfw_head->crc)
101 goto done;
102
103 ssfw.pos = sizeof(*ssfw_head);
104
105 /* finally process all of the actions */
106 ret = process_sigma_actions(client, &ssfw);
107
108 done:
109 release_firmware(fw);
110
111 pr_debug("%s: loaded %s\n", __func__, name);
112
113 return ret;
114}
115EXPORT_SYMBOL(process_sigma_firmware);
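A hedged usage sketch for the exported helper: a hypothetical codec driver loading a SigmaStudio blob at probe time ("my_codec.fw", my_codec_probe(), and the surrounding glue are illustrative names, not part of this patch):

#include <linux/i2c.h>
#include <linux/sigma.h>

static int my_codec_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	int ret = process_sigma_firmware(client, "my_codec.fw");

	if (ret)
		dev_err(&client->dev, "firmware load failed: %d\n", ret);
	return ret;
}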
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index d3a9c6e02477..00a55dfdba82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -88,18 +88,20 @@ static const struct backlight_ops nv50_bl_ops = {
88 .update_status = nv50_set_intensity, 88 .update_status = nv50_set_intensity,
89}; 89};
90 90
91static int nouveau_nv40_backlight_init(struct drm_device *dev) 91static int nouveau_nv40_backlight_init(struct drm_connector *connector)
92{ 92{
93 struct backlight_properties props; 93 struct drm_device *dev = connector->dev;
94 struct drm_nouveau_private *dev_priv = dev->dev_private; 94 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 struct backlight_properties props;
95 struct backlight_device *bd; 96 struct backlight_device *bd;
96 97
97 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) 98 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
98 return 0; 99 return 0;
99 100
100 memset(&props, 0, sizeof(struct backlight_properties)); 101 memset(&props, 0, sizeof(struct backlight_properties));
102 props.type = BACKLIGHT_RAW;
101 props.max_brightness = 31; 103 props.max_brightness = 31;
102 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, 104 bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
103 &nv40_bl_ops, &props); 105 &nv40_bl_ops, &props);
104 if (IS_ERR(bd)) 106 if (IS_ERR(bd))
105 return PTR_ERR(bd); 107 return PTR_ERR(bd);
@@ -111,18 +113,20 @@ static int nouveau_nv40_backlight_init(struct drm_device *dev)
111 return 0; 113 return 0;
112} 114}
113 115
114static int nouveau_nv50_backlight_init(struct drm_device *dev) 116static int nouveau_nv50_backlight_init(struct drm_connector *connector)
115{ 117{
116 struct backlight_properties props; 118 struct drm_device *dev = connector->dev;
117 struct drm_nouveau_private *dev_priv = dev->dev_private; 119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120 struct backlight_properties props;
118 struct backlight_device *bd; 121 struct backlight_device *bd;
119 122
120 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT)) 123 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
121 return 0; 124 return 0;
122 125
123 memset(&props, 0, sizeof(struct backlight_properties)); 126 memset(&props, 0, sizeof(struct backlight_properties));
127 props.type = BACKLIGHT_RAW;
124 props.max_brightness = 1025; 128 props.max_brightness = 1025;
125 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, 129 bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
126 &nv50_bl_ops, &props); 130 &nv50_bl_ops, &props);
127 if (IS_ERR(bd)) 131 if (IS_ERR(bd))
128 return PTR_ERR(bd); 132 return PTR_ERR(bd);
@@ -133,8 +137,9 @@ static int nouveau_nv50_backlight_init(struct drm_device *dev)
133 return 0; 137 return 0;
134} 138}
135 139
136int nouveau_backlight_init(struct drm_device *dev) 140int nouveau_backlight_init(struct drm_connector *connector)
137{ 141{
142 struct drm_device *dev = connector->dev;
138 struct drm_nouveau_private *dev_priv = dev->dev_private; 143 struct drm_nouveau_private *dev_priv = dev->dev_private;
139 144
140#ifdef CONFIG_ACPI 145#ifdef CONFIG_ACPI
@@ -147,9 +152,9 @@ int nouveau_backlight_init(struct drm_device *dev)
147 152
148 switch (dev_priv->card_type) { 153 switch (dev_priv->card_type) {
149 case NV_40: 154 case NV_40:
150 return nouveau_nv40_backlight_init(dev); 155 return nouveau_nv40_backlight_init(connector);
151 case NV_50: 156 case NV_50:
152 return nouveau_nv50_backlight_init(dev); 157 return nouveau_nv50_backlight_init(connector);
153 default: 158 default:
154 break; 159 break;
155 } 160 }
@@ -157,8 +162,9 @@ int nouveau_backlight_init(struct drm_device *dev)
157 return 0; 162 return 0;
158} 163}
159 164
160void nouveau_backlight_exit(struct drm_device *dev) 165void nouveau_backlight_exit(struct drm_connector *connector)
161{ 166{
167 struct drm_device *dev = connector->dev;
162 struct drm_nouveau_private *dev_priv = dev->dev_private; 168 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 169
164 if (dev_priv->backlight) { 170 if (dev_priv->backlight) {
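Both nouveau paths above now set props.type = BACKLIGHT_RAW, which the backlight core now expects, and parent the class device on the connector's kdev rather than the PCI device. The registration pattern in isolation, as a sketch with illustrative names:

static struct backlight_device *
my_register_bl(struct drm_connector *connector, void *drvdata,
	       const struct backlight_ops *ops, int max_brightness)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = max_brightness;

	return backlight_device_register("nv_backlight", &connector->kdev,
					 drvdata, ops, &props);
}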
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 390d82c3c4b0..7ae151109a66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -116,6 +116,10 @@ nouveau_connector_destroy(struct drm_connector *connector)
116 nouveau_connector_hotplug, connector); 116 nouveau_connector_hotplug, connector);
117 } 117 }
118 118
119 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
120 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
121 nouveau_backlight_exit(connector);
122
119 kfree(nv_connector->edid); 123 kfree(nv_connector->edid);
120 drm_sysfs_connector_remove(connector); 124 drm_sysfs_connector_remove(connector);
121 drm_connector_cleanup(connector); 125 drm_connector_cleanup(connector);
@@ -894,6 +898,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
894 } 898 }
895 899
896 drm_sysfs_connector_add(connector); 900 drm_sysfs_connector_add(connector);
901
902 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
903 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
904 nouveau_backlight_init(connector);
905
897 dcb->drm = connector; 906 dcb->drm = connector;
898 return dcb->drm; 907 return dcb->drm;
899 908
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 06111887b789..fff180a99867 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -999,15 +999,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
999 999
1000/* nouveau_backlight.c */ 1000/* nouveau_backlight.c */
1001#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT 1001#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1002extern int nouveau_backlight_init(struct drm_device *); 1002extern int nouveau_backlight_init(struct drm_connector *);
1003extern void nouveau_backlight_exit(struct drm_device *); 1003extern void nouveau_backlight_exit(struct drm_connector *);
1004#else 1004#else
1005static inline int nouveau_backlight_init(struct drm_device *dev) 1005static inline int nouveau_backlight_init(struct drm_connector *dev)
1006{ 1006{
1007 return 0; 1007 return 0;
1008} 1008}
1009 1009
1010static inline void nouveau_backlight_exit(struct drm_device *dev) { } 1010static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
1011#endif 1011#endif
1012 1012
1013/* nouveau_bios.c */ 1013/* nouveau_bios.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 05294910e135..4fcbd091a117 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -704,10 +704,6 @@ nouveau_card_init(struct drm_device *dev)
704 goto out_fence; 704 goto out_fence;
705 } 705 }
706 706
707 ret = nouveau_backlight_init(dev);
708 if (ret)
709 NV_ERROR(dev, "Error %d registering backlight\n", ret);
710
711 nouveau_fbcon_init(dev); 707 nouveau_fbcon_init(dev);
712 drm_kms_helper_poll_init(dev); 708 drm_kms_helper_poll_init(dev);
713 return 0; 709 return 0;
@@ -759,8 +755,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
759 struct drm_nouveau_private *dev_priv = dev->dev_private; 755 struct drm_nouveau_private *dev_priv = dev->dev_private;
760 struct nouveau_engine *engine = &dev_priv->engine; 756 struct nouveau_engine *engine = &dev_priv->engine;
761 757
762 nouveau_backlight_exit(dev);
763
764 if (!engine->graph.accel_blocked) { 758 if (!engine->graph.accel_blocked) {
765 nouveau_fence_fini(dev); 759 nouveau_fence_fini(dev);
766 nouveau_channel_put_unlocked(&dev_priv->channel); 760 nouveau_channel_put_unlocked(&dev_priv->channel);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 1c02d23f6fcc..9746fee59f56 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,6 +1,7 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_KMS
2 bool "Enable modesetting on radeon by default - NEW DRIVER" 2 bool "Enable modesetting on radeon by default - NEW DRIVER"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 select BACKLIGHT_CLASS_DEVICE
4 help 5 help
5 Choose this option if you want kernel modesetting enabled by default. 6 Choose this option if you want kernel modesetting enabled by default.
6 7
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3f3c9aac46cc..28c7961cd19b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,10 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
40 struct drm_encoder *encoder, 40 struct drm_encoder *encoder,
41 bool connected); 41 bool connected);
42 42
43extern void
44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
45 struct drm_connector *drm_connector);
46
43void radeon_connector_hotplug(struct drm_connector *connector) 47void radeon_connector_hotplug(struct drm_connector *connector)
44{ 48{
45 struct drm_device *dev = connector->dev; 49 struct drm_device *dev = connector->dev;
@@ -1526,6 +1530,17 @@ radeon_add_legacy_connector(struct drm_device *dev,
1526 connector->polled = DRM_CONNECTOR_POLL_HPD; 1530 connector->polled = DRM_CONNECTOR_POLL_HPD;
1527 connector->display_info.subpixel_order = subpixel_order; 1531 connector->display_info.subpixel_order = subpixel_order;
1528 drm_sysfs_connector_add(connector); 1532 drm_sysfs_connector_add(connector);
1533 if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
1534 struct drm_encoder *drm_encoder;
1535
1536 list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
1537 struct radeon_encoder *radeon_encoder;
1538
1539 radeon_encoder = to_radeon_encoder(drm_encoder);
1540 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
1541 radeon_legacy_backlight_init(radeon_encoder, connector);
1542 }
1543 }
1529 return; 1544 return;
1530 1545
1531failed: 1546failed:
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 59f834ba283d..5b54268ed6b2 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -28,6 +28,10 @@
28#include "radeon_drm.h" 28#include "radeon_drm.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "atom.h" 30#include "atom.h"
31#include <linux/backlight.h>
32#ifdef CONFIG_PMAC_BACKLIGHT
33#include <asm/backlight.h>
34#endif
31 35
32static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) 36static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
33{ 37{
@@ -39,7 +43,7 @@ static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
39 radeon_encoder->active_device = 0; 43 radeon_encoder->active_device = 0;
40} 44}
41 45
42static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) 46static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
43{ 47{
44 struct drm_device *dev = encoder->dev; 48 struct drm_device *dev = encoder->dev;
45 struct radeon_device *rdev = dev->dev_private; 49 struct radeon_device *rdev = dev->dev_private;
@@ -47,15 +51,23 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
47 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; 51 uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
48 int panel_pwr_delay = 2000; 52 int panel_pwr_delay = 2000;
49 bool is_mac = false; 53 bool is_mac = false;
54 uint8_t backlight_level;
50 DRM_DEBUG_KMS("\n"); 55 DRM_DEBUG_KMS("\n");
51 56
57 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
58 backlight_level = (lvds_gen_cntl >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
59
52 if (radeon_encoder->enc_priv) { 60 if (radeon_encoder->enc_priv) {
53 if (rdev->is_atom_bios) { 61 if (rdev->is_atom_bios) {
54 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; 62 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
55 panel_pwr_delay = lvds->panel_pwr_delay; 63 panel_pwr_delay = lvds->panel_pwr_delay;
64 if (lvds->bl_dev)
65 backlight_level = lvds->backlight_level;
56 } else { 66 } else {
57 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; 67 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
58 panel_pwr_delay = lvds->panel_pwr_delay; 68 panel_pwr_delay = lvds->panel_pwr_delay;
69 if (lvds->bl_dev)
70 backlight_level = lvds->backlight_level;
59 } 71 }
60 } 72 }
61 73
@@ -82,11 +94,13 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
82 lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET; 94 lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
83 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl); 95 WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
84 96
85 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 97 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS |
86 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON); 98 RADEON_LVDS_BL_MOD_LEVEL_MASK);
99 lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN |
100 RADEON_LVDS_DIGON | RADEON_LVDS_BLON |
101 (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
87 if (is_mac) 102 if (is_mac)
88 lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN; 103 lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
89 lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
90 udelay(panel_pwr_delay * 1000); 104 udelay(panel_pwr_delay * 1000);
91 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl); 105 WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
92 break; 106 break;
@@ -95,7 +109,6 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
95 case DRM_MODE_DPMS_OFF: 109 case DRM_MODE_DPMS_OFF:
96 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); 110 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
97 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb); 111 WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
98 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
99 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; 112 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
100 if (is_mac) { 113 if (is_mac) {
101 lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN; 114 lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
@@ -119,6 +132,25 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
119 132
120} 133}
121 134
135static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
136{
137 struct radeon_device *rdev = encoder->dev->dev_private;
138 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
139 DRM_DEBUG("\n");
140
141 if (radeon_encoder->enc_priv) {
142 if (rdev->is_atom_bios) {
143 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
144 lvds->dpms_mode = mode;
145 } else {
146 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
147 lvds->dpms_mode = mode;
148 }
149 }
150
151 radeon_legacy_lvds_update(encoder, mode);
152}
153
122static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) 154static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
123{ 155{
124 struct radeon_device *rdev = encoder->dev->dev_private; 156 struct radeon_device *rdev = encoder->dev->dev_private;
@@ -237,9 +269,222 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
237 .disable = radeon_legacy_encoder_disable, 269 .disable = radeon_legacy_encoder_disable,
238}; 270};
239 271
272#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
273
274#define MAX_RADEON_LEVEL 0xFF
275
276struct radeon_backlight_privdata {
277 struct radeon_encoder *encoder;
278 uint8_t negative;
279};
280
281static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
282{
283 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
284 uint8_t level;
285
286 /* Convert brightness to hardware level */
287 if (bd->props.brightness < 0)
288 level = 0;
289 else if (bd->props.brightness > MAX_RADEON_LEVEL)
290 level = MAX_RADEON_LEVEL;
291 else
292 level = bd->props.brightness;
293
294 if (pdata->negative)
295 level = MAX_RADEON_LEVEL - level;
296
297 return level;
298}
299
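/*
 * Worked example (not from the patch): with pdata->negative set, a
 * requested brightness of 200 programs the hardware level
 * 0xFF - 200 = 55 and a request of 255 programs 0; with the normal
 * sense the value passes through unchanged, clamped to 0..0xFF.
 */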
300static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
301{
302 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
303 struct radeon_encoder *radeon_encoder = pdata->encoder;
304 struct drm_device *dev = radeon_encoder->base.dev;
305 struct radeon_device *rdev = dev->dev_private;
306 int dpms_mode = DRM_MODE_DPMS_ON;
307
308 if (radeon_encoder->enc_priv) {
309 if (rdev->is_atom_bios) {
310 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
311 dpms_mode = lvds->dpms_mode;
312 lvds->backlight_level = radeon_legacy_lvds_level(bd);
313 } else {
314 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
315 dpms_mode = lvds->dpms_mode;
316 lvds->backlight_level = radeon_legacy_lvds_level(bd);
317 }
318 }
319
320 if (bd->props.brightness > 0)
321 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
322 else
323 radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
324
325 return 0;
326}
327
328static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
329{
330 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
331 struct radeon_encoder *radeon_encoder = pdata->encoder;
332 struct drm_device *dev = radeon_encoder->base.dev;
333 struct radeon_device *rdev = dev->dev_private;
334 uint8_t backlight_level;
335
336 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
337 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
338
339 return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
340}
341
342static const struct backlight_ops radeon_backlight_ops = {
343 .get_brightness = radeon_legacy_backlight_get_brightness,
344 .update_status = radeon_legacy_backlight_update_status,
345};
346
347void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
348 struct drm_connector *drm_connector)
349{
350 struct drm_device *dev = radeon_encoder->base.dev;
351 struct radeon_device *rdev = dev->dev_private;
352 struct backlight_device *bd;
353 struct backlight_properties props;
354 struct radeon_backlight_privdata *pdata;
355 uint8_t backlight_level;
356
357 if (!radeon_encoder->enc_priv)
358 return;
359
360#ifdef CONFIG_PMAC_BACKLIGHT
361 if (!pmac_has_backlight_type("ati") &&
362 !pmac_has_backlight_type("mnca"))
363 return;
364#endif
365
366 pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
367 if (!pdata) {
368 DRM_ERROR("Memory allocation failed\n");
369 goto error;
370 }
371
372 props.max_brightness = MAX_RADEON_LEVEL;
373 props.type = BACKLIGHT_RAW;
374 bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
375 pdata, &radeon_backlight_ops, &props);
376 if (IS_ERR(bd)) {
377 DRM_ERROR("Backlight registration failed\n");
378 goto error;
379 }
380
381 pdata->encoder = radeon_encoder;
382
383 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
384 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
385
386 /* First, try to detect backlight level sense based on the assumption
387 * that firmware set it up at full brightness
388 */
389 if (backlight_level == 0)
390 pdata->negative = true;
391 else if (backlight_level == 0xff)
392 pdata->negative = false;
393 else {
394 /* XXX hack... maybe some day we can figure out in what direction
395 * backlight should work on a given panel?
396 */
397 pdata->negative = (rdev->family != CHIP_RV200 &&
398 rdev->family != CHIP_RV250 &&
399 rdev->family != CHIP_RV280 &&
400 rdev->family != CHIP_RV350);
401
402#ifdef CONFIG_PMAC_BACKLIGHT
403 pdata->negative = (pdata->negative ||
404 of_machine_is_compatible("PowerBook4,3") ||
405 of_machine_is_compatible("PowerBook6,3") ||
406 of_machine_is_compatible("PowerBook6,5"));
407#endif
408 }
409
410 if (rdev->is_atom_bios) {
411 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
412 lvds->bl_dev = bd;
413 } else {
414 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
415 lvds->bl_dev = bd;
416 }
417
418 bd->props.brightness = radeon_legacy_backlight_get_brightness(bd);
419 bd->props.power = FB_BLANK_UNBLANK;
420 backlight_update_status(bd);
421
422 DRM_INFO("radeon legacy LVDS backlight initialized\n");
423
424 return;
425
426error:
427 kfree(pdata);
428 return;
429}
430
431static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
432{
433 struct drm_device *dev = radeon_encoder->base.dev;
434 struct radeon_device *rdev = dev->dev_private;
435 struct backlight_device *bd = NULL;
436
437 if (!radeon_encoder->enc_priv)
438 return;
439
440 if (rdev->is_atom_bios) {
441 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
442 bd = lvds->bl_dev;
443 lvds->bl_dev = NULL;
444 } else {
445 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
446 bd = lvds->bl_dev;
447 lvds->bl_dev = NULL;
448 }
449
450 if (bd) {
451 struct radeon_backlight_privdata *pdata;
452
453 pdata = bl_get_data(bd);
454 backlight_device_unregister(bd);
455 kfree(pdata);
456
457 DRM_INFO("radeon legacy LVDS backlight unloaded\n");
458 }
459}
460
461#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
462
463void radeon_legacy_backlight_init(struct radeon_encoder *encoder, struct drm_connector *connector)
464{
465}
466
467static void radeon_legacy_backlight_exit(struct radeon_encoder *encoder)
468{
469}
470
471#endif
472
473
474static void radeon_lvds_enc_destroy(struct drm_encoder *encoder)
475{
476 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
477
478 if (radeon_encoder->enc_priv) {
479 radeon_legacy_backlight_exit(radeon_encoder);
480 kfree(radeon_encoder->enc_priv);
481 }
482 drm_encoder_cleanup(encoder);
483 kfree(radeon_encoder);
484}
240 485
241static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { 486static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
242 .destroy = radeon_enc_destroy, 487 .destroy = radeon_lvds_enc_destroy,
243}; 488};
244 489
245static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) 490static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5067d18d0009..e4582814bb78 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -302,6 +302,9 @@ struct radeon_encoder_lvds {
302 uint32_t lvds_gen_cntl; 302 uint32_t lvds_gen_cntl;
303 /* panel mode */ 303 /* panel mode */
304 struct drm_display_mode native_mode; 304 struct drm_display_mode native_mode;
305 struct backlight_device *bl_dev;
306 int dpms_mode;
307 uint8_t backlight_level;
305}; 308};
306 309
307struct radeon_encoder_tv_dac { 310struct radeon_encoder_tv_dac {
@@ -355,6 +358,9 @@ struct radeon_encoder_atom_dig {
355 uint32_t lcd_ss_id; 358 uint32_t lcd_ss_id;
356 /* panel mode */ 359 /* panel mode */
357 struct drm_display_mode native_mode; 360 struct drm_display_mode native_mode;
361 struct backlight_device *bl_dev;
362 int dpms_mode;
363 uint8_t backlight_level;
358}; 364};
359 365
360struct radeon_encoder_atom_dac { 366struct radeon_encoder_atom_dac {
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index de9cf21b3494..657da5a3d5c6 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -944,6 +944,7 @@ static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *
944 } 944 }
945 945
946 memset(&props, 0, sizeof(props)); 946 memset(&props, 0, sizeof(props));
947 props.type = BACKLIGHT_RAW;
947 props.max_brightness = 0xff; 948 props.max_brightness = 0xff;
948 bdev = backlight_device_register(dev_name(dev), dev, data, 949 bdev = backlight_device_register(dev_name(dev), dev, data,
949 &picolcd_blops, &props); 950 &picolcd_blops, &props);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ad415e6ec5a1..326652f673f7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -547,15 +547,18 @@ config I2C_PUV3
 
 config I2C_PXA
 	tristate "Intel PXA2XX I2C adapter"
-	depends on ARCH_PXA || ARCH_MMP
+	depends on ARCH_PXA || ARCH_MMP || (X86_32 && PCI && OF)
 	help
 	  If you have devices in the PXA I2C bus, say yes to this option.
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-pxa.
 
+config I2C_PXA_PCI
+	def_bool I2C_PXA && X86_32 && PCI && OF
+
 config I2C_PXA_SLAVE
 	bool "Intel PXA2XX I2C Slave comms support"
-	depends on I2C_PXA
+	depends on I2C_PXA && !X86_32
 	help
 	  Support I2C slave mode communications on the PXA I2C bus.  This
 	  is necessary for systems where the PXA may be a target on the
@@ -668,15 +671,28 @@ config I2C_XILINX
 	  will be called xilinx_i2c.
 
 config I2C_EG20T
-	tristate "PCH I2C of Intel EG20T"
+	tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH"
 	depends on PCI
 	help
 	  This driver is for PCH(Platform controller Hub) I2C of EG20T which
 	  is an IOH(Input/Output Hub) for x86 embedded processor.
 	  This driver can access PCH I2C bus device.
+
+	  This driver also supports the ML7213, a companion chip for the
+	  Atom E6xx series and compatible with the Intel EG20T PCH.
 
 comment "External I2C/SMBus adapter drivers"
 
+config I2C_DIOLAN_U2C
+	tristate "Diolan U2C-12 USB adapter"
+	depends on USB
+	help
+	  If you say yes to this option, support will be included for Diolan
+	  U2C-12, a USB to I2C interface.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-diolan-u2c.
+
 config I2C_PARPORT
 	tristate "Parallel port adapter"
 	depends on PARPORT
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3878c959d4fa..e6cf294d3729 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
 obj-$(CONFIG_I2C_PNX)		+= i2c-pnx.o
 obj-$(CONFIG_I2C_PUV3)		+= i2c-puv3.o
 obj-$(CONFIG_I2C_PXA)		+= i2c-pxa.o
+obj-$(CONFIG_I2C_PXA_PCI)	+= i2c-pxa-pci.o
 obj-$(CONFIG_I2C_S3C2410)	+= i2c-s3c2410.o
 obj-$(CONFIG_I2C_S6000)		+= i2c-s6000.o
 obj-$(CONFIG_I2C_SH7760)	+= i2c-sh7760.o
@@ -67,6 +68,7 @@ obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
 obj-$(CONFIG_I2C_EG20T)		+= i2c-eg20t.o
 
 # External I2C/SMBus adapter drivers
+obj-$(CONFIG_I2C_DIOLAN_U2C)	+= i2c-diolan-u2c.o
 obj-$(CONFIG_I2C_PARPORT)	+= i2c-parport.o
 obj-$(CONFIG_I2C_PARPORT_LIGHT)	+= i2c-parport-light.o
 obj-$(CONFIG_I2C_TAOS_EVM)	+= i2c-taos-evm.o
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
new file mode 100644
index 000000000000..76366716a854
--- /dev/null
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -0,0 +1,535 @@
1/*
2 * Driver for the Diolan u2c-12 USB-I2C adapter
3 *
4 * Copyright (c) 2010-2011 Ericsson AB
5 *
6 * Derived from:
7 * i2c-tiny-usb.c
8 * Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2.
13 */
14
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/slab.h>
20#include <linux/usb.h>
21#include <linux/i2c.h>
22
23#define DRIVER_NAME "i2c-diolan-u2c"
24
25#define USB_VENDOR_ID_DIOLAN 0x0abf
26#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
27
28#define DIOLAN_OUT_EP 0x02
29#define DIOLAN_IN_EP 0x84
30
31/* commands via USB, must match command ids in the firmware */
32#define CMD_I2C_READ 0x01
33#define CMD_I2C_WRITE 0x02
34#define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */
35#define CMD_I2C_RELEASE_SDA 0x04
36#define CMD_I2C_RELEASE_SCL 0x05
37#define CMD_I2C_DROP_SDA 0x06
38#define CMD_I2C_DROP_SCL 0x07
39#define CMD_I2C_READ_SDA 0x08
40#define CMD_I2C_READ_SCL 0x09
41#define CMD_GET_FW_VERSION 0x0a
42#define CMD_GET_SERIAL 0x0b
43#define CMD_I2C_START 0x0c
44#define CMD_I2C_STOP 0x0d
45#define CMD_I2C_REPEATED_START 0x0e
46#define CMD_I2C_PUT_BYTE 0x0f
47#define CMD_I2C_GET_BYTE 0x10
48#define CMD_I2C_PUT_ACK 0x11
49#define CMD_I2C_GET_ACK 0x12
50#define CMD_I2C_PUT_BYTE_ACK 0x13
51#define CMD_I2C_GET_BYTE_ACK 0x14
52#define CMD_I2C_SET_SPEED 0x1b
53#define CMD_I2C_GET_SPEED 0x1c
54#define CMD_I2C_SET_CLK_SYNC 0x24
55#define CMD_I2C_GET_CLK_SYNC 0x25
56#define CMD_I2C_SET_CLK_SYNC_TO 0x26
57#define CMD_I2C_GET_CLK_SYNC_TO 0x27
58
59#define RESP_OK 0x00
60#define RESP_FAILED 0x01
61#define RESP_BAD_MEMADDR 0x04
62#define RESP_DATA_ERR 0x05
63#define RESP_NOT_IMPLEMENTED 0x06
64#define RESP_NACK 0x07
65#define RESP_TIMEOUT 0x09
66
67#define U2C_I2C_SPEED_FAST 0 /* 400 kHz */
68#define U2C_I2C_SPEED_STD 1 /* 100 kHz */
69#define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */
70#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
71
72#define U2C_I2C_FREQ_FAST 400000
73#define U2C_I2C_FREQ_STD 100000
74#define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10))
75
76#define DIOLAN_USB_TIMEOUT 100 /* in ms */
77#define DIOLAN_SYNC_TIMEOUT 20 /* in ms */
78
79#define DIOLAN_OUTBUF_LEN 128
80#define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4)
81#define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */
82
83/* Structure to hold all of our device specific stuff */
84struct i2c_diolan_u2c {
85 u8 obuffer[DIOLAN_OUTBUF_LEN]; /* output buffer */
86 u8 ibuffer[DIOLAN_INBUF_LEN]; /* input buffer */
87 struct usb_device *usb_dev; /* the usb device for this device */
88 struct usb_interface *interface;/* the interface for this device */
89 struct i2c_adapter adapter; /* i2c related things */
90 int olen; /* Output buffer length */
91 int ocount; /* Number of enqueued messages */
92};
93
94static uint frequency = U2C_I2C_FREQ_STD; /* I2C clock frequency in Hz */
95
96module_param(frequency, uint, S_IRUGO | S_IWUSR);
97MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
98
99/* usb layer */
100
101/* Send command to device, and get response. */
102static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
103{
104 int ret = 0;
105 int actual;
106 int i;
107
108 if (!dev->olen || !dev->ocount)
109 return -EINVAL;
110
111 ret = usb_bulk_msg(dev->usb_dev,
112 usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP),
113 dev->obuffer, dev->olen, &actual,
114 DIOLAN_USB_TIMEOUT);
115 if (!ret) {
116 for (i = 0; i < dev->ocount; i++) {
117 int tmpret;
118
119 tmpret = usb_bulk_msg(dev->usb_dev,
120 usb_rcvbulkpipe(dev->usb_dev,
121 DIOLAN_IN_EP),
122 dev->ibuffer,
123 sizeof(dev->ibuffer), &actual,
124 DIOLAN_USB_TIMEOUT);
125 /*
126 * Stop command processing if a previous command
127 * returned an error.
128 * Note that we still need to retrieve all messages.
129 */
130 if (ret < 0)
131 continue;
132 ret = tmpret;
133 if (ret == 0 && actual > 0) {
134 switch (dev->ibuffer[actual - 1]) {
135 case RESP_NACK:
136 /*
137 * Return ENXIO if NACK was received as
138 * response to the address phase,
139 * EIO otherwise
140 */
141 ret = i == 1 ? -ENXIO : -EIO;
142 break;
143 case RESP_TIMEOUT:
144 ret = -ETIMEDOUT;
145 break;
146 case RESP_OK:
147 /* strip off return code */
148 ret = actual - 1;
149 break;
150 default:
151 ret = -EIO;
152 break;
153 }
154 }
155 }
156 }
157 dev->olen = 0;
158 dev->ocount = 0;
159 return ret;
160}
161
162static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush)
163{
164 if (flush || dev->olen >= DIOLAN_FLUSH_LEN)
165 return diolan_usb_transfer(dev);
166 return 0;
167}
168
169/* Send command (no data) */
170static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush)
171{
172 dev->obuffer[dev->olen++] = command;
173 dev->ocount++;
174 return diolan_write_cmd(dev, flush);
175}
176
177/* Send command with one byte of data */
178static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data,
179 bool flush)
180{
181 dev->obuffer[dev->olen++] = command;
182 dev->obuffer[dev->olen++] = data;
183 dev->ocount++;
184 return diolan_write_cmd(dev, flush);
185}
186
187/* Send command with two bytes of data */
188static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1,
189 u8 d2, bool flush)
190{
191 dev->obuffer[dev->olen++] = command;
192 dev->obuffer[dev->olen++] = d1;
193 dev->obuffer[dev->olen++] = d2;
194 dev->ocount++;
195 return diolan_write_cmd(dev, flush);
196}
197
198/*
199 * Flush input queue.
200 * If we don't do this at startup and the controller has queued up
201 * messages which were not retrieved, it will stop responding
202 * at some point.
203 */
204static void diolan_flush_input(struct i2c_diolan_u2c *dev)
205{
206 int i;
207
208 for (i = 0; i < 10; i++) {
209 int actual = 0;
210 int ret;
211
212 ret = usb_bulk_msg(dev->usb_dev,
213 usb_rcvbulkpipe(dev->usb_dev, DIOLAN_IN_EP),
214 dev->ibuffer, sizeof(dev->ibuffer), &actual,
215 DIOLAN_USB_TIMEOUT);
216 if (ret < 0 || actual == 0)
217 break;
218 }
219 if (i == 10)
220 dev_err(&dev->interface->dev, "Failed to flush input buffer\n");
221}
222
223static int diolan_i2c_start(struct i2c_diolan_u2c *dev)
224{
225 return diolan_usb_cmd(dev, CMD_I2C_START, false);
226}
227
228static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev)
229{
230 return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false);
231}
232
233static int diolan_i2c_stop(struct i2c_diolan_u2c *dev)
234{
235 return diolan_usb_cmd(dev, CMD_I2C_STOP, true);
236}
237
238static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack,
239 u8 *byte)
240{
241 int ret;
242
243 ret = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true);
244 if (ret > 0)
245 *byte = dev->ibuffer[0];
246 else if (ret == 0)
247 ret = -EIO;
248
249 return ret;
250}
251
252static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte)
253{
254 return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false);
255}
256
257static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed)
258{
259 return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true);
260}
261
262/* Enable or disable clock synchronization (stretching) */
263static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable)
264{
265 return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true);
266}
267
268/* Set clock synchronization timeout in ms */
269static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms)
270{
271 int to_val = ms * 10;
272
273 return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO,
274 to_val & 0xff, (to_val >> 8) & 0xff, true);
275}
276
277static void diolan_fw_version(struct i2c_diolan_u2c *dev)
278{
279 int ret;
280
281 ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true);
282 if (ret >= 2)
283 dev_info(&dev->interface->dev,
284 "Diolan U2C firmware version %u.%u\n",
285 (unsigned int)dev->ibuffer[0],
286 (unsigned int)dev->ibuffer[1]);
287}
288
289static void diolan_get_serial(struct i2c_diolan_u2c *dev)
290{
291 int ret;
292 u32 serial;
293
294 ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true);
295 if (ret >= 4) {
296 serial = le32_to_cpu(*(u32 *)dev->ibuffer);
297 dev_info(&dev->interface->dev,
298 "Diolan U2C serial number %u\n", serial);
299 }
300}
301
302static int diolan_init(struct i2c_diolan_u2c *dev)
303{
304 int speed, ret;
305
306 if (frequency >= 200000) {
307 speed = U2C_I2C_SPEED_FAST;
308 frequency = U2C_I2C_FREQ_FAST;
309 } else if (frequency >= 100000 || frequency == 0) {
310 speed = U2C_I2C_SPEED_STD;
311 frequency = U2C_I2C_FREQ_STD;
312 } else {
313 speed = U2C_I2C_SPEED(frequency);
314 if (speed > U2C_I2C_SPEED_2KHZ)
315 speed = U2C_I2C_SPEED_2KHZ;
316 frequency = U2C_I2C_FREQ(speed);
317 }
318
319 dev_info(&dev->interface->dev,
320 "Diolan U2C at USB bus %03d address %03d speed %d Hz\n",
321 dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency);
322
323 diolan_flush_input(dev);
324 diolan_fw_version(dev);
325 diolan_get_serial(dev);
326
327 /* Set I2C speed */
328 ret = diolan_set_speed(dev, speed);
329 if (ret < 0)
330 return ret;
331
332 /* Configure I2C clock synchronization */
333 ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST);
334 if (ret < 0)
335 return ret;
336
337 if (speed != U2C_I2C_SPEED_FAST)
338 ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT);
339
340 return ret;
341}
342
343/* i2c layer */
344
345static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
346 int num)
347{
348 struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter);
349 struct i2c_msg *pmsg;
350 int i, j;
351 int ret, sret;
352
353 ret = diolan_i2c_start(dev);
354 if (ret < 0)
355 return ret;
356
357 for (i = 0; i < num; i++) {
358 pmsg = &msgs[i];
359 if (i) {
360 ret = diolan_i2c_repeated_start(dev);
361 if (ret < 0)
362 goto abort;
363 }
364 if (pmsg->flags & I2C_M_RD) {
365 ret =
366 diolan_i2c_put_byte_ack(dev, (pmsg->addr << 1) | 1);
367 if (ret < 0)
368 goto abort;
369 for (j = 0; j < pmsg->len; j++) {
370 u8 byte;
371 bool ack = j < pmsg->len - 1;
372
373 /*
374 * Don't send NACK if this is the first byte
375 * of a SMBUS_BLOCK message.
376 */
377 if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN))
378 ack = true;
379
380 ret = diolan_i2c_get_byte_ack(dev, ack, &byte);
381 if (ret < 0)
382 goto abort;
383 /*
384 * Adjust count if first received byte is length
385 */
386 if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) {
387 if (byte == 0
388 || byte > I2C_SMBUS_BLOCK_MAX) {
389 ret = -EPROTO;
390 goto abort;
391 }
392 pmsg->len += byte;
393 }
394 pmsg->buf[j] = byte;
395 }
396 } else {
397 ret = diolan_i2c_put_byte_ack(dev, pmsg->addr << 1);
398 if (ret < 0)
399 goto abort;
400 for (j = 0; j < pmsg->len; j++) {
401 ret = diolan_i2c_put_byte_ack(dev,
402 pmsg->buf[j]);
403 if (ret < 0)
404 goto abort;
405 }
406 }
407 }
408abort:
409 sret = diolan_i2c_stop(dev);
410 if (sret < 0 && ret >= 0)
411 ret = sret;
412 return ret;
413}
414
415/*
416 * Return list of supported functionality.
417 */
418static u32 diolan_usb_func(struct i2c_adapter *a)
419{
420 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
421 I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
422}
423
424static const struct i2c_algorithm diolan_usb_algorithm = {
425 .master_xfer = diolan_usb_xfer,
426 .functionality = diolan_usb_func,
427};
428
429/* device layer */
430
431static const struct usb_device_id diolan_u2c_table[] = {
432 { USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) },
433 { }
434};
435
436MODULE_DEVICE_TABLE(usb, diolan_u2c_table);
437
438static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
439{
440 usb_put_dev(dev->usb_dev);
441 kfree(dev);
442}
443
444static int diolan_u2c_probe(struct usb_interface *interface,
445 const struct usb_device_id *id)
446{
447 struct i2c_diolan_u2c *dev;
448 int ret;
449
450 /* allocate memory for our device state and initialize it */
451 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
452 if (dev == NULL) {
453 dev_err(&interface->dev, "no memory for device state\n");
454 ret = -ENOMEM;
455 goto error;
456 }
457
458 dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
459 dev->interface = interface;
460
461 /* save our data pointer in this interface device */
462 usb_set_intfdata(interface, dev);
463
464 /* setup i2c adapter description */
465 dev->adapter.owner = THIS_MODULE;
466 dev->adapter.class = I2C_CLASS_HWMON;
467 dev->adapter.algo = &diolan_usb_algorithm;
468 i2c_set_adapdata(&dev->adapter, dev);
469 snprintf(dev->adapter.name, sizeof(dev->adapter.name),
470 DRIVER_NAME " at bus %03d device %03d",
471 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
472
473 dev->adapter.dev.parent = &dev->interface->dev;
474
475 /* initialize diolan i2c interface */
476 ret = diolan_init(dev);
477 if (ret < 0) {
478 dev_err(&interface->dev, "failed to initialize adapter\n");
479 goto error_free;
480 }
481
482 /* and finally attach to i2c layer */
483 ret = i2c_add_adapter(&dev->adapter);
484 if (ret < 0) {
485 dev_err(&interface->dev, "failed to add I2C adapter\n");
486 goto error_free;
487 }
488
489 dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n");
490
491 return 0;
492
493error_free:
494 usb_set_intfdata(interface, NULL);
495 diolan_u2c_free(dev);
496error:
497 return ret;
498}
499
500static void diolan_u2c_disconnect(struct usb_interface *interface)
501{
502 struct i2c_diolan_u2c *dev = usb_get_intfdata(interface);
503
504 i2c_del_adapter(&dev->adapter);
505 usb_set_intfdata(interface, NULL);
506 diolan_u2c_free(dev);
507
508 dev_dbg(&interface->dev, "disconnected\n");
509}
510
511static struct usb_driver diolan_u2c_driver = {
512 .name = DRIVER_NAME,
513 .probe = diolan_u2c_probe,
514 .disconnect = diolan_u2c_disconnect,
515 .id_table = diolan_u2c_table,
516};
517
518static int __init diolan_u2c_init(void)
519{
520 /* register this driver with the USB subsystem */
521 return usb_register(&diolan_u2c_driver);
522}
523
524static void __exit diolan_u2c_exit(void)
525{
526 /* deregister this driver with the USB subsystem */
527 usb_deregister(&diolan_u2c_driver);
528}
529
530module_init(diolan_u2c_init);
531module_exit(diolan_u2c_exit);
532
533MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
534MODULE_DESCRIPTION(DRIVER_NAME " driver");
535MODULE_LICENSE("GPL");
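The U2C_I2C_SPEED()/U2C_I2C_FREQ() macros in the driver above are inverses built around a bit period of 2 * (speed - 1) + 10 microseconds, which is how diolan_init() rounds any requested frequency to the nearest code the firmware accepts. A small userspace sketch of the same arithmetic (DIV_ROUND_UP is restated here so the program is self-contained):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define U2C_I2C_SPEED(f)	((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
#define U2C_I2C_FREQ(s)		(1000000 / (2 * ((s) - 1) + 10))

int main(void)
{
	unsigned int req = 50000;		/* ask for 50 kHz */
	unsigned int code = U2C_I2C_SPEED(req);	/* -> speed code 6 */

	/* Round trip: code 6 maps back to exactly 50000 Hz. */
	printf("code %u -> %u Hz\n", code, U2C_I2C_FREQ(code));
	return 0;
}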
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 50ea1f43bdc1..878a12026af2 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -132,6 +132,13 @@
 #define pch_pci_dbg(pdev, fmt, arg...)  \
 	dev_dbg(&pdev->dev, "%s :" fmt, __func__, ##arg)
 
+/*
+ * Maximum number of I2C channel instances:
+ * Intel EG20T PCH:              1 channel
+ * OKI SEMICONDUCTOR ML7213 IOH: 2 channels
+ */
+#define PCH_I2C_MAX_DEV	2
+
 /**
  * struct i2c_algo_pch_data - for I2C driver functionalities
  * @pch_adapter:		stores the reference to i2c_adapter structure
@@ -156,12 +163,14 @@ struct i2c_algo_pch_data {
  * @pch_data:		stores a list of i2c_algo_pch_data
  * @pch_i2c_suspended:	specifies whether the system is suspended or not
  *			perhaps with more lines and words.
+ * @ch_num:		specifies the number of i2c instances
  *
  * pch_data has as many elements as maximum I2C channels
  */
 struct adapter_info {
-	struct i2c_algo_pch_data pch_data;
+	struct i2c_algo_pch_data pch_data[PCH_I2C_MAX_DEV];
 	bool pch_i2c_suspended;
+	int ch_num;
 };
 
 
@@ -170,8 +179,13 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
 static wait_queue_head_t pch_event;
 static DEFINE_MUTEX(pch_mutex);
 
+/* Definition for ML7213 by OKI SEMICONDUCTOR */
+#define PCI_VENDOR_ID_ROHM		0x10DB
+#define PCI_DEVICE_ID_ML7213_I2C	0x802D
+
 static struct pci_device_id __devinitdata pch_pcidev_id[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_I2C)},
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
+	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
 	{0,}
 };
 
@@ -212,8 +226,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
 	/* Initialize I2C registers */
 	iowrite32(0x21, p + PCH_I2CNF);
 
-	pch_setbit(adap->pch_base_address, PCH_I2CCTL,
-		   PCH_I2CCTL_I2CMEN);
+	pch_setbit(adap->pch_base_address, PCH_I2CCTL, PCH_I2CCTL_I2CMEN);
 
 	if (pch_i2c_speed != 400)
 		pch_i2c_speed = 100;
@@ -255,7 +268,7 @@ static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
  * @timeout:	waiting time counter (us).
  */
 static s32 pch_i2c_wait_for_bus_idle(struct i2c_algo_pch_data *adap,
-				  s32 timeout)
+				     s32 timeout)
 {
 	void __iomem *p = adap->pch_base_address;
 
@@ -475,8 +488,8 @@ static void pch_i2c_sendnack(struct i2c_algo_pch_data *adap)
  * @last:	specifies whether last message or not.
  * @first:	specifies whether first message or not.
  */
-s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
-		      u32 last, u32 first)
+static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
+			     u32 last, u32 first)
 {
 	struct i2c_algo_pch_data *adap = i2c_adap->algo_data;
 
@@ -569,10 +582,10 @@ s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
 }
 
 /**
- * pch_i2c_cb_ch0() - Interrupt handler Call back function
+ * pch_i2c_cb() - Interrupt handler Call back function
  * @adap:	Pointer to struct i2c_algo_pch_data.
  */
-static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
+static void pch_i2c_cb(struct i2c_algo_pch_data *adap)
 {
 	u32 sts;
 	void __iomem *p = adap->pch_base_address;
@@ -600,24 +613,30 @@ static void pch_i2c_cb_ch0(struct i2c_algo_pch_data *adap)
  */
 static irqreturn_t pch_i2c_handler(int irq, void *pData)
 {
-	s32 reg_val;
-
-	struct i2c_algo_pch_data *adap_data = (struct i2c_algo_pch_data *)pData;
-	void __iomem *p = adap_data->pch_base_address;
-	u32 mode = ioread32(p + PCH_I2CMOD) & (BUFFER_MODE | EEPROM_SR_MODE);
-
-	if (mode != NORMAL_MODE) {
-		pch_err(adap_data, "I2C mode is not supported\n");
-		return IRQ_NONE;
+	u32 reg_val;
+	int flag;
+	int i;
+	struct adapter_info *adap_info = pData;
+	void __iomem *p;
+	u32 mode;
+
+	for (i = 0, flag = 0; i < adap_info->ch_num; i++) {
+		p = adap_info->pch_data[i].pch_base_address;
+		mode = ioread32(p + PCH_I2CMOD);
+		mode &= BUFFER_MODE | EEPROM_SR_MODE;
+		if (mode != NORMAL_MODE) {
+			pch_err(adap_info->pch_data,
+				"I2C-%d mode(%d) is not supported\n", i, mode);
+			continue;
+		}
+		reg_val = ioread32(p + PCH_I2CSR);
+		if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT)) {
+			pch_i2c_cb(&adap_info->pch_data[i]);
+			flag = 1;
+		}
 	}
 
-	reg_val = ioread32(p + PCH_I2CSR);
-	if (reg_val & (I2CMAL_BIT | I2CMCF_BIT | I2CMIF_BIT))
-		pch_i2c_cb_ch0(adap_data);
-	else
-		return IRQ_NONE;
-
-	return IRQ_HANDLED;
+	return flag ? IRQ_HANDLED : IRQ_NONE;
 }
 
 /**
@@ -627,7 +646,7 @@ static irqreturn_t pch_i2c_handler(int irq, void *pData)
  * @num:	number of messages.
  */
 static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
-		struct i2c_msg *msgs, s32 num)
+			struct i2c_msg *msgs, s32 num)
 {
 	struct i2c_msg *pmsg;
 	u32 i = 0;
@@ -710,11 +729,13 @@ static void pch_i2c_disbl_int(struct i2c_algo_pch_data *adap)
 }
 
 static int __devinit pch_i2c_probe(struct pci_dev *pdev,
-				const struct pci_device_id *id)
+				   const struct pci_device_id *id)
 {
 	void __iomem *base_addr;
-	s32 ret;
+	int ret;
+	int i, j;
 	struct adapter_info *adap_info;
+	struct i2c_adapter *pch_adap;
 
 	pch_pci_dbg(pdev, "Entered.\n");
 
@@ -744,44 +765,48 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 		goto err_pci_iomap;
 	}
 
-	adap_info->pch_i2c_suspended = false;
+	/* Set the number of I2C channel instances */
+	adap_info->ch_num = id->driver_data;
 
-	adap_info->pch_data.p_adapter_info = adap_info;
+	for (i = 0; i < adap_info->ch_num; i++) {
+		pch_adap = &adap_info->pch_data[i].pch_adapter;
+		adap_info->pch_i2c_suspended = false;
 
-	adap_info->pch_data.pch_adapter.owner = THIS_MODULE;
-	adap_info->pch_data.pch_adapter.class = I2C_CLASS_HWMON;
-	strcpy(adap_info->pch_data.pch_adapter.name, KBUILD_MODNAME);
-	adap_info->pch_data.pch_adapter.algo = &pch_algorithm;
-	adap_info->pch_data.pch_adapter.algo_data =
-		&adap_info->pch_data;
+		adap_info->pch_data[i].p_adapter_info = adap_info;
 
-	/* (i * 0x80) + base_addr; */
-	adap_info->pch_data.pch_base_address = base_addr;
+		pch_adap->owner = THIS_MODULE;
+		pch_adap->class = I2C_CLASS_HWMON;
+		strcpy(pch_adap->name, KBUILD_MODNAME);
+		pch_adap->algo = &pch_algorithm;
+		pch_adap->algo_data = &adap_info->pch_data[i];
 
-	adap_info->pch_data.pch_adapter.dev.parent = &pdev->dev;
+		/* base_addr + offset; */
+		adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i;
 
-	ret = i2c_add_adapter(&(adap_info->pch_data.pch_adapter));
+		pch_adap->dev.parent = &pdev->dev;
 
-	if (ret) {
-		pch_pci_err(pdev, "i2c_add_adapter FAILED\n");
-		goto err_i2c_add_adapter;
-	}
+		ret = i2c_add_adapter(pch_adap);
+		if (ret) {
+			pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
+			goto err_i2c_add_adapter;
+		}
 
-	pch_i2c_init(&adap_info->pch_data);
+		pch_i2c_init(&adap_info->pch_data[i]);
+	}
 	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-		  KBUILD_MODNAME, &adap_info->pch_data);
+		  KBUILD_MODNAME, adap_info);
 	if (ret) {
 		pch_pci_err(pdev, "request_irq FAILED\n");
-		goto err_request_irq;
+		goto err_i2c_add_adapter;
 	}
 
 	pci_set_drvdata(pdev, adap_info);
 	pch_pci_dbg(pdev, "returns %d.\n", ret);
 	return 0;
 
-err_request_irq:
-	i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
 err_i2c_add_adapter:
+	for (j = 0; j < i; j++)
+		i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
 	pci_iounmap(pdev, base_addr);
 err_pci_iomap:
 	pci_release_regions(pdev);
@@ -794,17 +819,22 @@ err_pci_enable:
 
 static void __devexit pch_i2c_remove(struct pci_dev *pdev)
 {
+	int i;
 	struct adapter_info *adap_info = pci_get_drvdata(pdev);
 
-	pch_i2c_disbl_int(&adap_info->pch_data);
-	free_irq(pdev->irq, &adap_info->pch_data);
-	i2c_del_adapter(&(adap_info->pch_data.pch_adapter));
+	free_irq(pdev->irq, adap_info);
 
-	if (adap_info->pch_data.pch_base_address) {
-		pci_iounmap(pdev, adap_info->pch_data.pch_base_address);
-		adap_info->pch_data.pch_base_address = 0;
+	for (i = 0; i < adap_info->ch_num; i++) {
+		pch_i2c_disbl_int(&adap_info->pch_data[i]);
+		i2c_del_adapter(&adap_info->pch_data[i].pch_adapter);
 	}
 
+	if (adap_info->pch_data[0].pch_base_address)
+		pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
+
+	for (i = 0; i < adap_info->ch_num; i++)
+		adap_info->pch_data[i].pch_base_address = 0;
+
 	pci_set_drvdata(pdev, NULL);
 
 	pci_release_regions(pdev);
@@ -817,17 +847,22 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
 static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	int ret;
+	int i;
 	struct adapter_info *adap_info = pci_get_drvdata(pdev);
-	void __iomem *p = adap_info->pch_data.pch_base_address;
+	void __iomem *p = adap_info->pch_data[0].pch_base_address;
 
 	adap_info->pch_i2c_suspended = true;
 
-	while ((adap_info->pch_data.pch_i2c_xfer_in_progress)) {
-		/* Wait until all channel transfers are completed */
-		msleep(20);
+	for (i = 0; i < adap_info->ch_num; i++) {
+		while ((adap_info->pch_data[i].pch_i2c_xfer_in_progress)) {
+			/* Wait until all channel transfers are completed */
+			msleep(20);
+		}
 	}
+
 	/* Disable the i2c interrupts */
-	pch_i2c_disbl_int(&adap_info->pch_data);
+	for (i = 0; i < adap_info->ch_num; i++)
+		pch_i2c_disbl_int(&adap_info->pch_data[i]);
 
 	pch_pci_dbg(pdev, "I2CSR = %x I2CBUFSTA = %x I2CESRSTA = %x "
 		    "invoked function pch_i2c_disbl_int successfully\n",
@@ -850,6 +885,7 @@ static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state)
 
 static int pch_i2c_resume(struct pci_dev *pdev)
 {
+	int i;
 	struct adapter_info *adap_info = pci_get_drvdata(pdev);
 
 	pci_set_power_state(pdev, PCI_D0);
@@ -862,7 +898,8 @@ static int pch_i2c_resume(struct pci_dev *pdev)
 
 	pci_enable_wake(pdev, PCI_D3hot, 0);
 
-	pch_i2c_init(&adap_info->pch_data);
+	for (i = 0; i < adap_info->ch_num; i++)
+		pch_i2c_init(&adap_info->pch_data[i]);
 
 	adap_info->pch_i2c_suspended = false;
 
@@ -894,7 +931,7 @@ static void __exit pch_pci_exit(void)
 }
 module_exit(pch_pci_exit);
 
-MODULE_DESCRIPTION("PCH I2C PCI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
 module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
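The reworked pch_i2c_handler() is the standard shape for one PCI function exposing several controller instances on a single shared interrupt line: poll every channel, service the ones with a pending status bit, and return IRQ_HANDLED only if at least one fired, so the IRQ core can still detect spurious interrupts. A generic, hedged sketch of that pattern (the my_* types and helpers are hypothetical):

static irqreturn_t my_multi_chan_handler(int irq, void *data)
{
	struct my_adapter_info *info = data;	/* one per PCI device */
	int i, handled = 0;

	for (i = 0; i < info->ch_num; i++) {
		u32 status = my_read_status(&info->chan[i]);	/* hypothetical */

		if (!status)
			continue;			/* channel did not interrupt */

		my_service_channel(&info->chan[i], status);	/* hypothetical */
		handled = 1;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}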
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 8022e2390a5a..caf96dc8ca1b 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -118,6 +118,8 @@ static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
 {
 	mxs_reset_block(i2c->regs);
 	writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+	writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
+	       i2c->regs + MXS_I2C_QUEUECTRL_SET);
 }
 
 static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len,
@@ -347,8 +349,6 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
 
 	/* Do reset to enforce correct startup after pinmuxing */
 	mxs_i2c_reset(i2c);
-	writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
-	       i2c->regs + MXS_I2C_QUEUECTRL_SET);
 
 	adap = &i2c->adapter;
 	strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
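Moving the QUEUECTRL write into mxs_i2c_reset() turns PIO-queue mode into part of the reset invariant: the block reset wipes the configuration, so any later caller of the reset helper gets the mode re-applied instead of depending on probe() having set it once. The general shape, sketched with hypothetical names:

/* Sketch: keep all mandatory post-reset configuration inside the reset
 * helper itself, so no caller can reset the block and forget to redo it. */
static void my_block_reset(struct my_dev *d)
{
	my_soft_reset(d->regs);					/* hypothetical: clears config */
	writel(d->irq_mask << 8, d->regs + MY_CTRL1_SET);	/* re-arm interrupts */
	writel(MY_PIO_QUEUE_MODE, d->regs + MY_QUEUECTRL_SET);	/* re-select mode */
}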
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
new file mode 100644
index 000000000000..6659d269b841
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -0,0 +1,176 @@
1/*
2 * The CE4100's I2C device is more or less the same one as found on PXA.
3 * It does not support slave mode, and the register layout is slightly different. This PCI
4 * device provides three BARs; each contains a single I2C controller.
5 */
6#include <linux/pci.h>
7#include <linux/platform_device.h>
8#include <linux/i2c/pxa-i2c.h>
9#include <linux/of.h>
10#include <linux/of_device.h>
11#include <linux/of_address.h>
12
13#define CE4100_PCI_I2C_DEVS 3
14
15struct ce4100_devices {
16 struct platform_device *pdev[CE4100_PCI_I2C_DEVS];
17};
18
19static struct platform_device *add_i2c_device(struct pci_dev *dev, int bar)
20{
21 struct platform_device *pdev;
22 struct i2c_pxa_platform_data pdata;
23 struct resource res[2];
24 struct device_node *child;
25 static int devnum;
26 int ret;
27
28 memset(&pdata, 0, sizeof(struct i2c_pxa_platform_data));
29 memset(&res, 0, sizeof(res));
30
31 res[0].flags = IORESOURCE_MEM;
32 res[0].start = pci_resource_start(dev, bar);
33 res[0].end = pci_resource_end(dev, bar);
34
35 res[1].flags = IORESOURCE_IRQ;
36 res[1].start = dev->irq;
37 res[1].end = dev->irq;
38
39 for_each_child_of_node(dev->dev.of_node, child) {
40 const void *prop;
41 struct resource r;
42 int ret;
43
44 ret = of_address_to_resource(child, 0, &r);
45 if (ret < 0)
46 continue;
47 if (r.start != res[0].start)
48 continue;
49 if (r.end != res[0].end)
50 continue;
51 if (r.flags != res[0].flags)
52 continue;
53
54 prop = of_get_property(child, "fast-mode", NULL);
55 if (prop)
56 pdata.fast_mode = 1;
57
58 break;
59 }
60
61 if (!child) {
62 dev_err(&dev->dev, "failed to match a DT node for bar %d.\n",
63 bar);
64 ret = -EINVAL;
65 goto out;
66 }
67
68 pdev = platform_device_alloc("ce4100-i2c", devnum);
69 if (!pdev) {
70 of_node_put(child);
71 ret = -ENOMEM;
72 goto out;
73 }
74 pdev->dev.parent = &dev->dev;
75 pdev->dev.of_node = child;
76
77 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
78 if (ret)
79 goto err;
80
81 ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
82 if (ret)
83 goto err;
84
85 ret = platform_device_add(pdev);
86 if (ret)
87 goto err;
88 devnum++;
89 return pdev;
90err:
91 platform_device_put(pdev);
92out:
93 return ERR_PTR(ret);
94}
95
96static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
97 const struct pci_device_id *ent)
98{
99 int ret;
100 int i;
101 struct ce4100_devices *sds;
102
103 ret = pci_enable_device_mem(dev);
104 if (ret)
105 return ret;
106
	if (!dev->dev.of_node) {
		dev_err(&dev->dev, "Missing device tree node.\n");
		ret = -EINVAL;
		goto err_mem;	/* also undoes pci_enable_device_mem() */
	}
	sds = kzalloc(sizeof(*sds), GFP_KERNEL);
	if (!sds) {
		ret = -ENOMEM;
		goto err_mem;
	}
114
115 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
		sds->pdev[i] = add_i2c_device(dev, i);
		if (IS_ERR(sds->pdev[i])) {
			ret = PTR_ERR(sds->pdev[i]);
			while (--i >= 0)
				platform_device_unregister(sds->pdev[i]);
			goto err_dev_add;
		}
122 }
123 pci_set_drvdata(dev, sds);
124 return 0;
125
126err_dev_add:
127 pci_set_drvdata(dev, NULL);
128 kfree(sds);
129err_mem:
130 pci_disable_device(dev);
131 return ret;
132}
133
134static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
135{
136 struct ce4100_devices *sds;
137 unsigned int i;
138
139 sds = pci_get_drvdata(dev);
140 pci_set_drvdata(dev, NULL);
141
142 for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
143 platform_device_unregister(sds->pdev[i]);
144
145 pci_disable_device(dev);
146 kfree(sds);
147}
148
149static struct pci_device_id ce4100_i2c_devices[] __devinitdata = {
150 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
151 { },
152};
153MODULE_DEVICE_TABLE(pci, ce4100_i2c_devices);
154
155static struct pci_driver ce4100_i2c_driver = {
156 .name = "ce4100_i2c",
157 .id_table = ce4100_i2c_devices,
158 .probe = ce4100_i2c_probe,
159 .remove = __devexit_p(ce4100_i2c_remove),
160};
161
162static int __init ce4100_i2c_init(void)
163{
164 return pci_register_driver(&ce4100_i2c_driver);
165}
166module_init(ce4100_i2c_init);
167
168static void __exit ce4100_i2c_exit(void)
169{
170 pci_unregister_driver(&ce4100_i2c_driver);
171}
172module_exit(ce4100_i2c_exit);
173
174MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
175MODULE_LICENSE("GPL v2");
176MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
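ce4100_i2c_probe() uses the common unwind idiom for registering an array of children: on the first failure, walk back over only the entries that already succeeded. Reduced to its core as a hedged sketch (register_child()/unregister_child() and NUM_CHILDREN stand in for the platform-device calls above):

for (i = 0; i < NUM_CHILDREN; i++) {
	child[i] = register_child(parent, i);		/* may return ERR_PTR() */
	if (IS_ERR(child[i])) {
		int err = PTR_ERR(child[i]);

		while (--i >= 0)			/* undo successes only */
			unregister_child(child[i]);
		return err;
	}
}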
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f4c19a97e0b3..f59224a5c761 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -29,38 +29,75 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/i2c-pxa.h>
+#include <linux/of_i2c.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/i2c/pxa-i2c.h>
 
 #include <asm/irq.h>
-#include <plat/i2c.h>
 
+#ifndef CONFIG_HAVE_CLK
+#define clk_get(dev, id)	NULL
+#define clk_put(clk)		do { } while (0)
+#define clk_disable(clk)	do { } while (0)
+#define clk_enable(clk)		do { } while (0)
+#endif
+
+struct pxa_reg_layout {
+	u32 ibmr;
+	u32 idbr;
+	u32 icr;
+	u32 isr;
+	u32 isar;
+};
+
+enum pxa_i2c_types {
+	REGS_PXA2XX,
+	REGS_PXA3XX,
+	REGS_CE4100,
+};
+
 /*
- * I2C register offsets will be shifted 0 or 1 bit left, depending on
- * different SoCs
+ * I2C register definitions
  */
-#define REG_SHIFT_0	(0 << 0)
-#define REG_SHIFT_1	(1 << 0)
-#define REG_SHIFT(d)	((d) & 0x1)
+static struct pxa_reg_layout pxa_reg_layout[] = {
+	[REGS_PXA2XX] = {
+		.ibmr = 0x00,
+		.idbr = 0x08,
+		.icr = 0x10,
+		.isr = 0x18,
+		.isar = 0x20,
+	},
+	[REGS_PXA3XX] = {
+		.ibmr = 0x00,
+		.idbr = 0x04,
+		.icr = 0x08,
+		.isr = 0x0c,
+		.isar = 0x10,
+	},
+	[REGS_CE4100] = {
+		.ibmr = 0x14,
+		.idbr = 0x0c,
+		.icr = 0x00,
+		.isr = 0x04,
+		/* no isar register */
+	},
+};
 
 static const struct platform_device_id i2c_pxa_id_table[] = {
-	{ "pxa2xx-i2c",		REG_SHIFT_1 },
-	{ "pxa3xx-pwri2c",	REG_SHIFT_0 },
+	{ "pxa2xx-i2c",		REGS_PXA2XX },
+	{ "pxa3xx-pwri2c",	REGS_PXA3XX },
+	{ "ce4100-i2c",		REGS_CE4100 },
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
 
 /*
- * I2C registers and bit definitions
+ * I2C bit definitions
  */
-#define IBMR		(0x00)
-#define IDBR		(0x08)
-#define ICR		(0x10)
-#define ISR		(0x18)
-#define ISAR		(0x20)
 
 #define ICR_START	(1 << 0)	/* start bit */
 #define ICR_STOP	(1 << 1)	/* stop bit */
@@ -111,7 +148,11 @@ struct pxa_i2c {
 	u32			icrlog[32];
 
 	void __iomem		*reg_base;
-	unsigned int		reg_shift;
+	void __iomem		*reg_ibmr;
+	void __iomem		*reg_idbr;
+	void __iomem		*reg_icr;
+	void __iomem		*reg_isr;
+	void __iomem		*reg_isar;
 
 	unsigned long		iobase;
 	unsigned long		iosize;
@@ -121,11 +162,11 @@ struct pxa_i2c {
 	unsigned int		fast_mode :1;
 };
 
-#define _IBMR(i2c)	((i2c)->reg_base + (0x0 << (i2c)->reg_shift))
-#define _IDBR(i2c)	((i2c)->reg_base + (0x4 << (i2c)->reg_shift))
-#define _ICR(i2c)	((i2c)->reg_base + (0x8 << (i2c)->reg_shift))
-#define _ISR(i2c)	((i2c)->reg_base + (0xc << (i2c)->reg_shift))
-#define _ISAR(i2c)	((i2c)->reg_base + (0x10 << (i2c)->reg_shift))
+#define _IBMR(i2c)	((i2c)->reg_ibmr)
+#define _IDBR(i2c)	((i2c)->reg_idbr)
+#define _ICR(i2c)	((i2c)->reg_icr)
+#define _ISR(i2c)	((i2c)->reg_isr)
+#define _ISAR(i2c)	((i2c)->reg_isar)
 
 /*
  * I2C Slave mode address
@@ -418,7 +459,8 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
 	writel(I2C_ISR_INIT, _ISR(i2c));
 	writel(readl(_ICR(i2c)) & ~ICR_UR, _ICR(i2c));
 
-	writel(i2c->slave_addr, _ISAR(i2c));
+	if (i2c->reg_isar)
+		writel(i2c->slave_addr, _ISAR(i2c));
 
 	/* set control register values */
 	writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
@@ -729,8 +771,10 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
 	 */
 	ret = i2c->msg_idx;
 
-	if (timeout == 0)
+	if (!timeout && i2c->msg_num) {
 		i2c_pxa_scream_blue_murder(i2c, "timeout");
+		ret = I2C_RETRY;
+	}
 
  out:
 	return ret;
@@ -915,11 +959,16 @@ static void i2c_pxa_irq_rxfull(struct pxa_i2c *i2c, u32 isr)
 	writel(icr, _ICR(i2c));
 }
 
+#define VALID_INT_SOURCE	(ISR_SSD | ISR_ALD | ISR_ITE | ISR_IRF | \
+				ISR_SAD | ISR_BED)
 static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
 {
 	struct pxa_i2c *i2c = dev_id;
 	u32 isr = readl(_ISR(i2c));
 
+	if (!(isr & VALID_INT_SOURCE))
+		return IRQ_NONE;
+
 	if (i2c_debug > 2 && 0) {
 		dev_dbg(&i2c->adap.dev, "%s: ISR=%08x, ICR=%08x, IBMR=%02x\n",
 			__func__, isr, readl(_ICR(i2c)), readl(_IBMR(i2c)));
@@ -934,7 +983,7 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
 	/*
 	 * Always clear all pending IRQs.
 	 */
-	writel(isr & (ISR_SSD|ISR_ALD|ISR_ITE|ISR_IRF|ISR_SAD|ISR_BED), _ISR(i2c));
+	writel(isr & VALID_INT_SOURCE, _ISR(i2c));
 
 	if (isr & ISR_SAD)
 		i2c_pxa_slave_start(i2c, isr);
@@ -1001,6 +1050,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
 	struct resource *res;
 	struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
 	const struct platform_device_id *id = platform_get_device_id(dev);
+	enum pxa_i2c_types i2c_type = id->driver_data;
 	int ret;
 	int irq;
 
@@ -1044,7 +1094,13 @@ static int i2c_pxa_probe(struct platform_device *dev)
 		ret = -EIO;
 		goto eremap;
 	}
-	i2c->reg_shift = REG_SHIFT(id->driver_data);
+
+	i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
+	i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
+	i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr;
+	i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr;
+	if (i2c_type != REGS_CE4100)
+		i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar;
 
 	i2c->iobase = res->start;
 	i2c->iosize = resource_size(res);
@@ -1072,7 +1128,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
 		i2c->adap.algo = &i2c_pxa_pio_algorithm;
 	} else {
 		i2c->adap.algo = &i2c_pxa_algorithm;
-		ret = request_irq(irq, i2c_pxa_handler, IRQF_DISABLED,
+		ret = request_irq(irq, i2c_pxa_handler, IRQF_SHARED,
 				  i2c->adap.name, i2c);
 		if (ret)
 			goto ereqirq;
@@ -1082,12 +1138,19 @@ static int i2c_pxa_probe(struct platform_device *dev)
 
 	i2c->adap.algo_data = i2c;
 	i2c->adap.dev.parent = &dev->dev;
+#ifdef CONFIG_OF
+	i2c->adap.dev.of_node = dev->dev.of_node;
+#endif
 
-	ret = i2c_add_numbered_adapter(&i2c->adap);
+	if (i2c_type == REGS_CE4100)
+		ret = i2c_add_adapter(&i2c->adap);
+	else
+		ret = i2c_add_numbered_adapter(&i2c->adap);
 	if (ret < 0) {
 		printk(KERN_INFO "I2C: Failed to add bus\n");
 		goto eadapt;
 	}
+	of_i2c_register_devices(&i2c->adap);
 
 	platform_set_drvdata(dev, i2c);
 
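Replacing the old reg_shift trick with the pxa_reg_layout[] table is what lets one driver serve three unrelated register maps: probe resolves each register's address exactly once, and the fast paths then read through cached pointers instead of recomputing shifted offsets. The same idea in miniature, with hypothetical names (MY_VARIANT_A/B are assumed enum values taken from the matched device id):

struct my_reg_layout {
	unsigned int ctrl;
	unsigned int status;
};

static const struct my_reg_layout my_layouts[] = {
	[MY_VARIANT_A] = { .ctrl = 0x00, .status = 0x04 },
	[MY_VARIANT_B] = { .ctrl = 0x10, .status = 0x18 },
};

/* At probe time, 'variant' comes from the matched platform_device_id: */
dev->reg_ctrl   = dev->reg_base + my_layouts[variant].ctrl;
dev->reg_status = dev->reg_base + my_layouts[variant].status;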
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6f190f4cdbc0..9bec8699b8a3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -34,6 +34,16 @@ config LEDS_ATMEL_PWM
 	  This option enables support for LEDs driven using outputs
 	  of the dedicated PWM controller found on newer Atmel SOCs.
 
+config LEDS_LM3530
+	tristate "LCD Backlight driver for LM3530"
+	depends on LEDS_CLASS
+	depends on I2C
+	help
+	  This option enables support for the LCD backlight using
+	  the LM3530 ambient light sensor chip.  This ALS chip can be
+	  controlled manually, via a PWM input, or automatically from
+	  ambient light.
+
 config LEDS_LOCOMO
 	tristate "LED Support for Locomo device"
 	depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aae6989ff6b6..39c80fca84d2 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
 obj-$(CONFIG_LEDS_ATMEL_PWM)		+= leds-atmel-pwm.o
 obj-$(CONFIG_LEDS_BD2802)		+= leds-bd2802.o
 obj-$(CONFIG_LEDS_LOCOMO)		+= leds-locomo.o
+obj-$(CONFIG_LEDS_LM3530)		+= leds-lm3530.o
 obj-$(CONFIG_LEDS_MIKROTIK_RB532)	+= leds-rb532.o
 obj-$(CONFIG_LEDS_S3C24XX)		+= leds-s3c24xx.o
 obj-$(CONFIG_LEDS_AMS_DELTA)		+= leds-ams-delta.o
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 19dc4b61a105..3ebe3824662d 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -19,7 +19,7 @@
 #include <linux/leds.h>
 #include <linux/leds-bd2802.h>
 #include <linux/slab.h>
-
+#include <linux/pm.h>
 
 #define LED_CTL(rgb2en, rgb1en) ((rgb2en) << 4 | ((rgb1en) << 0))
 
@@ -319,20 +319,6 @@ static void bd2802_turn_off(struct bd2802_led *led, enum led_ids id,
 	bd2802_update_state(led, id, color, BD2802_OFF);
 }
 
-static void bd2802_restore_state(struct bd2802_led *led)
-{
-	int i;
-
-	for (i = 0; i < LED_NUM; i++) {
-		if (led->led[i].r)
-			bd2802_turn_on(led, i, RED, led->led[i].r);
-		if (led->led[i].g)
-			bd2802_turn_on(led, i, GREEN, led->led[i].g);
-		if (led->led[i].b)
-			bd2802_turn_on(led, i, BLUE, led->led[i].b);
-	}
-}
-
 #define BD2802_SET_REGISTER(reg_addr, reg_name)				\
 static ssize_t bd2802_store_reg##reg_addr(struct device *dev,		\
 	struct device_attribute *attr, const char *buf, size_t count)	\
@@ -761,8 +747,25 @@ static int __exit bd2802_remove(struct i2c_client *client)
 	return 0;
 }
 
-static int bd2802_suspend(struct i2c_client *client, pm_message_t mesg)
+#ifdef CONFIG_PM
+
+static void bd2802_restore_state(struct bd2802_led *led)
 {
+	int i;
+
+	for (i = 0; i < LED_NUM; i++) {
+		if (led->led[i].r)
+			bd2802_turn_on(led, i, RED, led->led[i].r);
+		if (led->led[i].g)
+			bd2802_turn_on(led, i, GREEN, led->led[i].g);
+		if (led->led[i].b)
+			bd2802_turn_on(led, i, BLUE, led->led[i].b);
+	}
+}
+
+static int bd2802_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bd2802_led *led = i2c_get_clientdata(client);
 
 	gpio_set_value(led->pdata->reset_gpio, 0);
@@ -770,8 +773,9 @@ static int bd2802_suspend(struct i2c_client *client, pm_message_t mesg)
 	return 0;
 }
 
-static int bd2802_resume(struct i2c_client *client)
+static int bd2802_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct bd2802_led *led = i2c_get_clientdata(client);
 
 	if (!bd2802_is_all_off(led) || led->adf_on) {
@@ -782,6 +786,12 @@ static int bd2802_resume(struct i2c_client *client)
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(bd2802_pm, bd2802_suspend, bd2802_resume);
+#define BD2802_PM (&bd2802_pm)
+#else /* CONFIG_PM */
+#define BD2802_PM NULL
+#endif
+
 static const struct i2c_device_id bd2802_id[] = {
 	{ "BD2802", 0 },
 	{ }
@@ -791,11 +801,10 @@ MODULE_DEVICE_TABLE(i2c, bd2802_id);
 static struct i2c_driver bd2802_i2c_driver = {
 	.driver = {
 		.name	= "BD2802",
+		.pm	= BD2802_PM,
 	},
 	.probe		= bd2802_probe,
 	.remove		= __exit_p(bd2802_remove),
-	.suspend	= bd2802_suspend,
-	.resume		= bd2802_resume,
 	.id_table	= bd2802_id,
 };
 
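The bd2802 change is the stock migration from the legacy i2c_driver .suspend/.resume hooks to dev_pm_ops: the callbacks take a struct device, recover the client with to_i2c_client(), and SIMPLE_DEV_PM_OPS() publishes them for system sleep, with the whole block compiled out when CONFIG_PM is off. The reusable skeleton, hedged with my_* placeholder names:

#ifdef CONFIG_PM
static int my_chip_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/* quiesce the hardware via i2c_get_clientdata(client) here */
	dev_dbg(&client->dev, "suspended\n");
	return 0;
}

static int my_chip_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	/* restore the hardware state here */
	dev_dbg(&client->dev, "resumed\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_chip_pm, my_chip_suspend, my_chip_resume);
#define MY_CHIP_PM	(&my_chip_pm)
#else
#define MY_CHIP_PM	NULL
#endif

static struct i2c_driver my_chip_driver = {
	.driver = {
		.name	= "my-chip",
		.pm	= MY_CHIP_PM,
	},
	/* .probe, .remove, .id_table as usual */
};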
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
new file mode 100644
index 000000000000..e7089a1f6cb6
--- /dev/null
+++ b/drivers/leds/leds-lm3530.c
@@ -0,0 +1,378 @@
1/*
2 * Copyright (C) 2011 ST-Ericsson SA.
3 * Copyright (C) 2009 Motorola, Inc.
4 *
5 * License Terms: GNU General Public License v2
6 *
7 * Simple driver for National Semiconductor LM3530 Backlight driver chip
8 *
9 * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
10 * based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com>
11 */
12
13#include <linux/i2c.h>
14#include <linux/leds.h>
15#include <linux/slab.h>
16#include <linux/platform_device.h>
17#include <linux/input.h>
18#include <linux/led-lm3530.h>
19#include <linux/types.h>
20
21#define LM3530_LED_DEV "lcd-backlight"
22#define LM3530_NAME "lm3530-led"
23
24#define LM3530_GEN_CONFIG 0x10
25#define LM3530_ALS_CONFIG 0x20
26#define LM3530_BRT_RAMP_RATE 0x30
27#define LM3530_ALS_ZONE_REG 0x40
28#define LM3530_ALS_IMP_SELECT 0x41
29#define LM3530_BRT_CTRL_REG 0xA0
30#define LM3530_ALS_ZB0_REG 0x60
31#define LM3530_ALS_ZB1_REG 0x61
32#define LM3530_ALS_ZB2_REG 0x62
33#define LM3530_ALS_ZB3_REG 0x63
34#define LM3530_ALS_Z0T_REG 0x70
35#define LM3530_ALS_Z1T_REG 0x71
36#define LM3530_ALS_Z2T_REG 0x72
37#define LM3530_ALS_Z3T_REG 0x73
38#define LM3530_ALS_Z4T_REG 0x74
39#define LM3530_REG_MAX 15
40
41/* General Control Register */
42#define LM3530_EN_I2C_SHIFT (0)
43#define LM3530_RAMP_LAW_SHIFT (1)
44#define LM3530_MAX_CURR_SHIFT (2)
45#define LM3530_EN_PWM_SHIFT (5)
46#define LM3530_PWM_POL_SHIFT (6)
47#define LM3530_EN_PWM_SIMPLE_SHIFT (7)
48
49#define LM3530_ENABLE_I2C (1 << LM3530_EN_I2C_SHIFT)
50#define LM3530_ENABLE_PWM (1 << LM3530_EN_PWM_SHIFT)
51#define LM3530_POL_LOW (1 << LM3530_PWM_POL_SHIFT)
52#define LM3530_ENABLE_PWM_SIMPLE (1 << LM3530_EN_PWM_SIMPLE_SHIFT)
53
54/* ALS Config Register Options */
55#define LM3530_ALS_AVG_TIME_SHIFT (0)
56#define LM3530_EN_ALS_SHIFT (3)
57#define LM3530_ALS_SEL_SHIFT (5)
58
59#define LM3530_ENABLE_ALS (3 << LM3530_EN_ALS_SHIFT)
60
61/* Brightness Ramp Rate Register */
62#define LM3530_BRT_RAMP_FALL_SHIFT (0)
63#define LM3530_BRT_RAMP_RISE_SHIFT (3)
64
65/* ALS Resistor Select */
66#define LM3530_ALS1_IMP_SHIFT (0)
67#define LM3530_ALS2_IMP_SHIFT (4)
68
69/* Zone Boundary Register defaults */
70#define LM3530_DEF_ZB_0 (0x33)
71#define LM3530_DEF_ZB_1 (0x66)
72#define LM3530_DEF_ZB_2 (0x99)
73#define LM3530_DEF_ZB_3 (0xCC)
74
75/* Zone Target Register defaults */
76#define LM3530_DEF_ZT_0 (0x19)
77#define LM3530_DEF_ZT_1 (0x33)
78#define LM3530_DEF_ZT_2 (0x4C)
79#define LM3530_DEF_ZT_3 (0x66)
80#define LM3530_DEF_ZT_4 (0x7F)
81
82struct lm3530_mode_map {
83 const char *mode;
84 enum lm3530_mode mode_val;
85};
86
87static struct lm3530_mode_map mode_map[] = {
88 { "man", LM3530_BL_MODE_MANUAL },
89 { "als", LM3530_BL_MODE_ALS },
90 { "pwm", LM3530_BL_MODE_PWM },
91};
92
93/**
94 * struct lm3530_data
95 * @led_dev: led class device
96 * @client: i2c client
97 * @pdata: LM3530 platform data
98 * @mode: mode of operation - manual, ALS, PWM
99 */
100struct lm3530_data {
101 struct led_classdev led_dev;
102 struct i2c_client *client;
103 struct lm3530_platform_data *pdata;
104 enum lm3530_mode mode;
105};
106
107static const u8 lm3530_reg[LM3530_REG_MAX] = {
108 LM3530_GEN_CONFIG,
109 LM3530_ALS_CONFIG,
110 LM3530_BRT_RAMP_RATE,
111 LM3530_ALS_ZONE_REG,
112 LM3530_ALS_IMP_SELECT,
113 LM3530_BRT_CTRL_REG,
114 LM3530_ALS_ZB0_REG,
115 LM3530_ALS_ZB1_REG,
116 LM3530_ALS_ZB2_REG,
117 LM3530_ALS_ZB3_REG,
118 LM3530_ALS_Z0T_REG,
119 LM3530_ALS_Z1T_REG,
120 LM3530_ALS_Z2T_REG,
121 LM3530_ALS_Z3T_REG,
122 LM3530_ALS_Z4T_REG,
123};
124
125static int lm3530_get_mode_from_str(const char *str)
126{
127 int i;
128
129 for (i = 0; i < ARRAY_SIZE(mode_map); i++)
130 if (sysfs_streq(str, mode_map[i].mode))
131 return mode_map[i].mode_val;
132
133 return -1;
134}
135
136static int lm3530_init_registers(struct lm3530_data *drvdata)
137{
138 int ret = 0;
139 int i;
140 u8 gen_config;
141 u8 als_config = 0;
142 u8 brt_ramp;
143 u8 als_imp_sel = 0;
144 u8 brightness;
145 u8 reg_val[LM3530_REG_MAX];
146 struct lm3530_platform_data *pltfm = drvdata->pdata;
147 struct i2c_client *client = drvdata->client;
148
149 gen_config = (pltfm->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
150 ((pltfm->max_current & 7) << LM3530_MAX_CURR_SHIFT);
151
152 if (drvdata->mode == LM3530_BL_MODE_MANUAL ||
153 drvdata->mode == LM3530_BL_MODE_ALS)
154 gen_config |= (LM3530_ENABLE_I2C);
155
156 if (drvdata->mode == LM3530_BL_MODE_ALS) {
157 als_config =
158 (pltfm->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
159 (LM3530_ENABLE_ALS) |
160 (pltfm->als_input_mode << LM3530_ALS_SEL_SHIFT);
161
162 als_imp_sel =
163 (pltfm->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
164 (pltfm->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
165 }
166
167 if (drvdata->mode == LM3530_BL_MODE_PWM)
168 gen_config |= (LM3530_ENABLE_PWM) |
169 (pltfm->pwm_pol_hi << LM3530_PWM_POL_SHIFT) |
170 (LM3530_ENABLE_PWM_SIMPLE);
171
172 brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
173 (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
174
175 brightness = pltfm->brt_val;
176
177 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
178 reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
179 reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
180 reg_val[3] = 0x00; /* LM3530_ALS_ZONE_REG */
181 reg_val[4] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
182 reg_val[5] = brightness; /* LM3530_BRT_CTRL_REG */
183 reg_val[6] = LM3530_DEF_ZB_0; /* LM3530_ALS_ZB0_REG */
184 reg_val[7] = LM3530_DEF_ZB_1; /* LM3530_ALS_ZB1_REG */
185 reg_val[8] = LM3530_DEF_ZB_2; /* LM3530_ALS_ZB2_REG */
186 reg_val[9] = LM3530_DEF_ZB_3; /* LM3530_ALS_ZB3_REG */
187 reg_val[10] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
188 reg_val[11] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
189 reg_val[12] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
190 reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */
191 reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */
192
193 for (i = 0; i < LM3530_REG_MAX; i++) {
194 ret = i2c_smbus_write_byte_data(client,
195 lm3530_reg[i], reg_val[i]);
196 if (ret)
197 break;
198 }
199
200 return ret;
201}
202
203static void lm3530_brightness_set(struct led_classdev *led_cdev,
204 enum led_brightness brt_val)
205{
206 int err;
207 struct lm3530_data *drvdata =
208 container_of(led_cdev, struct lm3530_data, led_dev);
209
210 switch (drvdata->mode) {
211 case LM3530_BL_MODE_MANUAL:
212
213 /* set the brightness in the brightness control register */
214 err = i2c_smbus_write_byte_data(drvdata->client,
215 LM3530_BRT_CTRL_REG, brt_val / 2);
216 if (err)
217 dev_err(&drvdata->client->dev,
218 "Unable to set brightness: %d\n", err);
219 break;
220 case LM3530_BL_MODE_ALS:
221 break;
222 case LM3530_BL_MODE_PWM:
223 break;
224 default:
225 break;
226 }
227}
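/* Illustrative note (not part of the original patch): the LED core hands
 * lm3530_brightness_set() a value in 0..255, and the division by two maps
 * it onto the chip's 7-bit brightness register, so a request of 255 is
 * written as 0x7F, the same value as the LM3530_DEF_ZT_4 default above. */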
228
229
230static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
231 *attr, const char *buf, size_t size)
232{
233 int err;
234 struct i2c_client *client = container_of(
235 dev->parent, struct i2c_client, dev);
236 struct lm3530_data *drvdata = i2c_get_clientdata(client);
237 int mode;
238
239 mode = lm3530_get_mode_from_str(buf);
240 if (mode < 0) {
241 dev_err(dev, "Invalid mode\n");
242 return -EINVAL;
243 }
244
245 if (mode == LM3530_BL_MODE_MANUAL)
246 drvdata->mode = LM3530_BL_MODE_MANUAL;
247 else if (mode == LM3530_BL_MODE_ALS)
248 drvdata->mode = LM3530_BL_MODE_ALS;
249 else if (mode == LM3530_BL_MODE_PWM) {
250 dev_err(dev, "PWM mode not supported\n");
251 return -EINVAL;
252 }
253
254 err = lm3530_init_registers(drvdata);
255 if (err) {
256 dev_err(dev, "Setting %s Mode failed :%d\n", buf, err);
257 return err;
258 }
259
260 return size;
261}
262
263static DEVICE_ATTR(mode, 0644, NULL, lm3530_mode_set);
264
265static int __devinit lm3530_probe(struct i2c_client *client,
266 const struct i2c_device_id *id)
267{
268 struct lm3530_platform_data *pdata = client->dev.platform_data;
269 struct lm3530_data *drvdata;
270 int err = 0;
271
272 if (pdata == NULL) {
273 dev_err(&client->dev, "platform data required\n");
274 err = -ENODEV;
275 goto err_out;
276 }
277
278 /* BL mode */
279 if (pdata->mode > LM3530_BL_MODE_PWM) {
280 dev_err(&client->dev, "Illegal Mode request\n");
281 err = -EINVAL;
282 goto err_out;
283 }
284
285 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
286 dev_err(&client->dev, "I2C_FUNC_I2C not supported\n");
287 err = -EIO;
288 goto err_out;
289 }
290
291 drvdata = kzalloc(sizeof(struct lm3530_data), GFP_KERNEL);
292 if (drvdata == NULL) {
293 err = -ENOMEM;
294 goto err_out;
295 }
296
297 drvdata->mode = pdata->mode;
298 drvdata->client = client;
299 drvdata->pdata = pdata;
300 drvdata->led_dev.name = LM3530_LED_DEV;
301 drvdata->led_dev.brightness_set = lm3530_brightness_set;
302
303 i2c_set_clientdata(client, drvdata);
304
305 err = lm3530_init_registers(drvdata);
306 if (err < 0) {
307 dev_err(&client->dev, "Register Init failed: %d\n", err);
308 err = -ENODEV;
309 goto err_reg_init;
310 }
311
312 err = led_classdev_register((struct device *)
313 &client->dev, &drvdata->led_dev);
314 if (err < 0) {
315 dev_err(&client->dev, "Register led class failed: %d\n", err);
316 err = -ENODEV;
317 goto err_class_register;
318 }
319
320 err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode);
321 if (err < 0) {
322 dev_err(&client->dev, "File device creation failed: %d\n", err);
323 err = -ENODEV;
324 goto err_create_file;
325 }
326
327 return 0;
328
329err_create_file:
330 led_classdev_unregister(&drvdata->led_dev);
331err_class_register:
332err_reg_init:
333 kfree(drvdata);
334err_out:
335 return err;
336}
337
338static int __devexit lm3530_remove(struct i2c_client *client)
339{
340 struct lm3530_data *drvdata = i2c_get_clientdata(client);
341
342 device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
343 led_classdev_unregister(&drvdata->led_dev);
344 kfree(drvdata);
345 return 0;
346}
347
348static const struct i2c_device_id lm3530_id[] = {
349 {LM3530_NAME, 0},
350 {}
351};
352
353static struct i2c_driver lm3530_i2c_driver = {
354 .probe = lm3530_probe,
355 .remove = lm3530_remove,
356 .id_table = lm3530_id,
357 .driver = {
358 .name = LM3530_NAME,
359 .owner = THIS_MODULE,
360 },
361};
362
363static int __init lm3530_init(void)
364{
365 return i2c_add_driver(&lm3530_i2c_driver);
366}
367
368static void __exit lm3530_exit(void)
369{
370 i2c_del_driver(&lm3530_i2c_driver);
371}
372
373module_init(lm3530_init);
374module_exit(lm3530_exit);
375
376MODULE_DESCRIPTION("Backlight driver for LM3530");
377MODULE_LICENSE("GPL v2");
378MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
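A minimal userspace sketch (not part of the patch) of driving the new "mode"
attribute may help here. The device name under /sys/class/leds/ comes from
LM3530_LED_DEV, which is defined in a header not shown in this diff, so the
path below is an assumption; the accepted strings "man", "als" and "pwm" come
from mode_map above, and "pwm" is rejected by lm3530_mode_set() with -EINVAL.

#include <stdio.h>

int main(void)
{
	/* hypothetical device name; substitute the real LM3530_LED_DEV */
	FILE *f = fopen("/sys/class/leds/lm3530-led/mode", "w");

	if (!f)
		return 1;
	fputs("als", f);	/* "man" and "als" are accepted here */
	fclose(f);
	return 0;
}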
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 80a3ae3c00b9..c0cff64a1ae6 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -534,7 +534,7 @@ static ssize_t lp5521_selftest(struct device *dev,
534} 534}
535 535
536/* led class device attributes */ 536/* led class device attributes */
537static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); 537static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
538static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); 538static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
539 539
540static struct attribute *lp5521_led_attributes[] = { 540static struct attribute *lp5521_led_attributes[] = {
@@ -548,15 +548,15 @@ static struct attribute_group lp5521_led_attribute_group = {
548}; 548};
549 549
550/* device attributes */ 550/* device attributes */
551static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, 551static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
552 show_engine1_mode, store_engine1_mode); 552 show_engine1_mode, store_engine1_mode);
553static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, 553static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
554 show_engine2_mode, store_engine2_mode); 554 show_engine2_mode, store_engine2_mode);
555static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, 555static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
556 show_engine3_mode, store_engine3_mode); 556 show_engine3_mode, store_engine3_mode);
557static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); 557static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
558static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); 558static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
559static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); 559static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
560static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); 560static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
561 561
562static struct attribute *lp5521_attributes[] = { 562static struct attribute *lp5521_attributes[] = {
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index d0c4068ecddd..e19fed25f137 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -713,7 +713,7 @@ static ssize_t store_current(struct device *dev,
713} 713}
714 714
715/* led class device attributes */ 715/* led class device attributes */
716static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); 716static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
717static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); 717static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
718 718
719static struct attribute *lp5523_led_attributes[] = { 719static struct attribute *lp5523_led_attributes[] = {
@@ -727,21 +727,21 @@ static struct attribute_group lp5523_led_attribute_group = {
727}; 727};
728 728
729/* device attributes */ 729/* device attributes */
730static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, 730static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
731 show_engine1_mode, store_engine1_mode); 731 show_engine1_mode, store_engine1_mode);
732static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, 732static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
733 show_engine2_mode, store_engine2_mode); 733 show_engine2_mode, store_engine2_mode);
734static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, 734static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
735 show_engine3_mode, store_engine3_mode); 735 show_engine3_mode, store_engine3_mode);
736static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, 736static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR,
737 show_engine1_leds, store_engine1_leds); 737 show_engine1_leds, store_engine1_leds);
738static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, 738static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR,
739 show_engine2_leds, store_engine2_leds); 739 show_engine2_leds, store_engine2_leds);
740static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, 740static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR,
741 show_engine3_leds, store_engine3_leds); 741 show_engine3_leds, store_engine3_leds);
742static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); 742static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
743static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); 743static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
744static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); 744static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
745static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); 745static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
746 746
747static struct attribute *lp5523_attributes[] = { 747static struct attribute *lp5523_attributes[] = {
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 1739557a9038..7e764b8365e6 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -19,7 +19,7 @@
19 19
20#include <asm/geode.h> 20#include <asm/geode.h>
21 21
22static struct gpio_led net5501_leds[] = { 22static const struct gpio_led net5501_leds[] = {
23 { 23 {
24 .name = "error", 24 .name = "error",
25 .gpio = 6, 25 .gpio = 6,
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index ade1e656bfb2..b1d91170ded0 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -163,6 +163,7 @@ void __init pmu_backlight_init()
163 snprintf(name, sizeof(name), "pmubl"); 163 snprintf(name, sizeof(name), "pmubl");
164 164
165 memset(&props, 0, sizeof(struct backlight_properties)); 165 memset(&props, 0, sizeof(struct backlight_properties));
166 props.type = BACKLIGHT_PLATFORM;
166 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 167 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
167 bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data, 168 bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data,
168 &props); 169 &props);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 203500d9b848..4e007c6a4b44 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -402,6 +402,16 @@ config DS1682
402 This driver can also be built as a module. If so, the module 402 This driver can also be built as a module. If so, the module
403 will be called ds1682. 403 will be called ds1682.
404 404
405config SPEAR13XX_PCIE_GADGET
406 bool "PCIe gadget support for SPEAr13XX platform"
407 depends on ARCH_SPEAR13XX
408 default n
409 help
410 This option enables gadget support for the PCIe controller. If
411 the board file defines any controller as a PCIe endpoint, a
412 configfs entry will be created for that controller. Users can use
413 this entry to configure the PCIe endpoint as required.
414
405config TI_DAC7512 415config TI_DAC7512
406 tristate "Texas Instruments DAC7512" 416 tristate "Texas Instruments DAC7512"
407 depends on SPI && SYSFS 417 depends on SPI && SYSFS
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 804f421bc079..f5468602961f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
37obj-$(CONFIG_HMC6352) += hmc6352.o 37obj-$(CONFIG_HMC6352) += hmc6352.o
38obj-y += eeprom/ 38obj-y += eeprom/
39obj-y += cb710/ 39obj-y += cb710/
40obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
40obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o 41obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
41obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 42obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
42obj-$(CONFIG_PCH_PHUB) += pch_phub.o 43obj-$(CONFIG_PCH_PHUB) += pch_phub.o
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 644d4cd071cc..81db7811cf68 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -245,9 +245,8 @@ static int apds9802als_probe(struct i2c_client *client,
245 als_set_default_config(client); 245 als_set_default_config(client);
246 mutex_init(&data->mutex); 246 mutex_init(&data->mutex);
247 247
248 pm_runtime_set_active(&client->dev);
248 pm_runtime_enable(&client->dev); 249 pm_runtime_enable(&client->dev);
249 pm_runtime_get(&client->dev);
250 pm_runtime_put(&client->dev);
251 250
252 return res; 251 return res;
253als_error1: 252als_error1:
@@ -255,12 +254,19 @@ als_error1:
255 return res; 254 return res;
256} 255}
257 256
258static int apds9802als_remove(struct i2c_client *client) 257static int __devexit apds9802als_remove(struct i2c_client *client)
259{ 258{
260 struct als_data *data = i2c_get_clientdata(client); 259 struct als_data *data = i2c_get_clientdata(client);
261 260
261 pm_runtime_get_sync(&client->dev);
262
262 als_set_power_state(client, false); 263 als_set_power_state(client, false);
263 sysfs_remove_group(&client->dev.kobj, &m_als_gr); 264 sysfs_remove_group(&client->dev.kobj, &m_als_gr);
265
266 pm_runtime_disable(&client->dev);
267 pm_runtime_set_suspended(&client->dev);
268 pm_runtime_put_noidle(&client->dev);
269
264 kfree(data); 270 kfree(data);
265 return 0; 271 return 0;
266} 272}
@@ -275,9 +281,6 @@ static int apds9802als_suspend(struct i2c_client *client, pm_message_t mesg)
275static int apds9802als_resume(struct i2c_client *client) 281static int apds9802als_resume(struct i2c_client *client)
276{ 282{
277 als_set_default_config(client); 283 als_set_default_config(client);
278
279 pm_runtime_get(&client->dev);
280 pm_runtime_put(&client->dev);
281 return 0; 284 return 0;
282} 285}
283 286
@@ -323,7 +326,7 @@ static struct i2c_driver apds9802als_driver = {
323 .pm = APDS9802ALS_PM_OPS, 326 .pm = APDS9802ALS_PM_OPS,
324 }, 327 },
325 .probe = apds9802als_probe, 328 .probe = apds9802als_probe,
326 .remove = apds9802als_remove, 329 .remove = __devexit_p(apds9802als_remove),
327 .suspend = apds9802als_suspend, 330 .suspend = apds9802als_suspend,
328 .resume = apds9802als_resume, 331 .resume = apds9802als_resume,
329 .id_table = apds9802als_id, 332 .id_table = apds9802als_id,
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index 3891124001f2..a844810b50f6 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -75,7 +75,7 @@ out:
75 return tc; 75 return tc;
76 76
77fail_ioremap: 77fail_ioremap:
78 release_resource(r); 78 release_mem_region(r->start, ATMEL_TC_IOMEM_SIZE);
79fail: 79fail:
80 tc = NULL; 80 tc = NULL;
81 goto out; 81 goto out;
@@ -95,7 +95,7 @@ void atmel_tc_free(struct atmel_tc *tc)
95 spin_lock(&tc_list_lock); 95 spin_lock(&tc_list_lock);
96 if (tc->regs) { 96 if (tc->regs) {
97 iounmap(tc->regs); 97 iounmap(tc->regs);
98 release_resource(tc->iomem); 98 release_mem_region(tc->iomem->start, ATMEL_TC_IOMEM_SIZE);
99 tc->regs = NULL; 99 tc->regs = NULL;
100 tc->iomem = NULL; 100 tc->iomem = NULL;
101 } 101 }
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
index d5f3a3fd2319..d07cd67c951c 100644
--- a/drivers/misc/bh1780gli.c
+++ b/drivers/misc/bh1780gli.c
@@ -196,10 +196,11 @@ static int __devexit bh1780_remove(struct i2c_client *client)
196} 196}
197 197
198#ifdef CONFIG_PM 198#ifdef CONFIG_PM
199static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg) 199static int bh1780_suspend(struct device *dev)
200{ 200{
201 struct bh1780_data *ddata; 201 struct bh1780_data *ddata;
202 int state, ret; 202 int state, ret;
203 struct i2c_client *client = to_i2c_client(dev);
203 204
204 ddata = i2c_get_clientdata(client); 205 ddata = i2c_get_clientdata(client);
205 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL"); 206 state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
@@ -217,14 +218,14 @@ static int bh1780_suspend(struct i2c_client *client, pm_message_t mesg)
217 return 0; 218 return 0;
218} 219}
219 220
220static int bh1780_resume(struct i2c_client *client) 221static int bh1780_resume(struct device *dev)
221{ 222{
222 struct bh1780_data *ddata; 223 struct bh1780_data *ddata;
223 int state, ret; 224 int state, ret;
225 struct i2c_client *client = to_i2c_client(dev);
224 226
225 ddata = i2c_get_clientdata(client); 227 ddata = i2c_get_clientdata(client);
226 state = ddata->power_state; 228 state = ddata->power_state;
227
228 ret = bh1780_write(ddata, BH1780_REG_CONTROL, state, 229 ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
229 "CONTROL"); 230 "CONTROL");
230 231
@@ -233,9 +234,10 @@ static int bh1780_resume(struct i2c_client *client)
233 234
234 return 0; 235 return 0;
235} 236}
237static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
238#define BH1780_PMOPS (&bh1780_pm)
236#else 239#else
237#define bh1780_suspend NULL 240#define BH1780_PMOPS NULL
238#define bh1780_resume NULL
239#endif /* CONFIG_PM */ 241#endif /* CONFIG_PM */
240 242
241static const struct i2c_device_id bh1780_id[] = { 243static const struct i2c_device_id bh1780_id[] = {
@@ -247,11 +249,10 @@ static struct i2c_driver bh1780_driver = {
247 .probe = bh1780_probe, 249 .probe = bh1780_probe,
248 .remove = bh1780_remove, 250 .remove = bh1780_remove,
249 .id_table = bh1780_id, 251 .id_table = bh1780_id,
250 .suspend = bh1780_suspend,
251 .resume = bh1780_resume,
252 .driver = { 252 .driver = {
253 .name = "bh1780" 253 .name = "bh1780",
254 }, 254 .pm = BH1780_PMOPS,
255},
255}; 256};
256 257
257static int __init bh1780_init(void) 258static int __init bh1780_init(void)
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index b6e1c9a6679e..ecd276ad6b19 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -402,7 +402,7 @@ exit:
402 return status; 402 return status;
403} 403}
404 404
405static int bmp085_probe(struct i2c_client *client, 405static int __devinit bmp085_probe(struct i2c_client *client,
406 const struct i2c_device_id *id) 406 const struct i2c_device_id *id)
407{ 407{
408 struct bmp085_data *data; 408 struct bmp085_data *data;
@@ -438,7 +438,7 @@ exit:
438 return err; 438 return err;
439} 439}
440 440
441static int bmp085_remove(struct i2c_client *client) 441static int __devexit bmp085_remove(struct i2c_client *client)
442{ 442{
443 sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group); 443 sysfs_remove_group(&client->dev.kobj, &bmp085_attr_group);
444 kfree(i2c_get_clientdata(client)); 444 kfree(i2c_get_clientdata(client));
@@ -458,7 +458,7 @@ static struct i2c_driver bmp085_driver = {
458 }, 458 },
459 .id_table = bmp085_id, 459 .id_table = bmp085_id,
460 .probe = bmp085_probe, 460 .probe = bmp085_probe,
461 .remove = bmp085_remove, 461 .remove = __devexit_p(bmp085_remove),
462 462
463 .detect = bmp085_detect, 463 .detect = bmp085_detect,
464 .address_list = normal_i2c 464 .address_list = normal_i2c
diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c
index 46b3439673e9..16d7179e2f9b 100644
--- a/drivers/misc/ep93xx_pwm.c
+++ b/drivers/misc/ep93xx_pwm.c
@@ -249,11 +249,11 @@ static ssize_t ep93xx_pwm_set_invert(struct device *dev,
249 249
250static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); 250static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL);
251static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); 251static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL);
252static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO, 252static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO,
253 ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); 253 ep93xx_pwm_get_freq, ep93xx_pwm_set_freq);
254static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO, 254static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO,
255 ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); 255 ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent);
256static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO, 256static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO,
257 ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); 257 ep93xx_pwm_get_invert, ep93xx_pwm_set_invert);
258 258
259static struct attribute *ep93xx_pwm_attrs[] = { 259static struct attribute *ep93xx_pwm_attrs[] = {
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 234bfcaf2099..ca938fc8a8d6 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -75,7 +75,7 @@ static ssize_t compass_heading_data_show(struct device *dev,
75{ 75{
76 struct i2c_client *client = to_i2c_client(dev); 76 struct i2c_client *client = to_i2c_client(dev);
77 unsigned char i2c_data[2]; 77 unsigned char i2c_data[2];
78 unsigned int ret; 78 int ret;
79 79
80 mutex_lock(&compass_mutex); 80 mutex_lock(&compass_mutex);
81 ret = compass_command(client, 'A'); 81 ret = compass_command(client, 'A');
@@ -86,7 +86,7 @@ static ssize_t compass_heading_data_show(struct device *dev,
86 msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */ 86 msleep(10); /* sending 'A' cmd we need to wait for 7-10 millisecs */
87 ret = i2c_master_recv(client, i2c_data, 2); 87 ret = i2c_master_recv(client, i2c_data, 2);
88 mutex_unlock(&compass_mutex); 88 mutex_unlock(&compass_mutex);
89 if (ret != 1) { 89 if (ret < 0) {
90 dev_warn(dev, "i2c read data cmd failed\n"); 90 dev_warn(dev, "i2c read data cmd failed\n");
91 return ret; 91 return ret;
92 } 92 }
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 380ba806495d..a19cb710a246 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -735,6 +735,7 @@ static struct pci_device_id pch_phub_pcidev_id[] = {
735 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, }, 735 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
736 { } 736 { }
737}; 737};
738MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
738 739
739static struct pci_driver pch_phub_driver = { 740static struct pci_driver pch_phub_driver = {
740 .name = "pch_phub", 741 .name = "pch_phub",
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
new file mode 100644
index 000000000000..ec3b8c911833
--- /dev/null
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -0,0 +1,908 @@
1/*
2 * drivers/misc/spear13xx_pcie_gadget.c
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Pratyush Anand<pratyush.anand@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/clk.h>
13#include <linux/slab.h>
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/pci_regs.h>
22#include <linux/configfs.h>
23#include <mach/pcie.h>
24#include <mach/misc_regs.h>
25
26#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1)
27/* In the current implementation address translation is done using IN0
28 * only, so the IN1 start address and the IN0 end address are kept the same.
29 */
30#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1)
31#define IN_IO_SIZE (20 * 1024 * 1024 - 1)
32#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1)
33#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1)
34#define IN_MSG_SIZE (12 * 1024 * 1024 - 1)
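/* Illustrative layout of the outbound windows derived from the sizes above
 * (offsets relative to config->base; IN1 is degenerate because IN1_MEM_SIZE
 * evaluates to -1, i.e. all memory translation uses IN0):
 * MEM0: 200MB, MEM1: empty, IO: 20MB, CFG0/CFG1/MSG: 12MB each. */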
35/* Keep the default BAR size as 4K */
36/* AORAM is mapped by default */
37#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1)
38
39#define INT_TYPE_NO_INT 0
40#define INT_TYPE_INTX 1
41#define INT_TYPE_MSI 2
42struct spear_pcie_gadget_config {
43 void __iomem *base;
44 void __iomem *va_app_base;
45 void __iomem *va_dbi_base;
46 char int_type[10];
47 ulong requested_msi;
48 ulong configured_msi;
49 ulong bar0_size;
50 ulong bar0_rw_offset;
51 void __iomem *va_bar0_address;
52};
53
54struct pcie_gadget_target {
55 struct configfs_subsystem subsys;
56 struct spear_pcie_gadget_config config;
57};
58
59struct pcie_gadget_target_attr {
60 struct configfs_attribute attr;
61 ssize_t (*show)(struct spear_pcie_gadget_config *config,
62 char *buf);
63 ssize_t (*store)(struct spear_pcie_gadget_config *config,
64 const char *buf,
65 size_t count);
66};
67
68static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg)
69{
70 /* Enable DBI access */
71 writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID),
72 &app_reg->slv_armisc);
73 writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID),
74 &app_reg->slv_awmisc);
75
76}
77
78static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg)
79{
80 /* disable DBI access */
81 writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
82 &app_reg->slv_armisc);
83 writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID),
84 &app_reg->slv_awmisc);
85
86}
87
88static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config,
89 int where, int size, u32 *val)
90{
91 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
92 ulong va_address;
93
94 /* Enable DBI access */
95 enable_dbi_access(app_reg);
96
97 va_address = (ulong)config->va_dbi_base + (where & ~0x3);
98
99 *val = readl(va_address);
100
101 if (size == 1)
102 *val = (*val >> (8 * (where & 3))) & 0xff;
103 else if (size == 2)
104 *val = (*val >> (8 * (where & 3))) & 0xffff;
105
106 /* Disable DBI access */
107 disable_dbi_access(app_reg);
108}
109
110static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config,
111 int where, int size, u32 val)
112{
113 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
114 ulong va_address;
115
116 /* Enable DBI access */
117 enable_dbi_access(app_reg);
118
119 va_address = (ulong)config->va_dbi_base + (where & ~0x3);
120
121 if (size == 4)
122 writel(val, va_address);
123 else if (size == 2)
124 writew(val, va_address + (where & 2));
125 else if (size == 1)
126 writeb(val, va_address + (where & 3));
127
128 /* Disable DBI access */
129 disable_dbi_access(app_reg);
130}
131
132#define PCI_FIND_CAP_TTL 48
133
134static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config,
135 u32 pos, int cap, int *ttl)
136{
137 u32 id;
138
139 while ((*ttl)--) {
140 spear_dbi_read_reg(config, pos, 1, &pos);
141 if (pos < 0x40)
142 break;
143 pos &= ~3;
144 spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id);
145 if (id == 0xff)
146 break;
147 if (id == cap)
148 return pos;
149 pos += PCI_CAP_LIST_NEXT;
150 }
151 return 0;
152}
153
154static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config,
155 u32 pos, int cap)
156{
157 int ttl = PCI_FIND_CAP_TTL;
158
159 return pci_find_own_next_cap_ttl(config, pos, cap, &ttl);
160}
161
162static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config,
163 u8 hdr_type)
164{
165 u32 status;
166
167 spear_dbi_read_reg(config, PCI_STATUS, 2, &status);
168 if (!(status & PCI_STATUS_CAP_LIST))
169 return 0;
170
171 switch (hdr_type) {
172 case PCI_HEADER_TYPE_NORMAL:
173 case PCI_HEADER_TYPE_BRIDGE:
174 return PCI_CAPABILITY_LIST;
175 case PCI_HEADER_TYPE_CARDBUS:
176 return PCI_CB_CAPABILITY_LIST;
177 default:
178 return 0;
179 }
180
181 return 0;
182}
183
184/*
185 * Tell if a device supports a given PCI capability.
186 * Returns the address of the requested capability structure within the
187 * device's PCI configuration space or 0 in case the device does not
188 * support it. Possible values for @cap:
189 *
190 * %PCI_CAP_ID_PM Power Management
191 * %PCI_CAP_ID_AGP Accelerated Graphics Port
192 * %PCI_CAP_ID_VPD Vital Product Data
193 * %PCI_CAP_ID_SLOTID Slot Identification
194 * %PCI_CAP_ID_MSI Message Signalled Interrupts
195 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
196 * %PCI_CAP_ID_PCIX PCI-X
197 * %PCI_CAP_ID_EXP PCI Express
198 */
199static int pci_find_own_capability(struct spear_pcie_gadget_config *config,
200 int cap)
201{
202 u32 pos;
203 u32 hdr_type;
204
205 spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type);
206
207 pos = pci_find_own_cap_start(config, hdr_type);
208 if (pos)
209 pos = pci_find_own_next_cap(config, pos, cap);
210
211 return pos;
212}
213
214static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id)
215{
216 return 0;
217}
218
219/*
220 * configfs interfaces show/store functions
221 */
222static ssize_t pcie_gadget_show_link(
223 struct spear_pcie_gadget_config *config,
224 char *buf)
225{
226 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
227
228 if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID))
229 return sprintf(buf, "UP");
230 else
231 return sprintf(buf, "DOWN");
232}
233
234static ssize_t pcie_gadget_store_link(
235 struct spear_pcie_gadget_config *config,
236 const char *buf, size_t count)
237{
238 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
239
240 if (sysfs_streq(buf, "UP"))
241 writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID),
242 &app_reg->app_ctrl_0);
243 else if (sysfs_streq(buf, "DOWN"))
244 writel(readl(&app_reg->app_ctrl_0)
245 & ~(1 << APP_LTSSM_ENABLE_ID),
246 &app_reg->app_ctrl_0);
247 else
248 return -EINVAL;
249 return count;
250}
251
252static ssize_t pcie_gadget_show_int_type(
253 struct spear_pcie_gadget_config *config,
254 char *buf)
255{
256 return sprintf(buf, "%s", config->int_type);
257}
258
259static ssize_t pcie_gadget_store_int_type(
260 struct spear_pcie_gadget_config *config,
261 const char *buf, size_t count)
262{
263 u32 cap, vec, flags;
264 ulong vector;
265
266 if (sysfs_streq(buf, "INTA"))
267 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
268
269 else if (sysfs_streq(buf, "MSI")) {
270 vector = config->requested_msi;
271 vec = 0;
272 while (vector > 1) {
273 vector /= 2;
274 vec++;
275 }
276 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0);
277 cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
278 spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
279 flags &= ~PCI_MSI_FLAGS_QMASK;
280 flags |= vec << 1;
281 spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags);
282 } else
283 return -EINVAL;
284
285 strcpy(config->int_type, buf);
286
287 return count;
288}
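/* Worked example (illustrative): with requested_msi = 32 the loop above
 * computes vec = 5, and vec << 1 loads 5 into the Multiple Message Capable
 * field (PCI_MSI_FLAGS_QMASK), advertising 2^5 = 32 MSI vectors. */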
289
290static ssize_t pcie_gadget_show_no_of_msi(
291 struct spear_pcie_gadget_config *config,
292 char *buf)
293{
294 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
295 u32 cap, vec, flags;
296 ulong vector;
297
298 if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID))
299 != (1 << CFG_MSI_EN_ID))
300 vector = 0;
301 else {
302 cap = pci_find_own_capability(config, PCI_CAP_ID_MSI);
303 spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags);
304 flags &= PCI_MSI_FLAGS_QSIZE;
305 vec = flags >> 4;
306 vector = 1;
307 while (vec--)
308 vector *= 2;
309 }
310 config->configured_msi = vector;
311
312 return sprintf(buf, "%lu", vector);
313}
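/* Illustrative: if the host wrote Message Control = 0x31, then
 * (flags & PCI_MSI_FLAGS_QSIZE) >> 4 = 3, so 2^3 = 8 vectors were
 * enabled and "8" is reported. */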
314
315static ssize_t pcie_gadget_store_no_of_msi(
316 struct spear_pcie_gadget_config *config,
317 const char *buf, size_t count)
318{
319 if (strict_strtoul(buf, 0, &config->requested_msi))
320 return -EINVAL;
321 if (config->requested_msi > 32)
322 config->requested_msi = 32;
323
324 return count;
325}
326
327static ssize_t pcie_gadget_store_inta(
328 struct spear_pcie_gadget_config *config,
329 const char *buf, size_t count)
330{
331 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
332 ulong en;
333
334 if (strict_strtoul(buf, 0, &en))
335 return -EINVAL;
336
337 if (en)
338 writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID),
339 &app_reg->app_ctrl_0);
340 else
341 writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID),
342 &app_reg->app_ctrl_0);
343
344 return count;
345}
346
347static ssize_t pcie_gadget_store_send_msi(
348 struct spear_pcie_gadget_config *config,
349 const char *buf, size_t count)
350{
351 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
352 ulong vector;
353 u32 ven_msi;
354
355 if (strict_strtoul(buf, 0, &vector))
356 return -EINVAL;
357
358 if (!config->configured_msi)
359 return -EINVAL;
360
361 if (vector >= config->configured_msi)
362 return -EINVAL;
363
364 ven_msi = readl(&app_reg->ven_msi_1);
365 ven_msi &= ~VEN_MSI_FUN_NUM_MASK;
366 ven_msi |= 0 << VEN_MSI_FUN_NUM_ID;
367 ven_msi &= ~VEN_MSI_TC_MASK;
368 ven_msi |= 0 << VEN_MSI_TC_ID;
369 ven_msi &= ~VEN_MSI_VECTOR_MASK;
370 ven_msi |= vector << VEN_MSI_VECTOR_ID;
371
372 /* generating interrupt for msi vector */
373 ven_msi |= VEN_MSI_REQ_EN;
374 writel(ven_msi, &app_reg->ven_msi_1);
375 udelay(1);
376 ven_msi &= ~VEN_MSI_REQ_EN;
377 writel(ven_msi, &app_reg->ven_msi_1);
378
379 return count;
380}
381
382static ssize_t pcie_gadget_show_vendor_id(
383 struct spear_pcie_gadget_config *config,
384 char *buf)
385{
386 u32 id;
387
388 spear_dbi_read_reg(config, PCI_VENDOR_ID, 2, &id);
389
390 return sprintf(buf, "%x", id);
391}
392
393static ssize_t pcie_gadget_store_vendor_id(
394 struct spear_pcie_gadget_config *config,
395 const char *buf, size_t count)
396{
397 ulong id;
398
399 if (strict_strtoul(buf, 0, &id))
400 return -EINVAL;
401
402 spear_dbi_write_reg(config, PCI_VENDOR_ID, 2, id);
403
404 return count;
405}
406
407static ssize_t pcie_gadget_show_device_id(
408 struct spear_pcie_gadget_config *config,
409 char *buf)
410{
411 u32 id;
412
413 spear_dbi_read_reg(config, PCI_DEVICE_ID, 2, &id);
414
415 return sprintf(buf, "%x", id);
416}
417
418static ssize_t pcie_gadget_store_device_id(
419 struct spear_pcie_gadget_config *config,
420 const char *buf, size_t count)
421{
422 ulong id;
423
424 if (strict_strtoul(buf, 0, &id))
425 return -EINVAL;
426
427 spear_dbi_write_reg(config, PCI_DEVICE_ID, 2, id);
428
429 return count;
430}
431
432static ssize_t pcie_gadget_show_bar0_size(
433 struct spear_pcie_gadget_config *config,
434 char *buf)
435{
436 return sprintf(buf, "%lx", config->bar0_size);
437}
438
439static ssize_t pcie_gadget_store_bar0_size(
440 struct spear_pcie_gadget_config *config,
441 const char *buf, size_t count)
442{
443 ulong size;
444 u32 pos, pos1;
445 u32 no_of_bit = 0;
446
447 if (strict_strtoul(buf, 0, &size))
448 return -EINVAL;
449 /* min bar size is 256 */
450 if (size <= 0x100)
451 size = 0x100;
452 /* max bar size is 1MB */
453 else if (size >= 0x100000)
454 size = 0x100000;
455 else {
456 pos = 0;
457 pos1 = 0;
458 while (pos < 21) {
459 pos = find_next_bit((ulong *)&size, 21, pos);
460 if (pos != 21)
461 pos1 = pos + 1;
462 pos++;
463 no_of_bit++;
464 }
465 if (no_of_bit == 2)
466 pos1--;
467
468 size = 1 << pos1;
469 }
470 config->bar0_size = size;
471 spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1);
472
473 return count;
474}
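/* Worked examples of the rounding above (illustrative):
 * 0x80 -> 0x100 (clamped to the 256 byte minimum),
 * 0x400 -> 0x400 (powers of two pass through: no_of_bit ends up as 2),
 * 0x500 -> 0x800 (anything else is rounded up to the next power of two),
 * 0x200000 -> 0x100000 (clamped to the 1MB maximum). */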
475
476static ssize_t pcie_gadget_show_bar0_address(
477 struct spear_pcie_gadget_config *config,
478 char *buf)
479{
480 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
481
482 u32 address = readl(&app_reg->pim0_mem_addr_start);
483
484 return sprintf(buf, "%x", address);
485}
486
487static ssize_t pcie_gadget_store_bar0_address(
488 struct spear_pcie_gadget_config *config,
489 const char *buf, size_t count)
490{
491 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
492 ulong address;
493
494 if (strict_strtoul(buf, 0, &address))
495 return -EINVAL;
496
497 address &= ~(config->bar0_size - 1);
498 if (config->va_bar0_address)
499 iounmap(config->va_bar0_address);
500 config->va_bar0_address = ioremap(address, config->bar0_size);
501 if (!config->va_bar0_address)
502 return -ENOMEM;
503
504 writel(address, &app_reg->pim0_mem_addr_start);
505
506 return count;
507}
508
509static ssize_t pcie_gadget_show_bar0_rw_offset(
510 struct spear_pcie_gadget_config *config,
511 char *buf)
512{
513 return sprintf(buf, "%lx", config->bar0_rw_offset);
514}
515
516static ssize_t pcie_gadget_store_bar0_rw_offset(
517 struct spear_pcie_gadget_config *config,
518 const char *buf, size_t count)
519{
520 ulong offset;
521
522 if (strict_strtoul(buf, 0, &offset))
523 return -EINVAL;
524
525 if (offset % 4)
526 return -EINVAL;
527
528 config->bar0_rw_offset = offset;
529
530 return count;
531}
532
533static ssize_t pcie_gadget_show_bar0_data(
534 struct spear_pcie_gadget_config *config,
535 char *buf)
536{
537 ulong data;
538
539 if (!config->va_bar0_address)
540 return -ENOMEM;
541
542 data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset);
543
544 return sprintf(buf, "%lx", data);
545}
546
547static ssize_t pcie_gadget_store_bar0_data(
548 struct spear_pcie_gadget_config *config,
549 const char *buf, size_t count)
550{
551 ulong data;
552
553 if (strict_strtoul(buf, 0, &data))
554 return -EINVAL;
555
556 if (!config->va_bar0_address)
557 return -ENOMEM;
558
559 writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset);
560
561 return count;
562}
563
564/*
565 * Attribute definitions.
566 */
567
568#define PCIE_GADGET_TARGET_ATTR_RO(_name) \
569static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
570 __CONFIGFS_ATTR(_name, S_IRUGO, pcie_gadget_show_##_name, NULL)
571
572#define PCIE_GADGET_TARGET_ATTR_WO(_name) \
573static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
574 __CONFIGFS_ATTR(_name, S_IWUSR, NULL, pcie_gadget_store_##_name)
575
576#define PCIE_GADGET_TARGET_ATTR_RW(_name) \
577static struct pcie_gadget_target_attr pcie_gadget_target_##_name = \
578 __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, pcie_gadget_show_##_name, \
579 pcie_gadget_store_##_name)
580PCIE_GADGET_TARGET_ATTR_RW(link);
581PCIE_GADGET_TARGET_ATTR_RW(int_type);
582PCIE_GADGET_TARGET_ATTR_RW(no_of_msi);
583PCIE_GADGET_TARGET_ATTR_WO(inta);
584PCIE_GADGET_TARGET_ATTR_WO(send_msi);
585PCIE_GADGET_TARGET_ATTR_RW(vendor_id);
586PCIE_GADGET_TARGET_ATTR_RW(device_id);
587PCIE_GADGET_TARGET_ATTR_RW(bar0_size);
588PCIE_GADGET_TARGET_ATTR_RW(bar0_address);
589PCIE_GADGET_TARGET_ATTR_RW(bar0_rw_offset);
590PCIE_GADGET_TARGET_ATTR_RW(bar0_data);
591
592static struct configfs_attribute *pcie_gadget_target_attrs[] = {
593 &pcie_gadget_target_link.attr,
594 &pcie_gadget_target_int_type.attr,
595 &pcie_gadget_target_no_of_msi.attr,
596 &pcie_gadget_target_inta.attr,
597 &pcie_gadget_target_send_msi.attr,
598 &pcie_gadget_target_vendor_id.attr,
599 &pcie_gadget_target_device_id.attr,
600 &pcie_gadget_target_bar0_size.attr,
601 &pcie_gadget_target_bar0_address.attr,
602 &pcie_gadget_target_bar0_rw_offset.attr,
603 &pcie_gadget_target_bar0_data.attr,
604 NULL,
605};
606
607static struct pcie_gadget_target *to_target(struct config_item *item)
608{
609 return item ?
610 container_of(to_configfs_subsystem(to_config_group(item)),
611 struct pcie_gadget_target, subsys) : NULL;
612}
613
614/*
615 * Item operations and type for pcie_gadget_target.
616 */
617
618static ssize_t pcie_gadget_target_attr_show(struct config_item *item,
619 struct configfs_attribute *attr,
620 char *buf)
621{
622 ssize_t ret = -EINVAL;
623 struct pcie_gadget_target *target = to_target(item);
624 struct pcie_gadget_target_attr *t_attr =
625 container_of(attr, struct pcie_gadget_target_attr, attr);
626
627 if (t_attr->show)
628 ret = t_attr->show(&target->config, buf);
629 return ret;
630}
631
632static ssize_t pcie_gadget_target_attr_store(struct config_item *item,
633 struct configfs_attribute *attr,
634 const char *buf,
635 size_t count)
636{
637 ssize_t ret = -EINVAL;
638 struct pcie_gadget_target *target = to_target(item);
639 struct pcie_gadget_target_attr *t_attr =
640 container_of(attr, struct pcie_gadget_target_attr, attr);
641
642 if (t_attr->store)
643 ret = t_attr->store(&target->config, buf, count);
644 return ret;
645}
646
647static struct configfs_item_operations pcie_gadget_target_item_ops = {
648 .show_attribute = pcie_gadget_target_attr_show,
649 .store_attribute = pcie_gadget_target_attr_store,
650};
651
652static struct config_item_type pcie_gadget_target_type = {
653 .ct_attrs = pcie_gadget_target_attrs,
654 .ct_item_ops = &pcie_gadget_target_item_ops,
655 .ct_owner = THIS_MODULE,
656};
657
658static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config)
659{
660 struct pcie_app_reg __iomem *app_reg = config->va_app_base;
661
662 /*setup registers for outbound translation */
663
664 writel(config->base, &app_reg->in0_mem_addr_start);
665 writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE,
666 &app_reg->in0_mem_addr_limit);
667 writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start);
668 writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE,
669 &app_reg->in1_mem_addr_limit);
670 writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start);
671 writel(app_reg->in_io_addr_start + IN_IO_SIZE,
672 &app_reg->in_io_addr_limit);
673 writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start);
674 writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE,
675 &app_reg->in_cfg0_addr_limit);
676 writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start);
677 writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE,
678 &app_reg->in_cfg1_addr_limit);
679 writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start);
680 writel(app_reg->in_msg_addr_start + IN_MSG_SIZE,
681 &app_reg->in_msg_addr_limit);
682
683 writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start);
684 writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start);
685 writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start);
686
687 /*setup registers for inbound translation */
688
689 /* Keep AORAM mapped at BAR0 as default */
690 config->bar0_size = INBOUND_ADDR_MASK + 1;
691 spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK);
692 spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC);
693 config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE,
694 config->bar0_size);
695
696 writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start);
697 writel(0, &app_reg->pim1_mem_addr_start);
698 writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit);
699
700 writel(0x0, &app_reg->pim_io_addr_start);
701 writel(0x0, &app_reg->pim_io_addr_start);
702 writel(0x0, &app_reg->pim_rom_addr_start);
703
704 writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID)
705 | ((u32)1 << REG_TRANSLATION_ENABLE),
706 &app_reg->app_ctrl_0);
707 /* disable all rx interrupts */
708 writel(0, &app_reg->int_mask);
709
710 /* Select INTA as default*/
711 spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1);
712}
713
714static int __devinit spear_pcie_gadget_probe(struct platform_device *pdev)
715{
716 struct resource *res0, *res1;
717 int status = -EBUSY; /* default for the early resource failure paths */
718 int irq;
719 struct clk *clk;
720 struct pcie_gadget_target *target;
721 struct spear_pcie_gadget_config *config;
722 struct config_item *cg_item;
723 struct configfs_subsystem *subsys;
724
725 /* get resource for application registers*/
726
727 res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
728 if (!res0) {
729 dev_err(&pdev->dev, "no resource defined\n");
730 return -EBUSY;
731 }
732 if (!request_mem_region(res0->start, resource_size(res0),
733 pdev->name)) {
734 dev_err(&pdev->dev, "pcie gadget region already claimed\n");
735 return -EBUSY;
736 }
737 /* get resource for dbi registers*/
738
739 res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
740 if (!res1) {
741 dev_err(&pdev->dev, "no resource defined\n");
742 goto err_rel_res0;
743 }
744 if (!request_mem_region(res1->start, resource_size(res1),
745 pdev->name)) {
746 dev_err(&pdev->dev, "pcie gadget region already claimed\n");
747 goto err_rel_res0;
748 }
749
750 target = kzalloc(sizeof(*target), GFP_KERNEL);
751 if (!target) {
752 dev_err(&pdev->dev, "out of memory\n");
753 status = -ENOMEM;
754 goto err_rel_res;
755 }
756
757 cg_item = &target->subsys.su_group.cg_item;
758 sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id);
759 cg_item->ci_type = &pcie_gadget_target_type;
760 config = &target->config;
761 config->va_app_base = (void __iomem *)ioremap(res0->start,
762 resource_size(res0));
763 if (!config->va_app_base) {
764 dev_err(&pdev->dev, "ioremap fail\n");
765 status = -ENOMEM;
766 goto err_kzalloc;
767 }
768
769 config->base = (void __iomem *)res1->start;
770
771 config->va_dbi_base = (void __iomem *)ioremap(res1->start,
772 resource_size(res1));
773 if (!config->va_dbi_base) {
774 dev_err(&pdev->dev, "ioremap fail\n");
775 status = -ENOMEM;
776 goto err_iounmap_app;
777 }
778
779 dev_set_drvdata(&pdev->dev, target);
780
781 irq = platform_get_irq(pdev, 0);
782 if (irq < 0) {
783 dev_err(&pdev->dev, "no update irq?\n");
784 status = irq;
785 goto err_iounmap;
786 }
787
788 status = request_irq(irq, spear_pcie_gadget_irq, 0, pdev->name, NULL);
789 if (status) {
790 dev_err(&pdev->dev,
791 "pcie gadget interrupt IRQ%d already claimed\n", irq);
792 goto err_iounmap;
793 }
794
795 /* Register configfs hooks */
796 subsys = &target->subsys;
797 config_group_init(&subsys->su_group);
798 mutex_init(&subsys->su_mutex);
799 status = configfs_register_subsystem(subsys);
800 if (status)
801 goto err_irq;
802
803 /*
804 * init basic pcie application registers.
805 * Do not enable the clock if it is PCIE0. Ideally all controllers
806 * should be independent with respect to clocks, but PCIE1 and PCIE2
807 * depend on PCIE0, so the PCIE0 clock is enabled during board init.
808 */
809 if (pdev->id == 1) {
810 /*
811 * Ideally the CFG clock should also be enabled here, but it
812 * is currently done during the board init routine.
813 */
814 clk = clk_get_sys("pcie1", NULL);
815 if (IS_ERR(clk)) {
816 pr_err("%s:couldn't get clk for pcie1\n", __func__);
817 goto err_irq;
818 }
819 if (clk_enable(clk)) {
820 pr_err("%s:couldn't enable clk for pcie1\n", __func__);
821 goto err_irq;
822 }
823 } else if (pdev->id == 2) {
824 /*
825 * Ideally the CFG clock should also be enabled here, but it
826 * is currently done during the board init routine.
827 */
828 clk = clk_get_sys("pcie2", NULL);
829 if (IS_ERR(clk)) {
830 pr_err("%s:couldn't get clk for pcie2\n", __func__);
831 goto err_irq;
832 }
833 if (clk_enable(clk)) {
834 pr_err("%s:couldn't enable clk for pcie2\n", __func__);
835 goto err_irq;
836 }
837 }
838 spear13xx_pcie_device_init(config);
839
840 return 0;
841err_irq:
842 free_irq(irq, NULL);
843err_iounmap:
844 iounmap(config->va_dbi_base);
845err_iounmap_app:
846 iounmap(config->va_app_base);
847err_kzalloc:
848 kfree(target);
849err_rel_res:
850 release_mem_region(res1->start, resource_size(res1));
851err_rel_res0:
852 release_mem_region(res0->start, resource_size(res0));
853 return status;
854}
855
856static int __devexit spear_pcie_gadget_remove(struct platform_device *pdev)
857{
858 struct resource *res0, *res1;
859 struct pcie_gadget_target *target;
860 struct spear_pcie_gadget_config *config;
861 int irq;
862
863 res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
864 res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
865 irq = platform_get_irq(pdev, 0);
866 target = dev_get_drvdata(&pdev->dev);
867 config = &target->config;
868
869 free_irq(irq, NULL);
870 iounmap(config->va_dbi_base);
871 iounmap(config->va_app_base);
872 release_mem_region(res1->start, resource_size(res1));
873 release_mem_region(res0->start, resource_size(res0));
874 configfs_unregister_subsystem(&target->subsys);
875 kfree(target);
876
877 return 0;
878}
879
880static void spear_pcie_gadget_shutdown(struct platform_device *pdev)
881{
882}
883
884static struct platform_driver spear_pcie_gadget_driver = {
885 .probe = spear_pcie_gadget_probe,
886 .remove = spear_pcie_gadget_remove,
887 .shutdown = spear_pcie_gadget_shutdown,
888 .driver = {
889 .name = "pcie-gadget-spear",
890 .bus = &platform_bus_type
891 },
892};
893
894static int __init spear_pcie_gadget_init(void)
895{
896 return platform_driver_register(&spear_pcie_gadget_driver);
897}
898module_init(spear_pcie_gadget_init);
899
900static void __exit spear_pcie_gadget_exit(void)
901{
902 platform_driver_unregister(&spear_pcie_gadget_driver);
903}
904module_exit(spear_pcie_gadget_exit);
905
906MODULE_ALIAS("pcie-gadget-spear");
907MODULE_AUTHOR("Pratyush Anand");
908MODULE_LICENSE("GPL");
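As a companion to the Kconfig help text above, a hedged sketch of exercising
the configfs interface this driver registers: the mount point
/sys/kernel/config and the instance name pcie_gadget.1 are assumptions (the
group is named "pcie_gadget.%d" from the platform device id), while the
attribute names and the accepted strings ("UP"/"DOWN", "INTA"/"MSI") come
from the attribute definitions above.

#include <stdio.h>

/* write one value to a gadget attribute; returns 0 on success */
static int gadget_write(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	/* assumed configfs mount point and instance name */
	snprintf(path, sizeof(path),
		 "/sys/kernel/config/pcie_gadget.1/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	gadget_write("int_type", "INTA");	/* or "MSI" */
	gadget_write("vendor_id", "0x104a");	/* hypothetical vendor ID */
	gadget_write("bar0_size", "0x1000");	/* rounded as described above */
	return gadget_write("link", "UP");	/* enable LTSSM */
}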
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 379d2ffe4c87..2e032f0e8cf4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1417,7 +1417,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1417 if (res == NULL || irq < 0) 1417 if (res == NULL || irq < 0)
1418 return -ENXIO; 1418 return -ENXIO;
1419 1419
1420 res = request_mem_region(res->start, res->end - res->start + 1, 1420 res = request_mem_region(res->start, resource_size(res),
1421 pdev->name); 1421 pdev->name);
1422 if (res == NULL) 1422 if (res == NULL)
1423 return -EBUSY; 1423 return -EBUSY;
@@ -1457,7 +1457,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1457 1457
1458 host->irq = irq; 1458 host->irq = irq;
1459 host->phys_base = host->mem_res->start; 1459 host->phys_base = host->mem_res->start;
1460 host->virt_base = ioremap(res->start, res->end - res->start + 1); 1460 host->virt_base = ioremap(res->start, resource_size(res));
1461 if (!host->virt_base) 1461 if (!host->virt_base)
1462 goto err_ioremap; 1462 goto err_ioremap;
1463 1463
@@ -1514,7 +1514,7 @@ err_free_mmc_host:
1514err_ioremap: 1514err_ioremap:
1515 kfree(host); 1515 kfree(host);
1516err_free_mem_region: 1516err_free_mem_region:
1517 release_mem_region(res->start, res->end - res->start + 1); 1517 release_mem_region(res->start, resource_size(res));
1518 return ret; 1518 return ret;
1519} 1519}
1520 1520
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 158c0ee53b2c..259ece047afc 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2047,8 +2047,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2047 2047
2048 res->start += pdata->reg_offset; 2048 res->start += pdata->reg_offset;
2049 res->end += pdata->reg_offset; 2049 res->end += pdata->reg_offset;
2050 res = request_mem_region(res->start, res->end - res->start + 1, 2050 res = request_mem_region(res->start, resource_size(res), pdev->name);
2051 pdev->name);
2052 if (res == NULL) 2051 if (res == NULL)
2053 return -EBUSY; 2052 return -EBUSY;
2054 2053
@@ -2287,7 +2286,7 @@ err1:
2287err_alloc: 2286err_alloc:
2288 omap_hsmmc_gpio_free(pdata); 2287 omap_hsmmc_gpio_free(pdata);
2289err: 2288err:
2290 release_mem_region(res->start, res->end - res->start + 1); 2289 release_mem_region(res->start, resource_size(res));
2291 return ret; 2290 return ret;
2292} 2291}
2293 2292
@@ -2324,7 +2323,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2324 2323
2325 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2324 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2326 if (res) 2325 if (res)
2327 release_mem_region(res->start, res->end - res->start + 1); 2326 release_mem_region(res->start, resource_size(res));
2328 platform_set_drvdata(pdev, NULL); 2327 platform_set_drvdata(pdev, NULL);
2329 2328
2330 return 0; 2329 return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1a6e9eb7af43..338bea147c64 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2130,7 +2130,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2130} 2130}
2131 2131
2132/* 2132/*
2133* First release a slave and than destroy the bond if no more slaves are left. 2133* First release a slave and then destroy the bond if no more slaves are left.
2134* Must be under rtnl_lock when this function is called. 2134* Must be under rtnl_lock when this function is called.
2135*/ 2135*/
2136static int bond_release_and_destroy(struct net_device *bond_dev, 2136static int bond_release_and_destroy(struct net_device *bond_dev,
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 14050786218a..110eda01843c 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -633,9 +633,6 @@ static void c_can_start(struct net_device *dev)
633{ 633{
634 struct c_can_priv *priv = netdev_priv(dev); 634 struct c_can_priv *priv = netdev_priv(dev);
635 635
636 /* enable status change, error and module interrupts */
637 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
638
639 /* basic c_can configuration */ 636 /* basic c_can configuration */
640 c_can_chip_config(dev); 637 c_can_chip_config(dev);
641 638
@@ -643,6 +640,9 @@ static void c_can_start(struct net_device *dev)
643 640
644 /* reset tx helper pointers */ 641 /* reset tx helper pointers */
645 priv->tx_next = priv->tx_echo = 0; 642 priv->tx_next = priv->tx_echo = 0;
643
644 /* enable status change, error and module interrupts */
645 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
646} 646}
647 647
648static void c_can_stop(struct net_device *dev) 648static void c_can_stop(struct net_device *dev)
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
index 1d6f4b8d393a..a31661948c42 100644
--- a/drivers/net/ftmac100.c
+++ b/drivers/net/ftmac100.c
@@ -1102,7 +1102,7 @@ static int ftmac100_probe(struct platform_device *pdev)
1102 goto err_req_mem; 1102 goto err_req_mem;
1103 } 1103 }
1104 1104
1105 priv->base = ioremap(res->start, res->end - res->start); 1105 priv->base = ioremap(res->start, resource_size(res));
1106 if (!priv->base) { 1106 if (!priv->base) {
1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1107 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1108 err = -EIO; 1108 err = -EIO;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ccb231c4d933..2a0ad9a501bb 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -949,6 +949,11 @@ static void gfar_detect_errata(struct gfar_private *priv)
949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0)) 949 (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
950 priv->errata |= GFAR_ERRATA_A002; 950 priv->errata |= GFAR_ERRATA_A002;
951 951
952 /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
953 if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
954 (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
955 priv->errata |= GFAR_ERRATA_12;
956
952 if (priv->errata) 957 if (priv->errata)
953 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n", 958 dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
954 priv->errata); 959 priv->errata);
@@ -2154,8 +2159,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2154 /* Set up checksumming */ 2159 /* Set up checksumming */
2155 if (CHECKSUM_PARTIAL == skb->ip_summed) { 2160 if (CHECKSUM_PARTIAL == skb->ip_summed) {
2156 fcb = gfar_add_fcb(skb); 2161 fcb = gfar_add_fcb(skb);
2157 lstatus |= BD_LFLAG(TXBD_TOE); 2162 /* as specified by errata */
2158 gfar_tx_checksum(skb, fcb); 2163 if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
2164 && ((unsigned long)fcb % 0x20) > 0x18)) {
2165 __skb_pull(skb, GMAC_FCB_LEN);
2166 skb_checksum_help(skb);
2167 } else {
2168 lstatus |= BD_LFLAG(TXBD_TOE);
2169 gfar_tx_checksum(skb, fcb);
2170 }
2159 } 2171 }
2160 2172
2161 if (vlan_tx_tag_present(skb)) { 2173 if (vlan_tx_tag_present(skb)) {
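The eTSEC49 workaround keys off where the frame control block (FCB) lands within a 32-byte region: when it starts in the last eight bytes (offset 0x19 through 0x1f), TOE checksum offload is skipped and skb_checksum_help() computes the checksum in software instead. A minimal sketch of just the alignment predicate, assuming nothing beyond standard C:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors the check in gfar_start_xmit(): true when the FCB
     * starts in the last 8 bytes of a 32-byte aligned region */
    static int fcb_needs_sw_csum(uintptr_t fcb_addr)
    {
            return (fcb_addr % 0x20) > 0x18;
    }

    int main(void)
    {
            uintptr_t off;

            for (off = 0; off < 0x20; off++)
                    printf("offset 0x%02lx -> %s\n", (unsigned long)off,
                           fcb_needs_sw_csum(off) ? "software checksum"
                                                  : "TOE offload");
            return 0;
    }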
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 54de4135e932..ec5d595ce2e2 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -1039,6 +1039,7 @@ enum gfar_errata {
1039 GFAR_ERRATA_74 = 0x01, 1039 GFAR_ERRATA_74 = 0x01,
1040 GFAR_ERRATA_76 = 0x02, 1040 GFAR_ERRATA_76 = 0x02,
1041 GFAR_ERRATA_A002 = 0x04, 1041 GFAR_ERRATA_A002 = 0x04,
1042 GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
1042}; 1043};
1043 1044
1044/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1045/* Struct stolen almost completely (and shamelessly) from the FCC enet source
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 5b37d3c191e4..78e34e9e4f00 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,8 +39,11 @@ struct macvlan_port {
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu; 40 struct rcu_head rcu;
41 bool passthru; 41 bool passthru;
42 int count;
42}; 43};
43 44
45static void macvlan_port_destroy(struct net_device *dev);
46
44#define macvlan_port_get_rcu(dev) \ 47#define macvlan_port_get_rcu(dev) \
45 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) 48 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
46#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) 49#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
@@ -457,8 +460,13 @@ static int macvlan_init(struct net_device *dev)
457static void macvlan_uninit(struct net_device *dev) 460static void macvlan_uninit(struct net_device *dev)
458{ 461{
459 struct macvlan_dev *vlan = netdev_priv(dev); 462 struct macvlan_dev *vlan = netdev_priv(dev);
463 struct macvlan_port *port = vlan->port;
460 464
461 free_percpu(vlan->pcpu_stats); 465 free_percpu(vlan->pcpu_stats);
466
467 port->count -= 1;
468 if (!port->count)
469 macvlan_port_destroy(port->dev);
462} 470}
463 471
464static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, 472static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -691,12 +699,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
691 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 699 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
692 700
693 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 701 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
694 if (!list_empty(&port->vlans)) 702 if (port->count)
695 return -EINVAL; 703 return -EINVAL;
696 port->passthru = true; 704 port->passthru = true;
697 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); 705 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
698 } 706 }
699 707
708 port->count += 1;
700 err = register_netdevice(dev); 709 err = register_netdevice(dev);
701 if (err < 0) 710 if (err < 0)
702 goto destroy_port; 711 goto destroy_port;
@@ -707,7 +716,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
707 return 0; 716 return 0;
708 717
709destroy_port: 718destroy_port:
710 if (list_empty(&port->vlans)) 719 port->count -= 1;
720 if (!port->count)
711 macvlan_port_destroy(lowerdev); 721 macvlan_port_destroy(lowerdev);
712 722
713 return err; 723 return err;
@@ -725,13 +735,9 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
725void macvlan_dellink(struct net_device *dev, struct list_head *head) 735void macvlan_dellink(struct net_device *dev, struct list_head *head)
726{ 736{
727 struct macvlan_dev *vlan = netdev_priv(dev); 737 struct macvlan_dev *vlan = netdev_priv(dev);
728 struct macvlan_port *port = vlan->port;
729 738
730 list_del(&vlan->list); 739 list_del(&vlan->list);
731 unregister_netdevice_queue(dev, head); 740 unregister_netdevice_queue(dev, head);
732
733 if (list_empty(&port->vlans))
734 macvlan_port_destroy(port->dev);
735} 741}
736EXPORT_SYMBOL_GPL(macvlan_dellink); 742EXPORT_SYMBOL_GPL(macvlan_dellink);
737 743
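The macvlan change replaces the list_empty() test with an explicit port->count because unregister_netdevice_queue() only queues the device for teardown: at dellink time the vlan may still be on the list, so list emptiness is the wrong signal for when the port can be destroyed. Destruction now happens once per device in macvlan_uninit(), when the count drops to zero. A minimal standalone sketch of the counting pattern, with hypothetical port/dev names:

    #include <stdlib.h>

    struct port {
            int count;              /* live devices on this port */
    };

    /* hypothetical helpers standing in for the driver's */
    static void port_destroy(struct port *p) { free(p); }

    static void dev_attach(struct port *p)
    {
            p->count += 1;          /* newlink: count the device up front */
    }

    static void dev_uninit(struct port *p)
    {
            p->count -= 1;          /* uninit: last device tears the port down */
            if (!p->count)
                    port_destroy(p);
    }

    int main(void)
    {
            struct port *p = calloc(1, sizeof(*p));

            if (!p)
                    return 1;
            dev_attach(p);
            dev_attach(p);
            dev_uninit(p);          /* port survives: one device left */
            dev_uninit(p);          /* port destroyed here */
            return 0;
    }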
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 40fa59e2fd5c..32678b6c6b39 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9501,7 +9501,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
9501 struct niu_parent *p; 9501 struct niu_parent *p;
9502 int i; 9502 int i;
9503 9503
9504 plat_dev = platform_device_register_simple("niu", niu_parent_index, 9504 plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
9505 NULL, 0); 9505 NULL, 0);
9506 if (IS_ERR(plat_dev)) 9506 if (IS_ERR(plat_dev))
9507 return NULL; 9507 return NULL;
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 43583309a65d..31e9407a0739 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -129,7 +129,7 @@ static void *z_comp_alloc(unsigned char *options, int opt_len)
129 129
130 state->strm.next_in = NULL; 130 state->strm.next_in = NULL;
131 state->w_size = w_size; 131 state->w_size = w_size;
132 state->strm.workspace = vmalloc(zlib_deflate_workspacesize()); 132 state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
133 if (state->strm.workspace == NULL) 133 if (state->strm.workspace == NULL)
134 goto out_free; 134 goto out_free;
135 135
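zlib_deflate_workspacesize() here has grown (windowBits, memLevel) arguments, so callers can size the workspace for the parameters the stream will actually use instead of always paying for the worst case. ppp passes -w_size following zlib's documented convention that a negative windowBits requests a raw deflate stream with no zlib header; presumably the kernel helper normalizes the sign the same way deflateInit2() does. A userspace sketch of the same convention against stock zlib (assumes only <zlib.h>; the -12/8 values are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
            unsigned char in[] = "hello hello hello hello";
            unsigned char out[128];
            z_stream strm;

            memset(&strm, 0, sizeof(strm));
            /* windowBits = -12 (cf. -w_size): raw deflate, no zlib header;
             * memLevel = 8 matches the second argument in the patch */
            if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                             -12, 8, Z_DEFAULT_STRATEGY) != Z_OK)
                    return 1;

            strm.next_in = in;
            strm.avail_in = sizeof(in);
            strm.next_out = out;
            strm.avail_out = sizeof(out);
            if (deflate(&strm, Z_FINISH) != Z_STREAM_END)
                    return 1;

            printf("raw deflate output: %lu bytes\n",
                   (unsigned long)(sizeof(out) - strm.avail_out));
            return deflateEnd(&strm) == Z_OK ? 0 : 1;
    }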
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 105d7f0630cc..2de9b90c5f8f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -171,7 +171,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
171 if (skb->ip_summed == CHECKSUM_NONE) 171 if (skb->ip_summed == CHECKSUM_NONE)
172 skb->ip_summed = rcv_priv->ip_summed; 172 skb->ip_summed = rcv_priv->ip_summed;
173 173
174 length = skb->len + ETH_HLEN; 174 length = skb->len;
175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS) 175 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
176 goto rx_drop; 176 goto rx_drop;
177 177
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ad3d099bf5c1..c9784705f6ac 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1031,6 +1031,7 @@ static int __devinit acer_backlight_init(struct device *dev)
1031 struct backlight_device *bd; 1031 struct backlight_device *bd;
1032 1032
1033 memset(&props, 0, sizeof(struct backlight_properties)); 1033 memset(&props, 0, sizeof(struct backlight_properties));
1034 props.type = BACKLIGHT_PLATFORM;
1034 props.max_brightness = max_brightness; 1035 props.max_brightness = max_brightness;
1035 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, 1036 bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops,
1036 &props); 1037 &props);
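This and the platform-driver hunks that follow all add the same line: backlight_properties now carries a type (BACKLIGHT_RAW, BACKLIGHT_PLATFORM or BACKLIGHT_FIRMWARE, per the enum in <linux/backlight.h>) so userspace can tell how a device is actually driven; registering without it leaves the sysfs type attribute unset. A minimal registration sketch with hypothetical example_* names:

    #include <linux/backlight.h>
    #include <linux/string.h>

    static int example_get_brightness(struct backlight_device *bd)
    {
            return bd->props.brightness;
    }

    static int example_update_status(struct backlight_device *bd)
    {
            /* push bd->props.brightness to the hardware here */
            return 0;
    }

    static const struct backlight_ops example_bl_ops = {
            .get_brightness = example_get_brightness,
            .update_status  = example_update_status,
    };

    static struct backlight_device *example_register(struct device *dev)
    {
            struct backlight_properties props;

            memset(&props, 0, sizeof(props));
            props.type = BACKLIGHT_PLATFORM;        /* the field these patches add */
            props.max_brightness = 15;
            return backlight_device_register("example-bl", dev, NULL,
                                             &example_bl_ops, &props);
    }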
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f3aa6a7fdab6..5a6f7d7575d6 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -667,6 +667,7 @@ static int asus_backlight_init(struct asus_laptop *asus)
667 667
668 memset(&props, 0, sizeof(struct backlight_properties)); 668 memset(&props, 0, sizeof(struct backlight_properties));
669 props.max_brightness = 15; 669 props.max_brightness = 15;
670 props.type = BACKLIGHT_PLATFORM;
670 671
671 bd = backlight_device_register(ASUS_LAPTOP_FILE, 672 bd = backlight_device_register(ASUS_LAPTOP_FILE,
672 &asus->platform_device->dev, asus, 673 &asus->platform_device->dev, asus,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index fe495939c307..f503607c0645 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1507,6 +1507,7 @@ static int __init asus_acpi_init(void)
1507 } 1507 }
1508 1508
1509 memset(&props, 0, sizeof(struct backlight_properties)); 1509 memset(&props, 0, sizeof(struct backlight_properties));
1510 props.type = BACKLIGHT_PLATFORM;
1510 props.max_brightness = 15; 1511 props.max_brightness = 15;
1511 asus_backlight_device = backlight_device_register("asus", NULL, NULL, 1512 asus_backlight_device = backlight_device_register("asus", NULL, NULL,
1512 &asus_backlight_data, 1513 &asus_backlight_data,
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 911135425224..94f93b621d7b 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -564,6 +564,7 @@ static int cmpc_ipml_add(struct acpi_device *acpi)
564 return -ENOMEM; 564 return -ENOMEM;
565 565
566 memset(&props, 0, sizeof(struct backlight_properties)); 566 memset(&props, 0, sizeof(struct backlight_properties));
567 props.type = BACKLIGHT_PLATFORM;
567 props.max_brightness = 7; 568 props.max_brightness = 7;
568 ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev, 569 ipml->bd = backlight_device_register("cmpc_bl", &acpi->dev,
569 acpi->handle, &cmpc_bl_ops, 570 acpi->handle, &cmpc_bl_ops,
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 034572b980c9..eb95878fa583 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -970,6 +970,7 @@ static int __init compal_init(void)
970 if (!acpi_video_backlight_support()) { 970 if (!acpi_video_backlight_support()) {
971 struct backlight_properties props; 971 struct backlight_properties props;
972 memset(&props, 0, sizeof(struct backlight_properties)); 972 memset(&props, 0, sizeof(struct backlight_properties));
973 props.type = BACKLIGHT_PLATFORM;
973 props.max_brightness = BACKLIGHT_LEVEL_MAX; 974 props.max_brightness = BACKLIGHT_LEVEL_MAX;
974 compalbl_device = backlight_device_register(DRIVER_NAME, 975 compalbl_device = backlight_device_register(DRIVER_NAME,
975 NULL, NULL, 976 NULL, NULL,
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ad24ef36f9f7..de301aa8e5c3 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -671,6 +671,7 @@ static int __init dell_init(void)
671 if (max_intensity) { 671 if (max_intensity) {
672 struct backlight_properties props; 672 struct backlight_properties props;
673 memset(&props, 0, sizeof(struct backlight_properties)); 673 memset(&props, 0, sizeof(struct backlight_properties));
674 props.type = BACKLIGHT_PLATFORM;
674 props.max_brightness = max_intensity; 675 props.max_brightness = max_intensity;
675 dell_backlight_device = backlight_device_register("dell_backlight", 676 dell_backlight_device = backlight_device_register("dell_backlight",
676 &platform_device->dev, 677 &platform_device->dev,
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 49d9ad708f89..6605beac0d0e 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1147,6 +1147,7 @@ static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
1147 struct backlight_device *bd; 1147 struct backlight_device *bd;
1148 1148
1149 memset(&props, 0, sizeof(struct backlight_properties)); 1149 memset(&props, 0, sizeof(struct backlight_properties));
1150 props.type = BACKLIGHT_PLATFORM;
1150 props.max_brightness = 15; 1151 props.max_brightness = 15;
1151 bd = backlight_device_register(EEEPC_LAPTOP_FILE, 1152 bd = backlight_device_register(EEEPC_LAPTOP_FILE,
1152 &eeepc->platform_device->dev, eeepc, 1153 &eeepc->platform_device->dev, eeepc,
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 95e3b0948e9c..493054c2dbe1 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1128,6 +1128,7 @@ static int __init fujitsu_init(void)
1128 1128
1129 memset(&props, 0, sizeof(struct backlight_properties)); 1129 memset(&props, 0, sizeof(struct backlight_properties));
1130 max_brightness = fujitsu->max_brightness; 1130 max_brightness = fujitsu->max_brightness;
1131 props.type = BACKLIGHT_PLATFORM;
1131 props.max_brightness = max_brightness - 1; 1132 props.max_brightness = max_brightness - 1;
1132 fujitsu->bl_device = backlight_device_register("fujitsu-laptop", 1133 fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
1133 NULL, NULL, 1134 NULL, NULL,
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 7e9bb6df9d39..142d38579314 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -804,6 +804,7 @@ static int __init msi_init(void)
804 } else { 804 } else {
805 struct backlight_properties props; 805 struct backlight_properties props;
806 memset(&props, 0, sizeof(struct backlight_properties)); 806 memset(&props, 0, sizeof(struct backlight_properties));
807 props.type = BACKLIGHT_PLATFORM;
807 props.max_brightness = MSI_LCD_LEVEL_MAX - 1; 808 props.max_brightness = MSI_LCD_LEVEL_MAX - 1;
808 msibl_device = backlight_device_register("msi-laptop-bl", NULL, 809 msibl_device = backlight_device_register("msi-laptop-bl", NULL,
809 NULL, &msibl_ops, 810 NULL, &msibl_ops,
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 35278ad7e628..d5419c9ec07a 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -254,6 +254,7 @@ static int __init msi_wmi_init(void)
254 if (!acpi_video_backlight_support()) { 254 if (!acpi_video_backlight_support()) {
255 struct backlight_properties props; 255 struct backlight_properties props;
256 memset(&props, 0, sizeof(struct backlight_properties)); 256 memset(&props, 0, sizeof(struct backlight_properties));
257 props.type = BACKLIGHT_PLATFORM;
257 props.max_brightness = ARRAY_SIZE(backlight_map) - 1; 258 props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
258 backlight = backlight_device_register(DRV_NAME, NULL, NULL, 259 backlight = backlight_device_register(DRV_NAME, NULL, NULL,
259 &msi_backlight_ops, 260 &msi_backlight_ops,
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index cc1e0ba104d7..05be30ee158b 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -602,6 +602,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
602 } 602 }
603 /* initialize backlight */ 603 /* initialize backlight */
604 memset(&props, 0, sizeof(struct backlight_properties)); 604 memset(&props, 0, sizeof(struct backlight_properties));
605 props.type = BACKLIGHT_PLATFORM;
605 props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT]; 606 props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
606 pcc->backlight = backlight_device_register("panasonic", NULL, pcc, 607 pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
607 &pcc_backlight_ops, &props); 608 &pcc_backlight_ops, &props);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 5e83370b0812..13d8d63bcca9 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1305,8 +1305,9 @@ static int sony_nc_add(struct acpi_device *device)
1305 "controlled by ACPI video driver\n"); 1305 "controlled by ACPI video driver\n");
1306 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", 1306 } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
1307 &handle))) { 1307 &handle))) {
1308 struct backlight_properties props; 1308 struct backlight_properties props;
1309 memset(&props, 0, sizeof(struct backlight_properties)); 1309 memset(&props, 0, sizeof(struct backlight_properties));
1310 props.type = BACKLIGHT_PLATFORM;
1310 props.max_brightness = SONY_MAX_BRIGHTNESS - 1; 1311 props.max_brightness = SONY_MAX_BRIGHTNESS - 1;
1311 sony_backlight_device = backlight_device_register("sony", NULL, 1312 sony_backlight_device = backlight_device_register("sony", NULL,
1312 NULL, 1313 NULL,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb9922385ef8..947bdcaa0ce9 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6307,6 +6307,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6307 return 1; 6307 return 1;
6308 6308
6309 memset(&props, 0, sizeof(struct backlight_properties)); 6309 memset(&props, 0, sizeof(struct backlight_properties));
6310 props.type = BACKLIGHT_PLATFORM;
6310 props.max_brightness = bright_maxlvl; 6311 props.max_brightness = bright_maxlvl;
6311 props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; 6312 props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
6312 ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME, 6313 ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME,
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 209cced786c6..63f42a22e102 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1018,6 +1018,7 @@ static int __init toshiba_acpi_init(void)
1018 create_toshiba_proc_entries(); 1018 create_toshiba_proc_entries();
1019 } 1019 }
1020 1020
1021 props.type = BACKLIGHT_PLATFORM;
1021 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 1022 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1022 toshiba_backlight_device = backlight_device_register("toshiba", 1023 toshiba_backlight_device = backlight_device_register("toshiba",
1023 &toshiba_acpi.p_dev->dev, 1024 &toshiba_acpi.p_dev->dev,
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 19bc73695475..fa4e0a5db3f8 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -142,7 +142,9 @@ void __pnp_remove_device(struct pnp_dev *dev);
142int pnp_check_port(struct pnp_dev *dev, struct resource *res); 142int pnp_check_port(struct pnp_dev *dev, struct resource *res);
143int pnp_check_mem(struct pnp_dev *dev, struct resource *res); 143int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
144int pnp_check_irq(struct pnp_dev *dev, struct resource *res); 144int pnp_check_irq(struct pnp_dev *dev, struct resource *res);
145#ifdef CONFIG_ISA_DMA_API
145int pnp_check_dma(struct pnp_dev *dev, struct resource *res); 146int pnp_check_dma(struct pnp_dev *dev, struct resource *res);
147#endif
146 148
147char *pnp_resource_type_name(struct resource *res); 149char *pnp_resource_type_name(struct resource *res);
148void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc); 150void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc);
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 0a15664eef1c..ed9ce507149a 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -171,6 +171,7 @@ __add:
171 return 0; 171 return 0;
172} 172}
173 173
174#ifdef CONFIG_ISA_DMA_API
174static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx) 175static int pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
175{ 176{
176 struct resource *res, local_res; 177 struct resource *res, local_res;
@@ -210,6 +211,7 @@ __add:
210 pnp_add_dma_resource(dev, res->start, res->flags); 211 pnp_add_dma_resource(dev, res->start, res->flags);
211 return 0; 212 return 0;
212} 213}
214#endif /* CONFIG_ISA_DMA_API */
213 215
214void pnp_init_resources(struct pnp_dev *dev) 216void pnp_init_resources(struct pnp_dev *dev)
215{ 217{
@@ -234,7 +236,8 @@ static void pnp_clean_resource_table(struct pnp_dev *dev)
234static int pnp_assign_resources(struct pnp_dev *dev, int set) 236static int pnp_assign_resources(struct pnp_dev *dev, int set)
235{ 237{
236 struct pnp_option *option; 238 struct pnp_option *option;
237 int nport = 0, nmem = 0, nirq = 0, ndma = 0; 239 int nport = 0, nmem = 0, nirq = 0;
240 int ndma __maybe_unused = 0;
238 int ret = 0; 241 int ret = 0;
239 242
240 pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set); 243 pnp_dbg(&dev->dev, "pnp_assign_resources, try dependent set %d\n", set);
@@ -256,9 +259,11 @@ static int pnp_assign_resources(struct pnp_dev *dev, int set)
256 case IORESOURCE_IRQ: 259 case IORESOURCE_IRQ:
257 ret = pnp_assign_irq(dev, &option->u.irq, nirq++); 260 ret = pnp_assign_irq(dev, &option->u.irq, nirq++);
258 break; 261 break;
262#ifdef CONFIG_ISA_DMA_API
259 case IORESOURCE_DMA: 263 case IORESOURCE_DMA:
260 ret = pnp_assign_dma(dev, &option->u.dma, ndma++); 264 ret = pnp_assign_dma(dev, &option->u.dma, ndma++);
261 break; 265 break;
266#endif
262 default: 267 default:
263 ret = -EINVAL; 268 ret = -EINVAL;
264 break; 269 break;
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index a925e6b63d72..b0ecacbe53b1 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -409,9 +409,9 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
409 return 1; 409 return 1;
410} 410}
411 411
412#ifdef CONFIG_ISA_DMA_API
412int pnp_check_dma(struct pnp_dev *dev, struct resource *res) 413int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
413{ 414{
414#ifndef CONFIG_IA64
415 int i; 415 int i;
416 struct pnp_dev *tdev; 416 struct pnp_dev *tdev;
417 struct resource *tres; 417 struct resource *tres;
@@ -466,11 +466,8 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
466 } 466 }
467 467
468 return 1; 468 return 1;
469#else
470 /* IA64 does not have legacy DMA */
471 return 0;
472#endif
473} 469}
470#endif /* CONFIG_ISA_DMA_API */
474 471
475unsigned long pnp_resource_type(struct resource *res) 472unsigned long pnp_resource_type(struct resource *res)
476{ 473{
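Across base.h, manager.c and resource.c the PNP series swaps the old IA64 runtime stub for compile-time CONFIG_ISA_DMA_API guards, so pnp_check_dma() and pnp_assign_dma() simply do not exist on platforms without ISA DMA; the __maybe_unused on the ndma counter keeps its declaration from warning when the DMA case is compiled out. A standalone sketch of the guard pattern (stand-in macro for the one in <linux/compiler.h>):

    #include <stdio.h>

    /* stand-in for the kernel macro from <linux/compiler.h> */
    #define __maybe_unused __attribute__((unused))

    /* uncomment to mimic CONFIG_ISA_DMA_API=y */
    /* #define CONFIG_ISA_DMA_API 1 */

    #ifdef CONFIG_ISA_DMA_API
    static int assign_dma(int idx)
    {
            printf("assigning DMA channel slot %d\n", idx);
            return 0;
    }
    #endif

    int main(void)
    {
            int ndma __maybe_unused = 0;    /* only consumed when DMA is built in */

    #ifdef CONFIG_ISA_DMA_API
            assign_dma(ndma++);
    #endif
            return 0;
    }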
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index b93af3ebb5ba..dcd39fba6ddd 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -216,11 +216,6 @@ static void parport_attach(struct parport *port)
216 216
217 hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 217 hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
218 device.timer.function = hrtimer_event; 218 device.timer.function = hrtimer_event;
219#ifdef CONFIG_PREEMPT_RT
220 /* hrtimer interrupt will run in the interrupt context with this */
221 device.timer.irqsafe = 1;
222#endif
223
224 hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS); 219 hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
225 220
226 return; 221 return;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4941cade319f..e1878877399c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -985,4 +985,14 @@ config RTC_DRV_LPC32XX
985 This driver can also be built as a module. If so, the module 985 This driver can also be built as a module. If so, the module
986 will be called rtc-lpc32xx. 986 will be called rtc-lpc32xx.
987 987
988config RTC_DRV_TEGRA
989 tristate "NVIDIA Tegra Internal RTC driver"
990 depends on RTC_CLASS && ARCH_TEGRA
991 help
992 If you say yes here you get support for the
993 Tegra 200 series internal RTC module.
994
995 This driver can also be built as a module. If so, the module
996 will be called rtc-tegra.
997
988endif # RTC_CLASS 998endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 5f6c3838dcf6..ca91c3c42e98 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
91obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o 91obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
92obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o 92obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
93obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o 93obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
94obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
94obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 95obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
95obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o 96obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
96obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o 97obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index d834a63ec4b0..e6e71deb188f 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -25,6 +25,7 @@
25#include <linux/bcd.h> 25#include <linux/bcd.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/pm.h>
28 29
29#define DS1374_REG_TOD0 0x00 /* Time of Day */ 30#define DS1374_REG_TOD0 0x00 /* Time of Day */
30#define DS1374_REG_TOD1 0x01 31#define DS1374_REG_TOD1 0x01
@@ -409,32 +410,38 @@ static int __devexit ds1374_remove(struct i2c_client *client)
409} 410}
410 411
411#ifdef CONFIG_PM 412#ifdef CONFIG_PM
412static int ds1374_suspend(struct i2c_client *client, pm_message_t state) 413static int ds1374_suspend(struct device *dev)
413{ 414{
415 struct i2c_client *client = to_i2c_client(dev);
416
414 if (client->irq >= 0 && device_may_wakeup(&client->dev)) 417 if (client->irq >= 0 && device_may_wakeup(&client->dev))
415 enable_irq_wake(client->irq); 418 enable_irq_wake(client->irq);
416 return 0; 419 return 0;
417} 420}
418 421
419static int ds1374_resume(struct i2c_client *client) 422static int ds1374_resume(struct device *dev)
420{ 423{
424 struct i2c_client *client = to_i2c_client(dev);
425
421 if (client->irq >= 0 && device_may_wakeup(&client->dev)) 426 if (client->irq >= 0 && device_may_wakeup(&client->dev))
422 disable_irq_wake(client->irq); 427 disable_irq_wake(client->irq);
423 return 0; 428 return 0;
424} 429}
430
431static SIMPLE_DEV_PM_OPS(ds1374_pm, ds1374_suspend, ds1374_resume);
432
433#define DS1374_PM (&ds1374_pm)
425#else 434#else
426#define ds1374_suspend NULL 435#define DS1374_PM NULL
427#define ds1374_resume NULL
428#endif 436#endif
429 437
430static struct i2c_driver ds1374_driver = { 438static struct i2c_driver ds1374_driver = {
431 .driver = { 439 .driver = {
432 .name = "rtc-ds1374", 440 .name = "rtc-ds1374",
433 .owner = THIS_MODULE, 441 .owner = THIS_MODULE,
442 .pm = DS1374_PM,
434 }, 443 },
435 .probe = ds1374_probe, 444 .probe = ds1374_probe,
436 .suspend = ds1374_suspend,
437 .resume = ds1374_resume,
438 .remove = __devexit_p(ds1374_remove), 445 .remove = __devexit_p(ds1374_remove),
439 .id_table = ds1374_id, 446 .id_table = ds1374_id,
440}; 447};
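The ds1374 conversion moves suspend/resume off the legacy i2c_driver callbacks and onto dev_pm_ops, taking struct device instead of struct i2c_client. SIMPLE_DEV_PM_OPS wires one suspend and one resume function into a const struct dev_pm_ops; a sketch of roughly what it provides, under the assumption that the same pair is reused for all the system-sleep transitions:

    #include <linux/pm.h>

    static int example_suspend(struct device *dev) { return 0; }
    static int example_resume(struct device *dev)  { return 0; }

    /* roughly what SIMPLE_DEV_PM_OPS(example_pm, example_suspend,
     * example_resume) expands to: the suspend/resume pair reused
     * across the system-sleep slots */
    static const struct dev_pm_ops example_pm = {
            .suspend  = example_suspend,
            .resume   = example_resume,
            .freeze   = example_suspend,
            .thaw     = example_resume,
            .poweroff = example_suspend,
            .restore  = example_resume,
    };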
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 3fffd708711f..fbabc773dded 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -468,7 +468,7 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj,
468static struct bin_attribute ds1511_nvram_attr = { 468static struct bin_attribute ds1511_nvram_attr = {
469 .attr = { 469 .attr = {
470 .name = "nvram", 470 .name = "nvram",
471 .mode = S_IRUGO | S_IWUGO, 471 .mode = S_IRUGO | S_IWUSR,
472 }, 472 },
473 .size = DS1511_RAM_MAX, 473 .size = DS1511_RAM_MAX,
474 .read = ds1511_nvram_read, 474 .read = ds1511_nvram_read,
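The one-character ds1511 change is a permissions fix: S_IWUGO made the nvram attribute world-writable, while S_IWUSR restricts writes to the owner. A standalone sketch showing the resulting mode bits:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            unsigned irugo = S_IRUSR | S_IRGRP | S_IROTH;   /* kernel S_IRUGO */
            unsigned iwugo = S_IWUSR | S_IWGRP | S_IWOTH;   /* kernel S_IWUGO */

            /* prints 0666: anyone may scribble over the nvram */
            printf("S_IRUGO | S_IWUGO = %04o (world-writable)\n", irugo | iwugo);
            /* prints 0644: only the owner may write */
            printf("S_IRUGO | S_IWUSR = %04o (owner-only write)\n",
                   irugo | S_IWUSR);
            return 0;
    }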
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index 468200c38ecb..da8beb8cae51 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -39,6 +39,8 @@
39#define ISL1208_REG_SR_BAT (1<<1) /* battery */ 39#define ISL1208_REG_SR_BAT (1<<1) /* battery */
40#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */ 40#define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */
41#define ISL1208_REG_INT 0x08 41#define ISL1208_REG_INT 0x08
42#define ISL1208_REG_INT_ALME (1<<6) /* alarm enable */
43#define ISL1208_REG_INT_IM (1<<7) /* interrupt/alarm mode */
42#define ISL1208_REG_09 0x09 /* reserved */ 44#define ISL1208_REG_09 0x09 /* reserved */
43#define ISL1208_REG_ATR 0x0a 45#define ISL1208_REG_ATR 0x0a
44#define ISL1208_REG_DTR 0x0b 46#define ISL1208_REG_DTR 0x0b
@@ -202,6 +204,30 @@ isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
202} 204}
203 205
204static int 206static int
207isl1208_rtc_toggle_alarm(struct i2c_client *client, int enable)
208{
209 int icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT);
210
211 if (icr < 0) {
212 dev_err(&client->dev, "%s: reading INT failed\n", __func__);
213 return icr;
214 }
215
216 if (enable)
217 icr |= ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM;
218 else
219 icr &= ~(ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM);
220
221 icr = i2c_smbus_write_byte_data(client, ISL1208_REG_INT, icr);
222 if (icr < 0) {
223 dev_err(&client->dev, "%s: writing INT failed\n", __func__);
224 return icr;
225 }
226
227 return 0;
228}
229
230static int
205isl1208_rtc_proc(struct device *dev, struct seq_file *seq) 231isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
206{ 232{
207 struct i2c_client *const client = to_i2c_client(dev); 233 struct i2c_client *const client = to_i2c_client(dev);
@@ -288,9 +314,8 @@ isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
288{ 314{
289 struct rtc_time *const tm = &alarm->time; 315 struct rtc_time *const tm = &alarm->time;
290 u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, }; 316 u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
291 int sr; 317 int icr, yr, sr = isl1208_i2c_get_sr(client);
292 318
293 sr = isl1208_i2c_get_sr(client);
294 if (sr < 0) { 319 if (sr < 0) {
295 dev_err(&client->dev, "%s: reading SR failed\n", __func__); 320 dev_err(&client->dev, "%s: reading SR failed\n", __func__);
296 return sr; 321 return sr;
@@ -313,6 +338,73 @@ isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
313 bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1; 338 bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1;
314 tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03); 339 tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03);
315 340
341 /* The alarm doesn't store the year so get it from the rtc section */
342 yr = i2c_smbus_read_byte_data(client, ISL1208_REG_YR);
343 if (yr < 0) {
344 dev_err(&client->dev, "%s: reading RTC YR failed\n", __func__);
345 return yr;
346 }
347 tm->tm_year = bcd2bin(yr) + 100;
348
349 icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT);
350 if (icr < 0) {
351 dev_err(&client->dev, "%s: reading INT failed\n", __func__);
352 return icr;
353 }
354 alarm->enabled = !!(icr & ISL1208_REG_INT_ALME);
355
356 return 0;
357}
358
359static int
360isl1208_i2c_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
361{
362 struct rtc_time *alarm_tm = &alarm->time;
363 u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
364 const int offs = ISL1208_REG_SCA;
365 unsigned long rtc_secs, alarm_secs;
366 struct rtc_time rtc_tm;
367 int err, enable;
368
369 err = isl1208_i2c_read_time(client, &rtc_tm);
370 if (err)
371 return err;
372 err = rtc_tm_to_time(&rtc_tm, &rtc_secs);
373 if (err)
374 return err;
375 err = rtc_tm_to_time(alarm_tm, &alarm_secs);
376 if (err)
377 return err;
378
379 /* If the alarm time is before the current time disable the alarm */
380 if (!alarm->enabled || alarm_secs <= rtc_secs)
381 enable = 0x00;
382 else
383 enable = 0x80;
384
385 /* Program the alarm and enable it for each setting */
386 regs[ISL1208_REG_SCA - offs] = bin2bcd(alarm_tm->tm_sec) | enable;
387 regs[ISL1208_REG_MNA - offs] = bin2bcd(alarm_tm->tm_min) | enable;
388 regs[ISL1208_REG_HRA - offs] = bin2bcd(alarm_tm->tm_hour) |
389 ISL1208_REG_HR_MIL | enable;
390
391 regs[ISL1208_REG_DTA - offs] = bin2bcd(alarm_tm->tm_mday) | enable;
392 regs[ISL1208_REG_MOA - offs] = bin2bcd(alarm_tm->tm_mon + 1) | enable;
393 regs[ISL1208_REG_DWA - offs] = bin2bcd(alarm_tm->tm_wday & 7) | enable;
394
395 /* write ALARM registers */
396 err = isl1208_i2c_set_regs(client, offs, regs,
397 ISL1208_ALARM_SECTION_LEN);
398 if (err < 0) {
399 dev_err(&client->dev, "%s: writing ALARM section failed\n",
400 __func__);
401 return err;
402 }
403
404 err = isl1208_rtc_toggle_alarm(client, enable);
405 if (err)
406 return err;
407
316 return 0; 408 return 0;
317} 409}
318 410
@@ -391,12 +483,63 @@ isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
391 return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm); 483 return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm);
392} 484}
393 485
486static int
487isl1208_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
488{
489 return isl1208_i2c_set_alarm(to_i2c_client(dev), alarm);
490}
491
492static irqreturn_t
493isl1208_rtc_interrupt(int irq, void *data)
494{
495 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
496 struct i2c_client *client = data;
497 int handled = 0, sr, err;
498
499 /*
500 * I2C reads get NAK'ed if we read straight away after an interrupt?
501 * Using a mdelay/msleep didn't seem to help either, so we work around
502 * this by continually trying to read the register for a short time.
503 */
504 while (1) {
505 sr = isl1208_i2c_get_sr(client);
506 if (sr >= 0)
507 break;
508
509 if (time_after(jiffies, timeout)) {
510 dev_err(&client->dev, "%s: reading SR failed\n",
511 __func__);
512 return sr;
513 }
514 }
515
516 if (sr & ISL1208_REG_SR_ALM) {
517 dev_dbg(&client->dev, "alarm!\n");
518
519 /* Clear the alarm */
520 sr &= ~ISL1208_REG_SR_ALM;
521 sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
522 if (sr < 0)
523 dev_err(&client->dev, "%s: writing SR failed\n",
524 __func__);
525 else
526 handled = 1;
527
528 /* Disable the alarm */
529 err = isl1208_rtc_toggle_alarm(client, 0);
530 if (err)
531 return err;
532 }
533
534 return handled ? IRQ_HANDLED : IRQ_NONE;
535}
536
394static const struct rtc_class_ops isl1208_rtc_ops = { 537static const struct rtc_class_ops isl1208_rtc_ops = {
395 .proc = isl1208_rtc_proc, 538 .proc = isl1208_rtc_proc,
396 .read_time = isl1208_rtc_read_time, 539 .read_time = isl1208_rtc_read_time,
397 .set_time = isl1208_rtc_set_time, 540 .set_time = isl1208_rtc_set_time,
398 .read_alarm = isl1208_rtc_read_alarm, 541 .read_alarm = isl1208_rtc_read_alarm,
399 /*.set_alarm = isl1208_rtc_set_alarm, */ 542 .set_alarm = isl1208_rtc_set_alarm,
400}; 543};
401 544
402/* sysfs interface */ 545/* sysfs interface */
@@ -488,11 +631,29 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
488 dev_info(&client->dev, 631 dev_info(&client->dev,
489 "chip found, driver version " DRV_VERSION "\n"); 632 "chip found, driver version " DRV_VERSION "\n");
490 633
634 if (client->irq > 0) {
635 rc = request_threaded_irq(client->irq, NULL,
636 isl1208_rtc_interrupt,
637 IRQF_SHARED,
638 isl1208_driver.driver.name, client);
639 if (!rc) {
640 device_init_wakeup(&client->dev, 1);
641 enable_irq_wake(client->irq);
642 } else {
643 dev_err(&client->dev,
644 "Unable to request irq %d, no alarm support\n",
645 client->irq);
646 client->irq = 0;
647 }
648 }
649
491 rtc = rtc_device_register(isl1208_driver.driver.name, 650 rtc = rtc_device_register(isl1208_driver.driver.name,
492 &client->dev, &isl1208_rtc_ops, 651 &client->dev, &isl1208_rtc_ops,
493 THIS_MODULE); 652 THIS_MODULE);
494 if (IS_ERR(rtc)) 653 if (IS_ERR(rtc)) {
495 return PTR_ERR(rtc); 654 rc = PTR_ERR(rtc);
655 goto exit_free_irq;
656 }
496 657
497 i2c_set_clientdata(client, rtc); 658 i2c_set_clientdata(client, rtc);
498 659
@@ -514,6 +675,9 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
514 675
515exit_unregister: 676exit_unregister:
516 rtc_device_unregister(rtc); 677 rtc_device_unregister(rtc);
678exit_free_irq:
679 if (client->irq)
680 free_irq(client->irq, client);
517 681
518 return rc; 682 return rc;
519} 683}
@@ -525,6 +689,8 @@ isl1208_remove(struct i2c_client *client)
525 689
526 sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); 690 sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files);
527 rtc_device_unregister(rtc); 691 rtc_device_unregister(rtc);
692 if (client->irq)
693 free_irq(client->irq, client);
528 694
529 return 0; 695 return 0;
530} 696}
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
new file mode 100644
index 000000000000..2fc31aac3f4e
--- /dev/null
+++ b/drivers/rtc/rtc-tegra.c
@@ -0,0 +1,488 @@
1/*
2 * An RTC driver for the NVIDIA Tegra 200 series internal RTC.
3 *
4 * Copyright (c) 2010, NVIDIA Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26#include <linux/delay.h>
27#include <linux/rtc.h>
28#include <linux/platform_device.h>
29
30/* set to 1 = busy every eight 32kHz clocks during copy of sec+msec to AHB */
31#define TEGRA_RTC_REG_BUSY 0x004
32#define TEGRA_RTC_REG_SECONDS 0x008
33/* when msec is read, the seconds are buffered into shadow seconds. */
34#define TEGRA_RTC_REG_SHADOW_SECONDS 0x00c
35#define TEGRA_RTC_REG_MILLI_SECONDS 0x010
36#define TEGRA_RTC_REG_SECONDS_ALARM0 0x014
37#define TEGRA_RTC_REG_SECONDS_ALARM1 0x018
38#define TEGRA_RTC_REG_MILLI_SECONDS_ALARM0 0x01c
39#define TEGRA_RTC_REG_INTR_MASK 0x028
40/* write 1 bits to clear status bits */
41#define TEGRA_RTC_REG_INTR_STATUS 0x02c
42
43/* bits in INTR_MASK */
44#define TEGRA_RTC_INTR_MASK_MSEC_CDN_ALARM (1<<4)
45#define TEGRA_RTC_INTR_MASK_SEC_CDN_ALARM (1<<3)
46#define TEGRA_RTC_INTR_MASK_MSEC_ALARM (1<<2)
47#define TEGRA_RTC_INTR_MASK_SEC_ALARM1 (1<<1)
48#define TEGRA_RTC_INTR_MASK_SEC_ALARM0 (1<<0)
49
50/* bits in INTR_STATUS */
51#define TEGRA_RTC_INTR_STATUS_MSEC_CDN_ALARM (1<<4)
52#define TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM (1<<3)
53#define TEGRA_RTC_INTR_STATUS_MSEC_ALARM (1<<2)
54#define TEGRA_RTC_INTR_STATUS_SEC_ALARM1 (1<<1)
55#define TEGRA_RTC_INTR_STATUS_SEC_ALARM0 (1<<0)
56
57struct tegra_rtc_info {
58 struct platform_device *pdev;
59 struct rtc_device *rtc_dev;
60 void __iomem *rtc_base; /* NULL if not initialized. */
61 int tegra_rtc_irq; /* alarm and periodic irq */
62 spinlock_t tegra_rtc_lock;
63};
64
65/* RTC hardware is busy when it is updating its values over AHB once
66 * every eight 32kHz clocks (~250uS).
67 * outside of these updates the CPU is free to write.
68 * CPU is always free to read.
69 */
70static inline u32 tegra_rtc_check_busy(struct tegra_rtc_info *info)
71{
72 return readl(info->rtc_base + TEGRA_RTC_REG_BUSY) & 1;
73}
74
75/* Wait for hardware to be ready for writing.
76 * This function tries to maximize the amount of time before the next update.
77 * It does this by waiting for the RTC to become busy with its periodic update,
78 * then returning once the RTC first becomes not busy.
79 * This periodic update (where the seconds and milliseconds are copied to the
80 * AHB side) occurs every eight 32kHz clocks (~250uS).
81 * The behavior of this function allows us to make some assumptions without
82 * introducing a race, because 250uS is plenty of time to read/write a value.
83 */
84static int tegra_rtc_wait_while_busy(struct device *dev)
85{
86 struct tegra_rtc_info *info = dev_get_drvdata(dev);
87
88 int retries = 500; /* ~490 us is the worst case, ~250 us is best. */
89
90 /* first wait for the RTC to become busy. this is when it
91 * posts its updated seconds+msec registers to AHB side. */
92 while (tegra_rtc_check_busy(info)) {
93 if (!retries--)
94 goto retry_failed;
95 udelay(1);
96 }
97
98 /* now we have about 250 us to manipulate registers */
99 return 0;
100
101retry_failed:
102 dev_err(dev, "write failed: retry count exceeded.\n");
103 return -ETIMEDOUT;
104}
105
106static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
107{
108 struct tegra_rtc_info *info = dev_get_drvdata(dev);
109 unsigned long sec, msec;
110 unsigned long sl_irq_flags;
111
112 /* RTC hardware copies seconds to shadow seconds when a read
113 * of milliseconds occurs. use a lock to keep other threads out. */
114 spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
115
116 msec = readl(info->rtc_base + TEGRA_RTC_REG_MILLI_SECONDS);
117 sec = readl(info->rtc_base + TEGRA_RTC_REG_SHADOW_SECONDS);
118
119 spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
120
121 rtc_time_to_tm(sec, tm);
122
123 dev_vdbg(dev, "time read as %lu. %d/%d/%d %d:%02u:%02u\n",
124 sec,
125 tm->tm_mon + 1,
126 tm->tm_mday,
127 tm->tm_year + 1900,
128 tm->tm_hour,
129 tm->tm_min,
130 tm->tm_sec
131 );
132
133 return 0;
134}
135
136static int tegra_rtc_set_time(struct device *dev, struct rtc_time *tm)
137{
138 struct tegra_rtc_info *info = dev_get_drvdata(dev);
139 unsigned long sec;
140 int ret;
141
142 /* convert tm to seconds. */
143 ret = rtc_valid_tm(tm);
144 if (ret)
145 return ret;
146
147 rtc_tm_to_time(tm, &sec);
148
149 dev_vdbg(dev, "time set to %lu. %d/%d/%d %d:%02u:%02u\n",
150 sec,
151 tm->tm_mon+1,
152 tm->tm_mday,
153 tm->tm_year+1900,
154 tm->tm_hour,
155 tm->tm_min,
156 tm->tm_sec
157 );
158
159 /* seconds only written if wait succeeded. */
160 ret = tegra_rtc_wait_while_busy(dev);
161 if (!ret)
162 writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS);
163
164 dev_vdbg(dev, "time read back as %d\n",
165 readl(info->rtc_base + TEGRA_RTC_REG_SECONDS));
166
167 return ret;
168}
169
170static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
171{
172 struct tegra_rtc_info *info = dev_get_drvdata(dev);
173 unsigned long sec;
174 unsigned tmp;
175
176 sec = readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
177
178 if (sec == 0) {
179 /* alarm is disabled. */
180 alarm->enabled = 0;
181 alarm->time.tm_mon = -1;
182 alarm->time.tm_mday = -1;
183 alarm->time.tm_year = -1;
184 alarm->time.tm_hour = -1;
185 alarm->time.tm_min = -1;
186 alarm->time.tm_sec = -1;
187 } else {
188 /* alarm is enabled. */
189 alarm->enabled = 1;
190 rtc_time_to_tm(sec, &alarm->time);
191 }
192
193 tmp = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
194 alarm->pending = (tmp & TEGRA_RTC_INTR_STATUS_SEC_ALARM0) != 0;
195
196 return 0;
197}
198
199static int tegra_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
200{
201 struct tegra_rtc_info *info = dev_get_drvdata(dev);
202 unsigned status;
203 unsigned long sl_irq_flags;
204
205 tegra_rtc_wait_while_busy(dev);
206 spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
207
208 /* read the original value, and OR in the flag. */
209 status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
210 if (enabled)
211 status |= TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* set it */
212 else
213 status &= ~TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* clear it */
214
215 writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
216
217 spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
218
219 return 0;
220}
221
222static int tegra_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
223{
224 struct tegra_rtc_info *info = dev_get_drvdata(dev);
225 unsigned long sec;
226
227 if (alarm->enabled)
228 rtc_tm_to_time(&alarm->time, &sec);
229 else
230 sec = 0;
231
232 tegra_rtc_wait_while_busy(dev);
233 writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
234 dev_vdbg(dev, "alarm read back as %d\n",
235 readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
236
237 /* if successfully written and alarm is enabled ... */
238 if (sec) {
239 tegra_rtc_alarm_irq_enable(dev, 1);
240
241 dev_vdbg(dev, "alarm set as %lu. %d/%d/%d %d:%02u:%02u\n",
242 sec,
243 alarm->time.tm_mon+1,
244 alarm->time.tm_mday,
245 alarm->time.tm_year+1900,
246 alarm->time.tm_hour,
247 alarm->time.tm_min,
248 alarm->time.tm_sec);
249 } else {
250 /* disable alarm if 0 or write error. */
251 dev_vdbg(dev, "alarm disabled\n");
252 tegra_rtc_alarm_irq_enable(dev, 0);
253 }
254
255 return 0;
256}
257
258static int tegra_rtc_proc(struct device *dev, struct seq_file *seq)
259{
260 if (!dev || !dev->driver)
261 return 0;
262
263 return seq_printf(seq, "name\t\t: %s\n", dev_name(dev));
264}
265
266static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
267{
268 struct device *dev = data;
269 struct tegra_rtc_info *info = dev_get_drvdata(dev);
270 unsigned long events = 0;
271 unsigned status;
272 unsigned long sl_irq_flags;
273
274 status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
275 if (status) {
276 /* clear the interrupt masks and status on any irq. */
277 tegra_rtc_wait_while_busy(dev);
278 spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
279 writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
280 writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
281 spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
282 }
283
284 /* check if Alarm */
285 if ((status & TEGRA_RTC_INTR_STATUS_SEC_ALARM0))
286 events |= RTC_IRQF | RTC_AF;
287
288 /* check if Periodic */
289 if ((status & TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM))
290 events |= RTC_IRQF | RTC_PF;
291
292 rtc_update_irq(info->rtc_dev, 1, events);
293
294 return IRQ_HANDLED;
295}
296
297static struct rtc_class_ops tegra_rtc_ops = {
298 .read_time = tegra_rtc_read_time,
299 .set_time = tegra_rtc_set_time,
300 .read_alarm = tegra_rtc_read_alarm,
301 .set_alarm = tegra_rtc_set_alarm,
302 .proc = tegra_rtc_proc,
303 .alarm_irq_enable = tegra_rtc_alarm_irq_enable,
304};
305
306static int __devinit tegra_rtc_probe(struct platform_device *pdev)
307{
308 struct tegra_rtc_info *info;
309 struct resource *res;
310 int ret;
311
312 info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
313 if (!info)
314 return -ENOMEM;
315
316 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
317 if (!res) {
318 dev_err(&pdev->dev,
319 "Unable to allocate resources for device.\n");
320 ret = -EBUSY;
321 goto err_free_info;
322 }
323
324 if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
325 dev_err(&pdev->dev,
326 "Unable to request mem region for device.\n");
327 ret = -EBUSY;
328 goto err_free_info;
329 }
330
331 info->tegra_rtc_irq = platform_get_irq(pdev, 0);
332 if (info->tegra_rtc_irq <= 0) {
333 ret = -EBUSY;
334 goto err_release_mem_region;
335 }
336
337 info->rtc_base = ioremap_nocache(res->start, resource_size(res));
338 if (!info->rtc_base) {
339 dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
340 ret = -EBUSY;
341 goto err_release_mem_region;
342 }
343
344 /* set context info. */
345 info->pdev = pdev;
346 info->tegra_rtc_lock = __SPIN_LOCK_UNLOCKED(info->tegra_rtc_lock);
347
348 platform_set_drvdata(pdev, info);
349
350 /* clear out the hardware. */
351 writel(0, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
352 writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
353 writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
354
355 device_init_wakeup(&pdev->dev, 1);
356
357 info->rtc_dev = rtc_device_register(
358 pdev->name, &pdev->dev, &tegra_rtc_ops, THIS_MODULE);
359 if (IS_ERR(info->rtc_dev)) {
360 ret = PTR_ERR(info->rtc_dev);
361 info->rtc_dev = NULL;
362 dev_err(&pdev->dev,
363 "Unable to register device (err=%d).\n",
364 ret);
365 goto err_iounmap;
366 }
367
368 ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
369 IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
370 if (ret) {
371 dev_err(&pdev->dev,
372 "Unable to request interrupt for device (err=%d).\n",
373 ret);
374 goto err_dev_unreg;
375 }
376
377 dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
378
379 return 0;
380
381err_dev_unreg:
382 rtc_device_unregister(info->rtc_dev);
383err_iounmap:
384 iounmap(info->rtc_base);
385err_release_mem_region:
386 release_mem_region(res->start, resource_size(res));
387err_free_info:
388 kfree(info);
389
390 return ret;
391}
392
393static int __devexit tegra_rtc_remove(struct platform_device *pdev)
394{
395 struct tegra_rtc_info *info = platform_get_drvdata(pdev);
396 struct resource *res;
397
398 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
399 if (!res)
400 return -EBUSY;
401
402 free_irq(info->tegra_rtc_irq, &pdev->dev);
403 rtc_device_unregister(info->rtc_dev);
404 iounmap(info->rtc_base);
405 release_mem_region(res->start, resource_size(res));
406 kfree(info);
407
408 platform_set_drvdata(pdev, NULL);
409
410 return 0;
411}
412
413#ifdef CONFIG_PM
414static int tegra_rtc_suspend(struct platform_device *pdev, pm_message_t state)
415{
416 struct device *dev = &pdev->dev;
417 struct tegra_rtc_info *info = platform_get_drvdata(pdev);
418
419 tegra_rtc_wait_while_busy(dev);
420
421 /* only use ALARM0 as a wake source. */
422 writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
423 writel(TEGRA_RTC_INTR_STATUS_SEC_ALARM0,
424 info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
425
426 dev_vdbg(dev, "alarm sec = %d\n",
427 readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
428
429 dev_vdbg(dev, "Suspend (device_may_wakeup=%d) irq:%d\n",
430 device_may_wakeup(dev), info->tegra_rtc_irq);
431
432 /* leave the alarms on as a wake source. */
433 if (device_may_wakeup(dev))
434 enable_irq_wake(info->tegra_rtc_irq);
435
436 return 0;
437}
438
439static int tegra_rtc_resume(struct platform_device *pdev)
440{
441 struct device *dev = &pdev->dev;
442 struct tegra_rtc_info *info = platform_get_drvdata(pdev);
443
444 dev_vdbg(dev, "Resume (device_may_wakeup=%d)\n",
445 device_may_wakeup(dev));
446 /* alarms were left on as a wake source, turn them off. */
447 if (device_may_wakeup(dev))
448 disable_irq_wake(info->tegra_rtc_irq);
449
450 return 0;
451}
452#endif
453
454static void tegra_rtc_shutdown(struct platform_device *pdev)
455{
456 dev_vdbg(&pdev->dev, "disabling interrupts.\n");
457 tegra_rtc_alarm_irq_enable(&pdev->dev, 0);
458}
459
460MODULE_ALIAS("platform:tegra_rtc");
461static struct platform_driver tegra_rtc_driver = {
462 .remove = __devexit_p(tegra_rtc_remove),
463 .shutdown = tegra_rtc_shutdown,
464 .driver = {
465 .name = "tegra_rtc",
466 .owner = THIS_MODULE,
467 },
468#ifdef CONFIG_PM
469 .suspend = tegra_rtc_suspend,
470 .resume = tegra_rtc_resume,
471#endif
472};
473
474static int __init tegra_rtc_init(void)
475{
476 return platform_driver_probe(&tegra_rtc_driver, tegra_rtc_probe);
477}
478module_init(tegra_rtc_init);
479
480static void __exit tegra_rtc_exit(void)
481{
482 platform_driver_unregister(&tegra_rtc_driver);
483}
484module_exit(tegra_rtc_exit);
485
486MODULE_AUTHOR("Jon Mayo <jmayo@nvidia.com>");
487MODULE_DESCRIPTION("driver for Tegra internal RTC");
488MODULE_LICENSE("GPL");
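Once registered, the new Tegra driver is consumed through the standard RTC character device like any other rtc_class driver. A userspace sketch reading the time via the RTC_RD_TIME ioctl (assuming the device registers as /dev/rtc0):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/rtc.h>

    int main(void)
    {
            struct rtc_time tm;
            int fd = open("/dev/rtc0", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/rtc0");
                    return 1;
            }
            if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
                    perror("RTC_RD_TIME");
                    close(fd);
                    return 1;
            }
            printf("rtc time: %04d-%02d-%02d %02d:%02d:%02d\n",
                   tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                   tm.tm_hour, tm.tm_min, tm.tm_sec);
            close(fd);
            return 0;
    }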
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3be5db5d6343..7ff61d76b4c5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -597,6 +597,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
597 break; 597 break;
598 598
599 default: 599 default:
600 ret = BLKPREP_KILL;
600 goto out; 601 goto out;
601 } 602 }
602 603
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index b90c2cf3e247..750fe5045efa 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -574,6 +574,7 @@ static const struct backlight_ops dcon_bl_ops = {
574 574
575static struct backlight_properties dcon_bl_props = { 575static struct backlight_properties dcon_bl_props = {
576 .max_brightness = 15, 576 .max_brightness = 15,
577 .type = BACKLIGHT_RAW,
577 .power = FB_BLANK_UNBLANK, 578 .power = FB_BLANK_UNBLANK,
578}; 579};
579 580
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index 6607a89ccb4b..25294462b8b6 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -781,6 +781,7 @@ static int __init samsung_init(void)
781 781
782 /* create a backlight device to talk to this one */ 782 /* create a backlight device to talk to this one */
783 memset(&props, 0, sizeof(struct backlight_properties)); 783 memset(&props, 0, sizeof(struct backlight_properties));
784 props.type = BACKLIGHT_PLATFORM;
784 props.max_brightness = sabi_config->max_brightness; 785 props.max_brightness = sabi_config->max_brightness;
785 backlight_device = backlight_device_register("samsung", &sdev->dev, 786 backlight_device = backlight_device_register("samsung", &sdev->dev,
786 NULL, &backlight_ops, 787 NULL, &backlight_ops,
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index d8210ca00720..b9451219528b 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -322,7 +322,7 @@ void tty_schedule_flip(struct tty_struct *tty)
322 if (tty->buf.tail != NULL) 322 if (tty->buf.tail != NULL)
323 tty->buf.tail->commit = tty->buf.tail->used; 323 tty->buf.tail->commit = tty->buf.tail->used;
324 spin_unlock_irqrestore(&tty->buf.lock, flags); 324 spin_unlock_irqrestore(&tty->buf.lock, flags);
325 schedule_delayed_work(&tty->buf.work, 1); 325 schedule_work(&tty->buf.work);
326} 326}
327EXPORT_SYMBOL(tty_schedule_flip); 327EXPORT_SYMBOL(tty_schedule_flip);
328 328
@@ -402,7 +402,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
402static void flush_to_ldisc(struct work_struct *work) 402static void flush_to_ldisc(struct work_struct *work)
403{ 403{
404 struct tty_struct *tty = 404 struct tty_struct *tty =
405 container_of(work, struct tty_struct, buf.work.work); 405 container_of(work, struct tty_struct, buf.work);
406 unsigned long flags; 406 unsigned long flags;
407 struct tty_ldisc *disc; 407 struct tty_ldisc *disc;
408 408
@@ -443,7 +443,7 @@ static void flush_to_ldisc(struct work_struct *work)
443 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
444 break; 444 break;
445 if (!tty->receive_room || seen_tail) { 445 if (!tty->receive_room || seen_tail) {
446 schedule_delayed_work(&tty->buf.work, 1); 446 schedule_work(&tty->buf.work);
447 break; 447 break;
448 } 448 }
449 if (count > tty->receive_room) 449 if (count > tty->receive_room)
@@ -481,7 +481,7 @@ static void flush_to_ldisc(struct work_struct *work)
481 */ 481 */
482void tty_flush_to_ldisc(struct tty_struct *tty) 482void tty_flush_to_ldisc(struct tty_struct *tty)
483{ 483{
484 flush_delayed_work(&tty->buf.work); 484 flush_work(&tty->buf.work);
485} 485}
486 486
487/** 487/**
@@ -506,9 +506,9 @@ void tty_flip_buffer_push(struct tty_struct *tty)
506 spin_unlock_irqrestore(&tty->buf.lock, flags); 506 spin_unlock_irqrestore(&tty->buf.lock, flags);
507 507
508 if (tty->low_latency) 508 if (tty->low_latency)
509 flush_to_ldisc(&tty->buf.work.work); 509 flush_to_ldisc(&tty->buf.work);
510 else 510 else
511 schedule_delayed_work(&tty->buf.work, 1); 511 schedule_work(&tty->buf.work);
512} 512}
513EXPORT_SYMBOL(tty_flip_buffer_push); 513EXPORT_SYMBOL(tty_flip_buffer_push);
514 514
@@ -529,6 +529,6 @@ void tty_buffer_init(struct tty_struct *tty)
529 tty->buf.tail = NULL; 529 tty->buf.tail = NULL;
530 tty->buf.free = NULL; 530 tty->buf.free = NULL;
531 tty->buf.memory_used = 0; 531 tty->buf.memory_used = 0;
532 INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc); 532 INIT_WORK(&tty->buf.work, flush_to_ldisc);
533} 533}
534 534
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 0fc564a97706..e19e13647116 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -529,7 +529,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
529static int tty_ldisc_halt(struct tty_struct *tty) 529static int tty_ldisc_halt(struct tty_struct *tty)
530{ 530{
531 clear_bit(TTY_LDISC, &tty->flags); 531 clear_bit(TTY_LDISC, &tty->flags);
532 return cancel_delayed_work_sync(&tty->buf.work); 532 return cancel_work_sync(&tty->buf.work);
533} 533}
534 534
535/** 535/**
@@ -542,7 +542,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
542{ 542{
543 flush_work_sync(&tty->hangup_work); 543 flush_work_sync(&tty->hangup_work);
544 flush_work_sync(&tty->SAK_work); 544 flush_work_sync(&tty->SAK_work);
545 flush_delayed_work_sync(&tty->buf.work); 545 flush_work_sync(&tty->buf.work);
546} 546}
547 547
548/** 548/**
@@ -722,9 +722,9 @@ enable:
722 /* Restart the work queue in case no characters kick it off. Safe if 722 /* Restart the work queue in case no characters kick it off. Safe if
723 already running */ 723 already running */
724 if (work) 724 if (work)
725 schedule_delayed_work(&tty->buf.work, 1); 725 schedule_work(&tty->buf.work);
726 if (o_work) 726 if (o_work)
727 schedule_delayed_work(&o_tty->buf.work, 1); 727 schedule_work(&o_tty->buf.work);
728 mutex_unlock(&tty->ldisc_mutex); 728 mutex_unlock(&tty->ldisc_mutex);
729 tty_unlock(); 729 tty_unlock();
730 return retval; 730 return retval;
@@ -830,12 +830,12 @@ void tty_ldisc_hangup(struct tty_struct *tty)
830 830
831 /* 831 /*
832 * this is like tty_ldisc_halt, but we need to give up 832 * this is like tty_ldisc_halt, but we need to give up
833 * the BTM before calling cancel_delayed_work_sync, 833 * the BTM before calling cancel_work_sync, which may
834 * which may need to wait for another function taking the BTM 834 * need to wait for another function taking the BTM
835 */ 835 */
836 clear_bit(TTY_LDISC, &tty->flags); 836 clear_bit(TTY_LDISC, &tty->flags);
837 tty_unlock(); 837 tty_unlock();
838 cancel_delayed_work_sync(&tty->buf.work); 838 cancel_work_sync(&tty->buf.work);
839 mutex_unlock(&tty->ldisc_mutex); 839 mutex_unlock(&tty->ldisc_mutex);
840 840
841 tty_lock(); 841 tty_lock();
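The rewritten comment above captures the reasoning behind this hunk: tty_ldisc_hangup() must drop the BTM before calling cancel_work_sync(), because the buffer work it is about to wait for may itself block on the BTM, and waiting while still holding the lock would deadlock. The general deadlock-avoidance shape, sketched with illustrative names:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct mutex big_lock;		/* stands in for the BTM */
	struct work_struct work;	/* handler may take big_lock */
};

static void demo_halt(struct demo_dev *d)
{
	mutex_lock(&d->big_lock);
	/* ... state updates that need big_lock ... */
	mutex_unlock(&d->big_lock);	/* release first ... */
	cancel_work_sync(&d->work);	/* ... then wait for the handler */
}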
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1fa6ce3e4a23..68ab460a735c 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -282,6 +282,7 @@ static int appledisplay_probe(struct usb_interface *iface,
282 snprintf(bl_name, sizeof(bl_name), "appledisplay%d", 282 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
283 atomic_inc_return(&count_displays) - 1); 283 atomic_inc_return(&count_displays) - 1);
284 memset(&props, 0, sizeof(struct backlight_properties)); 284 memset(&props, 0, sizeof(struct backlight_properties));
285 props.type = BACKLIGHT_RAW;
285 props.max_brightness = 0xff; 286 props.max_brightness = 0xff;
286 pdata->bd = backlight_device_register(bl_name, NULL, pdata, 287 pdata->bd = backlight_device_register(bl_name, NULL, pdata,
287 &appledisplay_bl_data, &props); 288 &appledisplay_bl_data, &props);
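This one-liner is the first of many in this series: backlight_properties grew a type field, and BACKLIGHT_RAW marks drivers that program the hardware directly (BACKLIGHT_PLATFORM and BACKLIGHT_FIRMWARE cover the indirect cases). The registration shape that all of these hunks converge on, with illustrative names:

#include <linux/backlight.h>
#include <linux/string.h>

static struct backlight_device *demo_bl_register(struct device *parent,
						 const struct backlight_ops *ops)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;	/* driver touches the hardware itself */
	props.max_brightness = 0xff;
	return backlight_device_register("demo_bl", parent, NULL, ops, &props);
}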
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f616cefc95ba..2f7c76a85e53 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -60,6 +60,7 @@ static int move_iovec_hdr(struct iovec *from, struct iovec *to,
60{ 60{
61 int seg = 0; 61 int seg = 0;
62 size_t size; 62 size_t size;
63
63 while (len && seg < iov_count) { 64 while (len && seg < iov_count) {
64 size = min(from->iov_len, len); 65 size = min(from->iov_len, len);
65 to->iov_base = from->iov_base; 66 to->iov_base = from->iov_base;
@@ -79,6 +80,7 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
79{ 80{
80 int seg = 0; 81 int seg = 0;
81 size_t size; 82 size_t size;
83
82 while (len && seg < iovcount) { 84 while (len && seg < iovcount) {
83 size = min(from->iov_len, len); 85 size = min(from->iov_len, len);
84 to->iov_base = from->iov_base; 86 to->iov_base = from->iov_base;
@@ -211,12 +213,13 @@ static int peek_head_len(struct sock *sk)
211{ 213{
212 struct sk_buff *head; 214 struct sk_buff *head;
213 int len = 0; 215 int len = 0;
216 unsigned long flags;
214 217
215 lock_sock(sk); 218 spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
216 head = skb_peek(&sk->sk_receive_queue); 219 head = skb_peek(&sk->sk_receive_queue);
217 if (head) 220 if (likely(head))
218 len = head->len; 221 len = head->len;
219 release_sock(sk); 222 spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
220 return len; 223 return len;
221} 224}
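The peek_head_len() change swaps lock_sock(), which can sleep and serializes against the whole socket, for the receive queue's own spinlock: only the head of sk_receive_queue is inspected, and the IRQ-safe variant is needed because the networking core appends to that queue from softirq context. In isolation the pattern looks like this (a sketch of the locking idiom, not a new API):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Length of the first queued packet, or 0 if the queue is empty. */
static int demo_head_len(struct sock *sk)
{
	struct sk_buff *head;
	unsigned long flags;
	int len = 0;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (head)
		len = head->len;
	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}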
222 225
@@ -227,6 +230,7 @@ static int peek_head_len(struct sock *sk)
227 * @iovcount - returned count of io vectors we fill 230 * @iovcount - returned count of io vectors we fill
228 * @log - vhost log 231 * @log - vhost log
229 * @log_num - log offset 232 * @log_num - log offset
233 * @quota - headcount quota, 1 for big buffer
230 * returns number of buffer heads allocated, negative on error 234 * returns number of buffer heads allocated, negative on error
231 */ 235 */
232static int get_rx_bufs(struct vhost_virtqueue *vq, 236static int get_rx_bufs(struct vhost_virtqueue *vq,
@@ -234,7 +238,8 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
234 int datalen, 238 int datalen,
235 unsigned *iovcount, 239 unsigned *iovcount,
236 struct vhost_log *log, 240 struct vhost_log *log,
237 unsigned *log_num) 241 unsigned *log_num,
242 unsigned int quota)
238{ 243{
239 unsigned int out, in; 244 unsigned int out, in;
240 int seg = 0; 245 int seg = 0;
@@ -242,7 +247,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
242 unsigned d; 247 unsigned d;
243 int r, nlogs = 0; 248 int r, nlogs = 0;
244 249
245 while (datalen > 0) { 250 while (datalen > 0 && headcount < quota) {
246 if (unlikely(seg >= UIO_MAXIOV)) { 251 if (unlikely(seg >= UIO_MAXIOV)) {
247 r = -ENOBUFS; 252 r = -ENOBUFS;
248 goto err; 253 goto err;
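get_rx_bufs() gains a quota argument bounding how many descriptor heads one packet may consume: up to UIO_MAXIOV when mergeable rx buffers are negotiated, exactly one otherwise. That single knob is what allows the dedicated "big buffer" receive path deleted below to go away. A sketch of the selection, written against the driver's own helpers:

static unsigned int demo_rx_head_quota(struct vhost_net *net)
{
	/* One head per packet without VIRTIO_NET_F_MRG_RXBUF (the old
	 * "big buffer" layout), up to UIO_MAXIOV heads with it. */
	if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
		return UIO_MAXIOV;
	return 1;
}

The receive loop then passes this value as the final argument of get_rx_bufs(), as the later hunk in this file shows.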
@@ -282,117 +287,7 @@ err:
282 287
283/* Expects to be always run from workqueue - which acts as 288/* Expects to be always run from workqueue - which acts as
284 * read-size critical section for our kind of RCU. */ 289 * read-size critical section for our kind of RCU. */
285static void handle_rx_big(struct vhost_net *net) 290static void handle_rx(struct vhost_net *net)
286{
287 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
288 unsigned out, in, log, s;
289 int head;
290 struct vhost_log *vq_log;
291 struct msghdr msg = {
292 .msg_name = NULL,
293 .msg_namelen = 0,
294 .msg_control = NULL, /* FIXME: get and handle RX aux data. */
295 .msg_controllen = 0,
296 .msg_iov = vq->iov,
297 .msg_flags = MSG_DONTWAIT,
298 };
299
300 struct virtio_net_hdr hdr = {
301 .flags = 0,
302 .gso_type = VIRTIO_NET_HDR_GSO_NONE
303 };
304
305 size_t len, total_len = 0;
306 int err;
307 size_t hdr_size;
308 /* TODO: check that we are running from vhost_worker? */
309 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
310 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
311 return;
312
313 mutex_lock(&vq->mutex);
314 vhost_disable_notify(vq);
315 hdr_size = vq->vhost_hlen;
316
317 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
318 vq->log : NULL;
319
320 for (;;) {
321 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
322 ARRAY_SIZE(vq->iov),
323 &out, &in,
324 vq_log, &log);
325 /* On error, stop handling until the next kick. */
326 if (unlikely(head < 0))
327 break;
328 /* OK, now we need to know about added descriptors. */
329 if (head == vq->num) {
330 if (unlikely(vhost_enable_notify(vq))) {
331 /* They have slipped one in as we were
332 * doing that: check again. */
333 vhost_disable_notify(vq);
334 continue;
335 }
336 /* Nothing new? Wait for eventfd to tell us
337 * they refilled. */
338 break;
339 }
340 /* We don't need to be notified again. */
341 if (out) {
342 vq_err(vq, "Unexpected descriptor format for RX: "
343 "out %d, int %d\n",
344 out, in);
345 break;
346 }
347 /* Skip header. TODO: support TSO/mergeable rx buffers. */
348 s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
349 msg.msg_iovlen = in;
350 len = iov_length(vq->iov, in);
351 /* Sanity check */
352 if (!len) {
353 vq_err(vq, "Unexpected header len for RX: "
354 "%zd expected %zd\n",
355 iov_length(vq->hdr, s), hdr_size);
356 break;
357 }
358 err = sock->ops->recvmsg(NULL, sock, &msg,
359 len, MSG_DONTWAIT | MSG_TRUNC);
360 /* TODO: Check specific error and bomb out unless EAGAIN? */
361 if (err < 0) {
362 vhost_discard_vq_desc(vq, 1);
363 break;
364 }
365 /* TODO: Should check and handle checksum. */
366 if (err > len) {
367 pr_debug("Discarded truncated rx packet: "
368 " len %d > %zd\n", err, len);
369 vhost_discard_vq_desc(vq, 1);
370 continue;
371 }
372 len = err;
373 err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
374 if (err) {
375 vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
376 vq->iov->iov_base, err);
377 break;
378 }
379 len += hdr_size;
380 vhost_add_used_and_signal(&net->dev, vq, head, len);
381 if (unlikely(vq_log))
382 vhost_log_write(vq, vq_log, log, len);
383 total_len += len;
384 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
385 vhost_poll_queue(&vq->poll);
386 break;
387 }
388 }
389
390 mutex_unlock(&vq->mutex);
391}
392
393/* Expects to be always run from workqueue - which acts as
394 * read-size critical section for our kind of RCU. */
395static void handle_rx_mergeable(struct vhost_net *net)
396{ 291{
397 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; 292 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
398 unsigned uninitialized_var(in), log; 293 unsigned uninitialized_var(in), log;
@@ -405,19 +300,18 @@ static void handle_rx_mergeable(struct vhost_net *net)
405 .msg_iov = vq->iov, 300 .msg_iov = vq->iov,
406 .msg_flags = MSG_DONTWAIT, 301 .msg_flags = MSG_DONTWAIT,
407 }; 302 };
408
409 struct virtio_net_hdr_mrg_rxbuf hdr = { 303 struct virtio_net_hdr_mrg_rxbuf hdr = {
410 .hdr.flags = 0, 304 .hdr.flags = 0,
411 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE 305 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
412 }; 306 };
413
414 size_t total_len = 0; 307 size_t total_len = 0;
415 int err, headcount; 308 int err, headcount, mergeable;
416 size_t vhost_hlen, sock_hlen; 309 size_t vhost_hlen, sock_hlen;
417 size_t vhost_len, sock_len; 310 size_t vhost_len, sock_len;
418 /* TODO: check that we are running from vhost_worker? */ 311 /* TODO: check that we are running from vhost_worker? */
419 struct socket *sock = rcu_dereference_check(vq->private_data, 1); 312 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
420 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) 313
314 if (!sock)
421 return; 315 return;
422 316
423 mutex_lock(&vq->mutex); 317 mutex_lock(&vq->mutex);
@@ -427,12 +321,14 @@ static void handle_rx_mergeable(struct vhost_net *net)
427 321
428 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ? 322 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
429 vq->log : NULL; 323 vq->log : NULL;
324 mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
430 325
431 while ((sock_len = peek_head_len(sock->sk))) { 326 while ((sock_len = peek_head_len(sock->sk))) {
432 sock_len += sock_hlen; 327 sock_len += sock_hlen;
433 vhost_len = sock_len + vhost_hlen; 328 vhost_len = sock_len + vhost_hlen;
434 headcount = get_rx_bufs(vq, vq->heads, vhost_len, 329 headcount = get_rx_bufs(vq, vq->heads, vhost_len,
435 &in, vq_log, &log); 330 &in, vq_log, &log,
331 likely(mergeable) ? UIO_MAXIOV : 1);
436 /* On error, stop handling until the next kick. */ 332 /* On error, stop handling until the next kick. */
437 if (unlikely(headcount < 0)) 333 if (unlikely(headcount < 0))
438 break; 334 break;
@@ -476,7 +372,7 @@ static void handle_rx_mergeable(struct vhost_net *net)
476 break; 372 break;
477 } 373 }
478 /* TODO: Should check and handle checksum. */ 374 /* TODO: Should check and handle checksum. */
479 if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF) && 375 if (likely(mergeable) &&
480 memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount, 376 memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
481 offsetof(typeof(hdr), num_buffers), 377 offsetof(typeof(hdr), num_buffers),
482 sizeof hdr.num_buffers)) { 378 sizeof hdr.num_buffers)) {
@@ -498,14 +394,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
498 mutex_unlock(&vq->mutex); 394 mutex_unlock(&vq->mutex);
499} 395}
500 396
501static void handle_rx(struct vhost_net *net)
502{
503 if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
504 handle_rx_mergeable(net);
505 else
506 handle_rx_big(net);
507}
508
509static void handle_tx_kick(struct vhost_work *work) 397static void handle_tx_kick(struct vhost_work *work)
510{ 398{
511 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 399 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
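With the quota in place, the dispatcher deleted above is no longer needed: handle_rx() is the single receive path, and it samples the MRG_RXBUF feature bit once under the vq mutex (feature bits cannot change while it is held) instead of re-testing it per packet. Roughly, with demo_one_packet() as a hypothetical stand-in for the loop body:

/* Hypothetical helper: processes one packet, returns false when done. */
static bool demo_one_packet(struct vhost_net *net, struct vhost_virtqueue *vq,
			    int mergeable);

static void demo_handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	int mergeable;

	mutex_lock(&vq->mutex);
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
	while (demo_one_packet(net, vq, mergeable))
		;
	mutex_unlock(&vq->mutex);
}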
@@ -654,6 +542,7 @@ static struct socket *get_raw_socket(int fd)
654 } uaddr; 542 } uaddr;
655 int uaddr_len = sizeof uaddr, r; 543 int uaddr_len = sizeof uaddr, r;
656 struct socket *sock = sockfd_lookup(fd, &r); 544 struct socket *sock = sockfd_lookup(fd, &r);
545
657 if (!sock) 546 if (!sock)
658 return ERR_PTR(-ENOTSOCK); 547 return ERR_PTR(-ENOTSOCK);
659 548
@@ -682,6 +571,7 @@ static struct socket *get_tap_socket(int fd)
682{ 571{
683 struct file *file = fget(fd); 572 struct file *file = fget(fd);
684 struct socket *sock; 573 struct socket *sock;
574
685 if (!file) 575 if (!file)
686 return ERR_PTR(-EBADF); 576 return ERR_PTR(-EBADF);
687 sock = tun_get_socket(file); 577 sock = tun_get_socket(file);
@@ -696,6 +586,7 @@ static struct socket *get_tap_socket(int fd)
696static struct socket *get_socket(int fd) 586static struct socket *get_socket(int fd)
697{ 587{
698 struct socket *sock; 588 struct socket *sock;
589
699 /* special case to disable backend */ 590 /* special case to disable backend */
700 if (fd == -1) 591 if (fd == -1)
701 return NULL; 592 return NULL;
@@ -741,9 +632,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
741 oldsock = rcu_dereference_protected(vq->private_data, 632 oldsock = rcu_dereference_protected(vq->private_data,
742 lockdep_is_held(&vq->mutex)); 633 lockdep_is_held(&vq->mutex));
743 if (sock != oldsock) { 634 if (sock != oldsock) {
744 vhost_net_disable_vq(n, vq); 635 vhost_net_disable_vq(n, vq);
745 rcu_assign_pointer(vq->private_data, sock); 636 rcu_assign_pointer(vq->private_data, sock);
746 vhost_net_enable_vq(n, vq); 637 vhost_net_enable_vq(n, vq);
747 } 638 }
748 639
749 mutex_unlock(&vq->mutex); 640 mutex_unlock(&vq->mutex);
@@ -768,6 +659,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
768 struct socket *tx_sock = NULL; 659 struct socket *tx_sock = NULL;
769 struct socket *rx_sock = NULL; 660 struct socket *rx_sock = NULL;
770 long err; 661 long err;
662
771 mutex_lock(&n->dev.mutex); 663 mutex_lock(&n->dev.mutex);
772 err = vhost_dev_check_owner(&n->dev); 664 err = vhost_dev_check_owner(&n->dev);
773 if (err) 665 if (err)
@@ -829,6 +721,7 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
829 struct vhost_vring_file backend; 721 struct vhost_vring_file backend;
830 u64 features; 722 u64 features;
831 int r; 723 int r;
724
832 switch (ioctl) { 725 switch (ioctl) {
833 case VHOST_NET_SET_BACKEND: 726 case VHOST_NET_SET_BACKEND:
834 if (copy_from_user(&backend, argp, sizeof backend)) 727 if (copy_from_user(&backend, argp, sizeof backend))
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ade0568c07a4..2ab291241635 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -41,8 +41,8 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
41 poll_table *pt) 41 poll_table *pt)
42{ 42{
43 struct vhost_poll *poll; 43 struct vhost_poll *poll;
44 poll = container_of(pt, struct vhost_poll, table);
45 44
45 poll = container_of(pt, struct vhost_poll, table);
46 poll->wqh = wqh; 46 poll->wqh = wqh;
47 add_wait_queue(wqh, &poll->wait); 47 add_wait_queue(wqh, &poll->wait);
48} 48}
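Most of the remaining vhost.c hunks are the same mechanical CodingStyle fix seen here: a blank line between a function's local declarations and its first statement, with initializing assignments moved out of the declaration block where that reads better. For reference, the shape being enforced:

/* Style sketch: declarations, one blank line, then code. */
static int demo_count(int n)
{
	int i;
	int total = 0;

	for (i = 0; i < n; i++)
		total += i;
	return total;
}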
@@ -85,6 +85,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
85void vhost_poll_start(struct vhost_poll *poll, struct file *file) 85void vhost_poll_start(struct vhost_poll *poll, struct file *file)
86{ 86{
87 unsigned long mask; 87 unsigned long mask;
88
88 mask = file->f_op->poll(file, &poll->table); 89 mask = file->f_op->poll(file, &poll->table);
89 if (mask) 90 if (mask)
90 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); 91 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
@@ -101,6 +102,7 @@ static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
101 unsigned seq) 102 unsigned seq)
102{ 103{
103 int left; 104 int left;
105
104 spin_lock_irq(&dev->work_lock); 106 spin_lock_irq(&dev->work_lock);
105 left = seq - work->done_seq; 107 left = seq - work->done_seq;
106 spin_unlock_irq(&dev->work_lock); 108 spin_unlock_irq(&dev->work_lock);
@@ -222,6 +224,7 @@ static int vhost_worker(void *data)
222static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) 224static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
223{ 225{
224 int i; 226 int i;
227
225 for (i = 0; i < dev->nvqs; ++i) { 228 for (i = 0; i < dev->nvqs; ++i) {
226 dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect * 229 dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
227 UIO_MAXIOV, GFP_KERNEL); 230 UIO_MAXIOV, GFP_KERNEL);
@@ -235,6 +238,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
235 goto err_nomem; 238 goto err_nomem;
236 } 239 }
237 return 0; 240 return 0;
241
238err_nomem: 242err_nomem:
239 for (; i >= 0; --i) { 243 for (; i >= 0; --i) {
240 kfree(dev->vqs[i].indirect); 244 kfree(dev->vqs[i].indirect);
@@ -247,6 +251,7 @@ err_nomem:
247static void vhost_dev_free_iovecs(struct vhost_dev *dev) 251static void vhost_dev_free_iovecs(struct vhost_dev *dev)
248{ 252{
249 int i; 253 int i;
254
250 for (i = 0; i < dev->nvqs; ++i) { 255 for (i = 0; i < dev->nvqs; ++i) {
251 kfree(dev->vqs[i].indirect); 256 kfree(dev->vqs[i].indirect);
252 dev->vqs[i].indirect = NULL; 257 dev->vqs[i].indirect = NULL;
@@ -296,26 +301,28 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
296} 301}
297 302
298struct vhost_attach_cgroups_struct { 303struct vhost_attach_cgroups_struct {
299 struct vhost_work work; 304 struct vhost_work work;
300 struct task_struct *owner; 305 struct task_struct *owner;
301 int ret; 306 int ret;
302}; 307};
303 308
304static void vhost_attach_cgroups_work(struct vhost_work *work) 309static void vhost_attach_cgroups_work(struct vhost_work *work)
305{ 310{
306 struct vhost_attach_cgroups_struct *s; 311 struct vhost_attach_cgroups_struct *s;
307 s = container_of(work, struct vhost_attach_cgroups_struct, work); 312
308 s->ret = cgroup_attach_task_all(s->owner, current); 313 s = container_of(work, struct vhost_attach_cgroups_struct, work);
314 s->ret = cgroup_attach_task_all(s->owner, current);
309} 315}
310 316
311static int vhost_attach_cgroups(struct vhost_dev *dev) 317static int vhost_attach_cgroups(struct vhost_dev *dev)
312{ 318{
313 struct vhost_attach_cgroups_struct attach; 319 struct vhost_attach_cgroups_struct attach;
314 attach.owner = current; 320
315 vhost_work_init(&attach.work, vhost_attach_cgroups_work); 321 attach.owner = current;
316 vhost_work_queue(dev, &attach.work); 322 vhost_work_init(&attach.work, vhost_attach_cgroups_work);
317 vhost_work_flush(dev, &attach.work); 323 vhost_work_queue(dev, &attach.work);
318 return attach.ret; 324 vhost_work_flush(dev, &attach.work);
325 return attach.ret;
319} 326}
320 327
321/* Caller should have device mutex */ 328/* Caller should have device mutex */
@@ -323,11 +330,13 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
323{ 330{
324 struct task_struct *worker; 331 struct task_struct *worker;
325 int err; 332 int err;
333
326 /* Is there an owner already? */ 334 /* Is there an owner already? */
327 if (dev->mm) { 335 if (dev->mm) {
328 err = -EBUSY; 336 err = -EBUSY;
329 goto err_mm; 337 goto err_mm;
330 } 338 }
339
331 /* No owner, become one */ 340 /* No owner, become one */
332 dev->mm = get_task_mm(current); 341 dev->mm = get_task_mm(current);
333 worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); 342 worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
@@ -380,6 +389,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
380void vhost_dev_cleanup(struct vhost_dev *dev) 389void vhost_dev_cleanup(struct vhost_dev *dev)
381{ 390{
382 int i; 391 int i;
392
383 for (i = 0; i < dev->nvqs; ++i) { 393 for (i = 0; i < dev->nvqs; ++i) {
384 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) { 394 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
385 vhost_poll_stop(&dev->vqs[i].poll); 395 vhost_poll_stop(&dev->vqs[i].poll);
@@ -421,6 +431,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
421static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) 431static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
422{ 432{
423 u64 a = addr / VHOST_PAGE_SIZE / 8; 433 u64 a = addr / VHOST_PAGE_SIZE / 8;
434
424 /* Make sure 64 bit math will not overflow. */ 435 /* Make sure 64 bit math will not overflow. */
425 if (a > ULONG_MAX - (unsigned long)log_base || 436 if (a > ULONG_MAX - (unsigned long)log_base ||
426 a + (unsigned long)log_base > ULONG_MAX) 437 a + (unsigned long)log_base > ULONG_MAX)
@@ -461,6 +472,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
461 int log_all) 472 int log_all)
462{ 473{
463 int i; 474 int i;
475
464 for (i = 0; i < d->nvqs; ++i) { 476 for (i = 0; i < d->nvqs; ++i) {
465 int ok; 477 int ok;
466 mutex_lock(&d->vqs[i].mutex); 478 mutex_lock(&d->vqs[i].mutex);
@@ -527,6 +539,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
527{ 539{
528 struct vhost_memory mem, *newmem, *oldmem; 540 struct vhost_memory mem, *newmem, *oldmem;
529 unsigned long size = offsetof(struct vhost_memory, regions); 541 unsigned long size = offsetof(struct vhost_memory, regions);
542
530 if (copy_from_user(&mem, m, size)) 543 if (copy_from_user(&mem, m, size))
531 return -EFAULT; 544 return -EFAULT;
532 if (mem.padding) 545 if (mem.padding)
@@ -544,7 +557,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
544 return -EFAULT; 557 return -EFAULT;
545 } 558 }
546 559
547 if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) { 560 if (!memory_access_ok(d, newmem,
561 vhost_has_feature(d, VHOST_F_LOG_ALL))) {
548 kfree(newmem); 562 kfree(newmem);
549 return -EFAULT; 563 return -EFAULT;
550 } 564 }
@@ -560,6 +574,7 @@ static int init_used(struct vhost_virtqueue *vq,
560 struct vring_used __user *used) 574 struct vring_used __user *used)
561{ 575{
562 int r = put_user(vq->used_flags, &used->flags); 576 int r = put_user(vq->used_flags, &used->flags);
577
563 if (r) 578 if (r)
564 return r; 579 return r;
565 return get_user(vq->last_used_idx, &used->idx); 580 return get_user(vq->last_used_idx, &used->idx);
@@ -849,6 +864,7 @@ static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
849{ 864{
850 struct vhost_memory_region *reg; 865 struct vhost_memory_region *reg;
851 int i; 866 int i;
867
852 /* linear search is not brilliant, but we really have on the order of 6 868 /* linear search is not brilliant, but we really have on the order of 6
853 * regions in practice */ 869 * regions in practice */
854 for (i = 0; i < mem->nregions; ++i) { 870 for (i = 0; i < mem->nregions; ++i) {
@@ -871,6 +887,7 @@ static int set_bit_to_user(int nr, void __user *addr)
871 void *base; 887 void *base;
872 int bit = nr + (log % PAGE_SIZE) * 8; 888 int bit = nr + (log % PAGE_SIZE) * 8;
873 int r; 889 int r;
890
874 r = get_user_pages_fast(log, 1, 1, &page); 891 r = get_user_pages_fast(log, 1, 1, &page);
875 if (r < 0) 892 if (r < 0)
876 return r; 893 return r;
@@ -888,6 +905,7 @@ static int log_write(void __user *log_base,
888{ 905{
889 u64 write_page = write_address / VHOST_PAGE_SIZE; 906 u64 write_page = write_address / VHOST_PAGE_SIZE;
890 int r; 907 int r;
908
891 if (!write_length) 909 if (!write_length)
892 return 0; 910 return 0;
893 write_length += write_address % VHOST_PAGE_SIZE; 911 write_length += write_address % VHOST_PAGE_SIZE;
@@ -1037,8 +1055,8 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1037 i, count); 1055 i, count);
1038 return -EINVAL; 1056 return -EINVAL;
1039 } 1057 }
1040 if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect, 1058 if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1041 sizeof desc))) { 1059 vq->indirect, sizeof desc))) {
1042 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", 1060 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1043 i, (size_t)indirect->addr + i * sizeof desc); 1061 i, (size_t)indirect->addr + i * sizeof desc);
1044 return -EINVAL; 1062 return -EINVAL;
@@ -1153,7 +1171,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1153 i, vq->num, head); 1171 i, vq->num, head);
1154 return -EINVAL; 1172 return -EINVAL;
1155 } 1173 }
1156 ret = copy_from_user(&desc, vq->desc + i, sizeof desc); 1174 ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1157 if (unlikely(ret)) { 1175 if (unlikely(ret)) {
1158 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", 1176 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1159 i, vq->desc + i); 1177 i, vq->desc + i);
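The copy_from_user() to __copy_from_user() switch in vhost_get_vq_desc() is safe because the descriptor ring was already checked with access_ok() when the backend was configured; the double-underscore variant skips the redundant range check on every descriptor fetch while keeping fault handling. A sketch of the split, under the assumption that setup validates exactly the region the hot path later reads:

#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_desc { u64 addr; u32 len; u16 flags; u16 next; };

static int demo_validate_ring(struct demo_desc __user *ring, unsigned num)
{
	/* Done once at setup time, outside the hot path. */
	if (!access_ok(VERIFY_READ, ring, num * sizeof(*ring)))
		return -EFAULT;
	return 0;
}

static int demo_fetch_desc(struct demo_desc *dst,
			   struct demo_desc __user *ring, unsigned i)
{
	/* Range already validated above; only faults remain possible. */
	return __copy_from_user(dst, ring + i, sizeof(*dst)) ? -EFAULT : 0;
}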
@@ -1317,6 +1335,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1317void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) 1335void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1318{ 1336{
1319 __u16 flags; 1337 __u16 flags;
1338
1320 /* Flush out used index updates. This is paired 1339 /* Flush out used index updates. This is paired
1321 * with the barrier that the Guest executes when enabling 1340 * with the barrier that the Guest executes when enabling
1322 * interrupts. */ 1341 * interrupts. */
@@ -1361,6 +1380,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1361{ 1380{
1362 u16 avail_idx; 1381 u16 avail_idx;
1363 int r; 1382 int r;
1383
1364 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 1384 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1365 return false; 1385 return false;
1366 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; 1386 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -1387,6 +1407,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
1387void vhost_disable_notify(struct vhost_virtqueue *vq) 1407void vhost_disable_notify(struct vhost_virtqueue *vq)
1388{ 1408{
1389 int r; 1409 int r;
1410
1390 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) 1411 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1391 return; 1412 return;
1392 vq->used_flags |= VRING_USED_F_NO_NOTIFY; 1413 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index bac163450216..4b4e8dadd6b2 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -127,6 +127,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
127 return; 127 return;
128 128
129 memset(&props, 0, sizeof(struct backlight_properties)); 129 memset(&props, 0, sizeof(struct backlight_properties));
130 props.type = BACKLIGHT_RAW;
130 props.max_brightness = 0xff; 131 props.max_brightness = 0xff;
131 bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo, 132 bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo,
132 &atmel_lcdc_bl_ops, &props); 133 &atmel_lcdc_bl_ops, &props);
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 4cb6a576c567..b0b2ac335347 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1818,6 +1818,7 @@ static void aty128_bl_init(struct aty128fb_par *par)
1818 snprintf(name, sizeof(name), "aty128bl%d", info->node); 1818 snprintf(name, sizeof(name), "aty128bl%d", info->node);
1819 1819
1820 memset(&props, 0, sizeof(struct backlight_properties)); 1820 memset(&props, 0, sizeof(struct backlight_properties));
1821 props.type = BACKLIGHT_RAW;
1821 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 1822 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
1822 bd = backlight_device_register(name, info->dev, par, &aty128_bl_data, 1823 bd = backlight_device_register(name, info->dev, par, &aty128_bl_data,
1823 &props); 1824 &props);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 94e293fce1d2..d437b3daf1f5 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2241,6 +2241,7 @@ static void aty_bl_init(struct atyfb_par *par)
2241 snprintf(name, sizeof(name), "atybl%d", info->node); 2241 snprintf(name, sizeof(name), "atybl%d", info->node);
2242 2242
2243 memset(&props, 0, sizeof(struct backlight_properties)); 2243 memset(&props, 0, sizeof(struct backlight_properties));
2244 props.type = BACKLIGHT_RAW;
2244 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 2245 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
2245 bd = backlight_device_register(name, info->dev, par, &aty_bl_data, 2246 bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
2246 &props); 2247 &props);
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 9b811ddbce83..db572df7e1ef 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -158,6 +158,7 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo)
158 snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node); 158 snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
159 159
160 memset(&props, 0, sizeof(struct backlight_properties)); 160 memset(&props, 0, sizeof(struct backlight_properties));
161 props.type = BACKLIGHT_RAW;
161 props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 162 props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
162 bd = backlight_device_register(name, rinfo->info->dev, pdata, 163 bd = backlight_device_register(name, rinfo->info->dev, pdata,
163 &radeon_bl_data, &props); 164 &radeon_bl_data, &props);
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b224396b86d5..e59623a15f3f 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -227,6 +227,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
227 } 227 }
228 228
229 memset(&props, 0, sizeof(struct backlight_properties)); 229 memset(&props, 0, sizeof(struct backlight_properties));
230 props.type = BACKLIGHT_RAW;
230 props.max_brightness = MAX_BRIGHTNESS; 231 props.max_brightness = MAX_BRIGHTNESS;
231 bl = backlight_device_register(name, &pdev->dev, data, 232 bl = backlight_device_register(name, &pdev->dev, data,
232 &pm860x_backlight_ops, &props); 233 &pm860x_backlight_ops, &props);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index e54a337227ea..0c9373bedd1f 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -109,6 +109,14 @@ config LCD_S6E63M0
109 If you have an S6E63M0 LCD Panel, say Y to enable its 109 If you have an S6E63M0 LCD Panel, say Y to enable its
110 LCD control driver. 110 LCD control driver.
111 111
112config LCD_LD9040
113 tristate "LD9040 AMOLED LCD Driver"
114 depends on SPI && BACKLIGHT_CLASS_DEVICE
115 default n
116 help
117 If you have an LD9040 Panel, say Y to enable its
118 control driver.
119
112endif # LCD_CLASS_DEVICE 120endif # LCD_CLASS_DEVICE
113 121
114# 122#
@@ -236,12 +244,12 @@ config BACKLIGHT_MAX8925
236 If you have a LCD backlight connected to the WLED output of MAX8925 244 If you have a LCD backlight connected to the WLED output of MAX8925
237 WLED output, say Y here to enable this driver. 245 WLED output, say Y here to enable this driver.
238 246
239config BACKLIGHT_MBP_NVIDIA 247config BACKLIGHT_APPLE
240 tristate "MacBook Pro Nvidia Backlight Driver" 248 tristate "Apple Backlight Driver"
241 depends on X86 249 depends on X86 && ACPI
242 help 250 help
243 If you have an Apple Macbook Pro with Nvidia graphics hardware say Y 251 If you have an Intel-based Apple, say Y to enable a driver for its

244 to enable a driver for its backlight 252 backlight.
245 253
246config BACKLIGHT_TOSA 254config BACKLIGHT_TOSA
247 tristate "Sharp SL-6000 Backlight Driver" 255 tristate "Sharp SL-6000 Backlight Driver"
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 44c0f81ad85d..b9ca8490df87 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
12obj-$(CONFIG_LCD_TDO24M) += tdo24m.o 12obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
13obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o 13obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
14obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o 14obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
15obj-$(CONFIG_LCD_LD9040) += ld9040.o
15 16
16obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 17obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
17obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o 18obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
@@ -26,7 +27,7 @@ obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
26obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o 27obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
27obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o 28obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
28obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o 29obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
29obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o 30obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
30obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o 31obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
31obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o 32obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
32obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o 33obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 9f436e014f85..af3119707dbf 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -303,6 +303,7 @@ static int __devinit adp5520_bl_probe(struct platform_device *pdev)
303 mutex_init(&data->lock); 303 mutex_init(&data->lock);
304 304
305 memset(&props, 0, sizeof(struct backlight_properties)); 305 memset(&props, 0, sizeof(struct backlight_properties));
306 props.type = BACKLIGHT_RAW;
306 props.max_brightness = ADP5020_MAX_BRIGHTNESS; 307 props.max_brightness = ADP5020_MAX_BRIGHTNESS;
307 bl = backlight_device_register(pdev->name, data->master, data, 308 bl = backlight_device_register(pdev->name, data->master, data,
308 &adp5520_bl_ops, &props); 309 &adp5520_bl_ops, &props);
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 734c650a47c4..d2a96a421ffd 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -709,6 +709,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
709 i2c_set_clientdata(client, data); 709 i2c_set_clientdata(client, data);
710 710
711 memset(&props, 0, sizeof(props)); 711 memset(&props, 0, sizeof(props));
712 props.type = BACKLIGHT_RAW;
712 props.max_brightness = ADP8860_MAX_BRIGHTNESS; 713 props.max_brightness = ADP8860_MAX_BRIGHTNESS;
713 714
714 mutex_init(&data->lock); 715 mutex_init(&data->lock);
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index fe9af129c5dd..c861c41af442 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -104,6 +104,7 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev)
104 } 104 }
105 105
106 memset(&props, 0, sizeof(struct backlight_properties)); 106 memset(&props, 0, sizeof(struct backlight_properties));
107 props.type = BACKLIGHT_RAW;
107 props.max_brightness = 0xff; 108 props.max_brightness = 0xff;
108 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, 109 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
109 bl, &adx_backlight_ops, &props); 110 bl, &adx_backlight_ops, &props);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
new file mode 100644
index 000000000000..be98d152b7fd
--- /dev/null
+++ b/drivers/video/backlight/apple_bl.c
@@ -0,0 +1,241 @@
1/*
2 * Backlight Driver for Intel-based Apples
3 *
4 * Copyright (c) Red Hat <mjg@redhat.com>
5 * Based on code from Pommed:
 6 * Copyright (C) 2006 Nicolas Boichat <nicolas@boichat.ch>
 7 * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro@linuxmail.org>
8 * Copyright (C) 2007 Julien BLACHE <jb@jblache.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This driver triggers SMIs which cause the firmware to change the
15 * backlight brightness. This is icky in many ways, but it's impractical to
16 * get at the firmware code in order to figure out what it's actually doing.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/backlight.h>
23#include <linux/err.h>
24#include <linux/io.h>
25#include <linux/pci.h>
26#include <linux/acpi.h>
27
28static struct backlight_device *apple_backlight_device;
29
30struct hw_data {
31 /* I/O resource to allocate. */
32 unsigned long iostart;
33 unsigned long iolen;
34 /* Backlight operations structure. */
35 const struct backlight_ops backlight_ops;
36 void (*set_brightness)(int);
37};
38
39static const struct hw_data *hw_data;
40
41#define DRIVER "apple_backlight: "
42
43/* Module parameters. */
44static int debug;
45module_param_named(debug, debug, int, 0644);
46MODULE_PARM_DESC(debug, "Set to one to enable debugging messages.");
47
48/*
49 * Implementation for machines with Intel chipset.
50 */
51static void intel_chipset_set_brightness(int intensity)
52{
53 outb(0x04 | (intensity << 4), 0xb3);
54 outb(0xbf, 0xb2);
55}
56
57static int intel_chipset_send_intensity(struct backlight_device *bd)
58{
59 int intensity = bd->props.brightness;
60
61 if (debug)
62 printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
63 intensity);
64
65 intel_chipset_set_brightness(intensity);
66 return 0;
67}
68
69static int intel_chipset_get_intensity(struct backlight_device *bd)
70{
71 int intensity;
72
73 outb(0x03, 0xb3);
74 outb(0xbf, 0xb2);
75 intensity = inb(0xb3) >> 4;
76
77 if (debug)
78 printk(KERN_DEBUG DRIVER "read brightness of %d\n",
79 intensity);
80
81 return intensity;
82}
83
84static const struct hw_data intel_chipset_data = {
85 .iostart = 0xb2,
86 .iolen = 2,
87 .backlight_ops = {
88 .options = BL_CORE_SUSPENDRESUME,
89 .get_brightness = intel_chipset_get_intensity,
90 .update_status = intel_chipset_send_intensity,
91 },
92 .set_brightness = intel_chipset_set_brightness,
93};
94
95/*
96 * Implementation for machines with Nvidia chipset.
97 */
98static void nvidia_chipset_set_brightness(int intensity)
99{
100 outb(0x04 | (intensity << 4), 0x52f);
101 outb(0xbf, 0x52e);
102}
103
104static int nvidia_chipset_send_intensity(struct backlight_device *bd)
105{
106 int intensity = bd->props.brightness;
107
108 if (debug)
109 printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
110 intensity);
111
112 nvidia_chipset_set_brightness(intensity);
113 return 0;
114}
115
116static int nvidia_chipset_get_intensity(struct backlight_device *bd)
117{
118 int intensity;
119
120 outb(0x03, 0x52f);
121 outb(0xbf, 0x52e);
122 intensity = inb(0x52f) >> 4;
123
124 if (debug)
125 printk(KERN_DEBUG DRIVER "read brightness of %d\n",
126 intensity);
127
128 return intensity;
129}
130
131static const struct hw_data nvidia_chipset_data = {
132 .iostart = 0x52e,
133 .iolen = 2,
134 .backlight_ops = {
135 .options = BL_CORE_SUSPENDRESUME,
136 .get_brightness = nvidia_chipset_get_intensity,
137 .update_status = nvidia_chipset_send_intensity
138 },
139 .set_brightness = nvidia_chipset_set_brightness,
140};
141
142static int __devinit apple_bl_add(struct acpi_device *dev)
143{
144 struct backlight_properties props;
145 struct pci_dev *host;
146 int intensity;
147
148 host = pci_get_bus_and_slot(0, 0);
149
150 if (!host) {
151 printk(KERN_ERR DRIVER "unable to find PCI host\n");
152 return -ENODEV;
153 }
154
155 if (host->vendor == PCI_VENDOR_ID_INTEL)
156 hw_data = &intel_chipset_data;
157 else if (host->vendor == PCI_VENDOR_ID_NVIDIA)
158 hw_data = &nvidia_chipset_data;
159
160 pci_dev_put(host);
161
162 if (!hw_data) {
163 printk(KERN_ERR DRIVER "unknown hardware\n");
164 return -ENODEV;
165 }
166
167 /* Check that the hardware responds - this may not work under EFI */
168
169 intensity = hw_data->backlight_ops.get_brightness(NULL);
170
171 if (!intensity) {
172 hw_data->set_brightness(1);
173 if (!hw_data->backlight_ops.get_brightness(NULL))
174 return -ENODEV;
175
176 hw_data->set_brightness(0);
177 }
178
179 if (!request_region(hw_data->iostart, hw_data->iolen,
180 "Apple backlight"))
181 return -ENXIO;
182
183 memset(&props, 0, sizeof(struct backlight_properties));
184 props.type = BACKLIGHT_PLATFORM;
185 props.max_brightness = 15;
186 apple_backlight_device = backlight_device_register("apple_backlight",
187 NULL, NULL, &hw_data->backlight_ops, &props);
188
189 if (IS_ERR(apple_backlight_device)) {
190 release_region(hw_data->iostart, hw_data->iolen);
191 return PTR_ERR(apple_backlight_device);
192 }
193
194 apple_backlight_device->props.brightness =
195 hw_data->backlight_ops.get_brightness(apple_backlight_device);
196 backlight_update_status(apple_backlight_device);
197
198 return 0;
199}
200
201static int __devexit apple_bl_remove(struct acpi_device *dev, int type)
202{
203 backlight_device_unregister(apple_backlight_device);
204
205 release_region(hw_data->iostart, hw_data->iolen);
206 hw_data = NULL;
207 return 0;
208}
209
210static const struct acpi_device_id apple_bl_ids[] = {
211 {"APP0002", 0},
212 {"", 0},
213};
214
215static struct acpi_driver apple_bl_driver = {
216 .name = "Apple backlight",
217 .ids = apple_bl_ids,
218 .ops = {
219 .add = apple_bl_add,
220 .remove = apple_bl_remove,
221 },
222};
223
224static int __init apple_bl_init(void)
225{
226 return acpi_bus_register_driver(&apple_bl_driver);
227}
228
229static void __exit apple_bl_exit(void)
230{
231 acpi_bus_unregister_driver(&apple_bl_driver);
232}
233
234module_init(apple_bl_init);
235module_exit(apple_bl_exit);
236
237MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
238MODULE_DESCRIPTION("Apple Backlight Driver");
239MODULE_LICENSE("GPL");
240MODULE_DEVICE_TABLE(acpi, apple_bl_ids);
241MODULE_ALIAS("mbp_nvidia_bl");
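The new apple_bl driver replaces mbp_nvidia_bl (the MODULE_ALIAS above preserves autoload compatibility) and drives brightness through SMI-trapped port I/O; the Intel and Nvidia variants share the same index/trigger protocol and differ only in the port base, 0xb2 versus 0x52e. Reduced to its essentials (a sketch, not the driver's actual structure):

#include <linux/io.h>

/* Shared SMI protocol: data port at base + 1, SMI trigger at base. */
static int demo_smi_get(unsigned long base)
{
	outb(0x03, base + 1);		/* "read brightness" command */
	outb(0xbf, base);		/* trap into firmware via SMI */
	return inb(base + 1) >> 4;	/* intensity lives in the high nibble */
}

static void demo_smi_set(unsigned long base, int intensity)
{
	outb(0x04 | (intensity << 4), base + 1);
	outb(0xbf, base);
}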
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index e6a66dab088c..0443a4f71858 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -168,6 +168,7 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
168 } 168 }
169 169
170 memset(&props, 0, sizeof(struct backlight_properties)); 170 memset(&props, 0, sizeof(struct backlight_properties));
171 props.type = BACKLIGHT_RAW;
171 props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min; 172 props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
172 bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl, 173 bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
173 &atmel_pwm_bl_ops, &props); 174 &atmel_pwm_bl_ops, &props);
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 08703299ef61..80d292fb92d8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -19,6 +19,12 @@
19#include <asm/backlight.h> 19#include <asm/backlight.h>
20#endif 20#endif
21 21
 22static const char * const backlight_types[] = {
23 [BACKLIGHT_RAW] = "raw",
24 [BACKLIGHT_PLATFORM] = "platform",
25 [BACKLIGHT_FIRMWARE] = "firmware",
26};
27
22#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \ 28#if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \
23 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)) 29 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE))
24/* This callback gets called when something important happens inside a 30/* This callback gets called when something important happens inside a
@@ -169,6 +175,14 @@ static ssize_t backlight_store_brightness(struct device *dev,
169 return rc; 175 return rc;
170} 176}
171 177
178static ssize_t backlight_show_type(struct device *dev,
179 struct device_attribute *attr, char *buf)
180{
181 struct backlight_device *bd = to_backlight_device(dev);
182
183 return sprintf(buf, "%s\n", backlight_types[bd->props.type]);
184}
185
172static ssize_t backlight_show_max_brightness(struct device *dev, 186static ssize_t backlight_show_max_brightness(struct device *dev,
173 struct device_attribute *attr, char *buf) 187 struct device_attribute *attr, char *buf)
174{ 188{
@@ -234,6 +248,7 @@ static struct device_attribute bl_device_attributes[] = {
234 __ATTR(actual_brightness, 0444, backlight_show_actual_brightness, 248 __ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
235 NULL), 249 NULL),
236 __ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL), 250 __ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
251 __ATTR(type, 0444, backlight_show_type, NULL),
237 __ATTR_NULL, 252 __ATTR_NULL,
238}; 253};
239 254
@@ -292,9 +307,16 @@ struct backlight_device *backlight_device_register(const char *name,
292 dev_set_drvdata(&new_bd->dev, devdata); 307 dev_set_drvdata(&new_bd->dev, devdata);
293 308
294 /* Set default properties */ 309 /* Set default properties */
295 if (props) 310 if (props) {
296 memcpy(&new_bd->props, props, 311 memcpy(&new_bd->props, props,
297 sizeof(struct backlight_properties)); 312 sizeof(struct backlight_properties));
313 if (props->type <= 0 || props->type >= BACKLIGHT_TYPE_MAX) {
314 WARN(1, "%s: invalid backlight type", name);
315 new_bd->props.type = BACKLIGHT_RAW;
316 }
317 } else {
318 new_bd->props.type = BACKLIGHT_RAW;
319 }
298 320
299 rc = device_register(&new_bd->dev); 321 rc = device_register(&new_bd->dev);
300 if (rc) { 322 if (rc) {
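backlight.c now validates props->type at registration (falling back to BACKLIGHT_RAW with a warning) and exports it as a read-only sysfs attribute, so userspace can prefer firmware-level control over platform hooks over raw register access when a machine exposes several backlight devices. A userspace sketch of that policy; the path and strings follow the new attribute, while the helper names are purely illustrative:

#include <stdio.h>
#include <string.h>

/* Lower rank wins: firmware < platform < raw. */
static int demo_type_rank(const char *type)
{
	if (!strcmp(type, "firmware"))
		return 0;
	if (!strcmp(type, "platform"))
		return 1;
	return 2;	/* "raw" */
}

static int demo_read_type(const char *name, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/backlight/%s/type", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	fclose(f);
	return 0;
}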
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 1e71c35083bb..af6098396fe6 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -562,6 +562,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
562 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; 562 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
563 563
564 memset(&props, 0, sizeof(struct backlight_properties)); 564 memset(&props, 0, sizeof(struct backlight_properties));
565 props.type = BACKLIGHT_RAW;
565 props.max_brightness = pdata->max_intensity; 566 props.max_brightness = pdata->max_intensity;
566 lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd, 567 lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd,
567 &corgi_bl_ops, &props); 568 &corgi_bl_ops, &props);
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 397d15eb1ea8..6c8c54041fae 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -193,6 +193,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
193 } 193 }
194 194
195 memset(&props, 0, sizeof(struct backlight_properties)); 195 memset(&props, 0, sizeof(struct backlight_properties));
196 props.type = BACKLIGHT_RAW;
196 bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL, 197 bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL,
197 &cr_backlight_ops, &props); 198 &cr_backlight_ops, &props);
198 if (IS_ERR(bdp)) { 199 if (IS_ERR(bdp)) {
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 87659ed79bd7..62043f12a5a4 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
136 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2, 136 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
137 DA9034_WLED_ISET(pdata->output_current)); 137 DA9034_WLED_ISET(pdata->output_current));
138 138
139 props.type = BACKLIGHT_RAW;
139 props.max_brightness = max_brightness; 140 props.max_brightness = max_brightness;
140 bl = backlight_device_register(pdev->name, data->da903x_dev, data, 141 bl = backlight_device_register(pdev->name, data->da903x_dev, data,
141 &da903x_backlight_ops, &props); 142 &da903x_backlight_ops, &props);
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index b0cc49184803..9f1e389d51d2 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -87,6 +87,7 @@ static int __init ep93xxbl_probe(struct platform_device *dev)
87 ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS; 87 ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS;
88 88
89 memset(&props, 0, sizeof(struct backlight_properties)); 89 memset(&props, 0, sizeof(struct backlight_properties));
90 props.type = BACKLIGHT_RAW;
90 props.max_brightness = EP93XX_MAX_BRIGHT; 91 props.max_brightness = EP93XX_MAX_BRIGHT;
91 bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl, 92 bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl,
92 &ep93xxbl_ops, &props); 93 &ep93xxbl_ops, &props);
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 312ca619735d..8c6befd65a33 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -91,6 +91,7 @@ static int genericbl_probe(struct platform_device *pdev)
91 name = machinfo->name; 91 name = machinfo->name;
92 92
93 memset(&props, 0, sizeof(struct backlight_properties)); 93 memset(&props, 0, sizeof(struct backlight_properties));
94 props.type = BACKLIGHT_RAW;
94 props.max_brightness = machinfo->max_intensity; 95 props.max_brightness = machinfo->max_intensity;
95 bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops, 96 bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
96 &props); 97 &props);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 267d23f8d645..38aa00272141 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -109,6 +109,7 @@ static int __devinit hp680bl_probe(struct platform_device *pdev)
109 struct backlight_device *bd; 109 struct backlight_device *bd;
110 110
111 memset(&props, 0, sizeof(struct backlight_properties)); 111 memset(&props, 0, sizeof(struct backlight_properties));
112 props.type = BACKLIGHT_RAW;
112 props.max_brightness = HP680_MAX_INTENSITY; 113 props.max_brightness = HP680_MAX_INTENSITY;
113 bd = backlight_device_register("hp680-bl", &pdev->dev, NULL, 114 bd = backlight_device_register("hp680-bl", &pdev->dev, NULL,
114 &hp680bl_ops, &props); 115 &hp680bl_ops, &props);
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f177b3a4885..de65d80159be 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -106,6 +106,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
106 struct backlight_device *bd; 106 struct backlight_device *bd;
107 107
108 memset(&props, 0, sizeof(struct backlight_properties)); 108 memset(&props, 0, sizeof(struct backlight_properties));
109 props.type = BACKLIGHT_RAW;
109 props.max_brightness = BL_MAX_BRIGHT; 110 props.max_brightness = BL_MAX_BRIGHT;
110 bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, 111 bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
111 &jornada_bl_ops, &props); 112 &jornada_bl_ops, &props);
@@ -146,12 +147,12 @@ static struct platform_driver jornada_bl_driver = {
146 }, 147 },
147}; 148};
148 149
149int __init jornada_bl_init(void) 150static int __init jornada_bl_init(void)
150{ 151{
151 return platform_driver_register(&jornada_bl_driver); 152 return platform_driver_register(&jornada_bl_driver);
152} 153}
153 154
154void __exit jornada_bl_exit(void) 155static void __exit jornada_bl_exit(void)
155{ 156{
156 platform_driver_unregister(&jornada_bl_driver); 157 platform_driver_unregister(&jornada_bl_driver);
157} 158}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index cbbb167fd268..d2ff658b4144 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -135,12 +135,12 @@ static struct platform_driver jornada_lcd_driver = {
135 }, 135 },
136}; 136};
137 137
138int __init jornada_lcd_init(void) 138static int __init jornada_lcd_init(void)
139{ 139{
140 return platform_driver_register(&jornada_lcd_driver); 140 return platform_driver_register(&jornada_lcd_driver);
141} 141}
142 142
143void __exit jornada_lcd_exit(void) 143static void __exit jornada_lcd_exit(void)
144{ 144{
145 platform_driver_unregister(&jornada_lcd_driver); 145 platform_driver_unregister(&jornada_lcd_driver);
146} 146}
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index f439a8632287..72dd5556a35b 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -149,6 +149,7 @@ static int kb3886bl_probe(struct platform_device *pdev)
149 machinfo->limit_mask = -1; 149 machinfo->limit_mask = -1;
150 150
151 memset(&props, 0, sizeof(struct backlight_properties)); 151 memset(&props, 0, sizeof(struct backlight_properties));
152 props.type = BACKLIGHT_RAW;
152 props.max_brightness = machinfo->max_intensity; 153 props.max_brightness = machinfo->max_intensity;
153 kb3886_backlight_device = backlight_device_register("kb3886-bl", 154 kb3886_backlight_device = backlight_device_register("kb3886-bl",
154 &pdev->dev, NULL, 155 &pdev->dev, NULL,
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
new file mode 100644
index 000000000000..7281b2506a67
--- /dev/null
+++ b/drivers/video/backlight/ld9040.c
@@ -0,0 +1,819 @@
1/*
2 * ld9040 AMOLED LCD panel driver.
3 *
4 * Copyright (c) 2011 Samsung Electronics
5 * Author: Donghwa Lee <dh09.lee@samsung.com>
6 * Derived from drivers/video/backlight/s6e63m0.c
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/wait.h>
24#include <linux/fb.h>
25#include <linux/delay.h>
26#include <linux/gpio.h>
27#include <linux/spi/spi.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
30#include <linux/kernel.h>
31#include <linux/lcd.h>
32#include <linux/backlight.h>
33
34#include "ld9040_gamma.h"
35
36#define SLEEPMSEC 0x1000
37#define ENDDEF 0x2000
38#define DEFMASK 0xFF00
39#define COMMAND_ONLY 0xFE
40#define DATA_ONLY 0xFF
41
42#define MIN_BRIGHTNESS 0
43#define MAX_BRIGHTNESS 24
44#define power_is_on(pwr) ((pwr) <= FB_BLANK_NORMAL)
45
46struct ld9040 {
47 struct device *dev;
48 struct spi_device *spi;
49 unsigned int power;
50 unsigned int current_brightness;
51
52 struct lcd_device *ld;
53 struct backlight_device *bd;
54 struct lcd_platform_data *lcd_pd;
55};
56
57static const unsigned short seq_swreset[] = {
58 0x01, COMMAND_ONLY,
59 ENDDEF, 0x00
60};
61
62static const unsigned short seq_user_setting[] = {
63 0xF0, 0x5A,
64
65 DATA_ONLY, 0x5A,
66 ENDDEF, 0x00
67};
68
69static const unsigned short seq_elvss_on[] = {
70 0xB1, 0x0D,
71
72 DATA_ONLY, 0x00,
73 DATA_ONLY, 0x16,
74 ENDDEF, 0x00
75};
76
77static const unsigned short seq_gtcon[] = {
78 0xF7, 0x09,
79
80 DATA_ONLY, 0x00,
81 DATA_ONLY, 0x00,
82 ENDDEF, 0x00
83};
84
85static const unsigned short seq_panel_condition[] = {
86 0xF8, 0x05,
87
88 DATA_ONLY, 0x65,
89 DATA_ONLY, 0x96,
90 DATA_ONLY, 0x71,
91 DATA_ONLY, 0x7D,
92 DATA_ONLY, 0x19,
93 DATA_ONLY, 0x3B,
94 DATA_ONLY, 0x0D,
95 DATA_ONLY, 0x19,
96 DATA_ONLY, 0x7E,
97 DATA_ONLY, 0x0D,
98 DATA_ONLY, 0xE2,
99 DATA_ONLY, 0x00,
100 DATA_ONLY, 0x00,
101 DATA_ONLY, 0x7E,
102 DATA_ONLY, 0x7D,
103 DATA_ONLY, 0x07,
104 DATA_ONLY, 0x07,
105 DATA_ONLY, 0x20,
106 DATA_ONLY, 0x20,
107 DATA_ONLY, 0x20,
108 DATA_ONLY, 0x02,
109 DATA_ONLY, 0x02,
110 ENDDEF, 0x00
111};
112
113static const unsigned short seq_gamma_set1[] = {
114 0xF9, 0x00,
115
116 DATA_ONLY, 0xA7,
117 DATA_ONLY, 0xB4,
118 DATA_ONLY, 0xAE,
119 DATA_ONLY, 0xBF,
120 DATA_ONLY, 0x00,
121 DATA_ONLY, 0x91,
122 DATA_ONLY, 0x00,
123 DATA_ONLY, 0xB2,
124 DATA_ONLY, 0xB4,
125 DATA_ONLY, 0xAA,
126 DATA_ONLY, 0xBB,
127 DATA_ONLY, 0x00,
128 DATA_ONLY, 0xAC,
129 DATA_ONLY, 0x00,
130 DATA_ONLY, 0xB3,
131 DATA_ONLY, 0xB1,
132 DATA_ONLY, 0xAA,
133 DATA_ONLY, 0xBC,
134 DATA_ONLY, 0x00,
135 DATA_ONLY, 0xB3,
136 ENDDEF, 0x00
137};
138
139static const unsigned short seq_gamma_ctrl[] = {
140 0xFB, 0x02,
141
142 DATA_ONLY, 0x5A,
143 ENDDEF, 0x00
144};
145
146static const unsigned short seq_gamma_start[] = {
147 0xF9, COMMAND_ONLY,
148
149 ENDDEF, 0x00
150};
151
152static const unsigned short seq_apon[] = {
153 0xF3, 0x00,
154
155 DATA_ONLY, 0x00,
156 DATA_ONLY, 0x00,
157 DATA_ONLY, 0x0A,
158 DATA_ONLY, 0x02,
159 ENDDEF, 0x00
160};
161
162static const unsigned short seq_display_ctrl[] = {
163 0xF2, 0x02,
164
165 DATA_ONLY, 0x08,
166 DATA_ONLY, 0x08,
167 DATA_ONLY, 0x10,
168 DATA_ONLY, 0x10,
169 ENDDEF, 0x00
170};
171
172static const unsigned short seq_manual_pwr[] = {
173 0xB0, 0x04,
174 ENDDEF, 0x00
175};
176
177static const unsigned short seq_pwr_ctrl[] = {
178 0xF4, 0x0A,
179
180 DATA_ONLY, 0x87,
181 DATA_ONLY, 0x25,
182 DATA_ONLY, 0x6A,
183 DATA_ONLY, 0x44,
184 DATA_ONLY, 0x02,
185 DATA_ONLY, 0x88,
186 ENDDEF, 0x00
187};
188
189static const unsigned short seq_sleep_out[] = {
190 0x11, COMMAND_ONLY,
191 ENDDEF, 0x00
192};
193
194static const unsigned short seq_sleep_in[] = {
195 0x10, COMMAND_ONLY,
196 ENDDEF, 0x00
197};
198
199static const unsigned short seq_display_on[] = {
200 0x29, COMMAND_ONLY,
201 ENDDEF, 0x00
202};
203
204static const unsigned short seq_display_off[] = {
205 0x28, COMMAND_ONLY,
206 ENDDEF, 0x00
207};
208
209static const unsigned short seq_vci1_1st_en[] = {
210 0xF3, 0x10,
211
212 DATA_ONLY, 0x00,
213 DATA_ONLY, 0x00,
214 DATA_ONLY, 0x00,
215 DATA_ONLY, 0x02,
216 ENDDEF, 0x00
217};
218
219static const unsigned short seq_vl1_en[] = {
220 0xF3, 0x11,
221
222 DATA_ONLY, 0x00,
223 DATA_ONLY, 0x00,
224 DATA_ONLY, 0x00,
225 DATA_ONLY, 0x02,
226 ENDDEF, 0x00
227};
228
229static const unsigned short seq_vl2_en[] = {
230 0xF3, 0x13,
231
232 DATA_ONLY, 0x00,
233 DATA_ONLY, 0x00,
234 DATA_ONLY, 0x00,
235 DATA_ONLY, 0x02,
236 ENDDEF, 0x00
237};
238
239static const unsigned short seq_vci1_2nd_en[] = {
240 0xF3, 0x33,
241
242 DATA_ONLY, 0x00,
243 DATA_ONLY, 0x00,
244 DATA_ONLY, 0x00,
245 DATA_ONLY, 0x02,
246 ENDDEF, 0x00
247};
248
249static const unsigned short seq_vl3_en[] = {
250 0xF3, 0x37,
251
252 DATA_ONLY, 0x00,
253 DATA_ONLY, 0x00,
254 DATA_ONLY, 0x00,
255 DATA_ONLY, 0x02,
256 ENDDEF, 0x00
257};
258
259static const unsigned short seq_vreg1_amp_en[] = {
260 0xF3, 0x37,
261
262 DATA_ONLY, 0x01,
263 DATA_ONLY, 0x00,
264 DATA_ONLY, 0x00,
265 DATA_ONLY, 0x02,
266 ENDDEF, 0x00
267};
268
269static const unsigned short seq_vgh_amp_en[] = {
270 0xF3, 0x37,
271
272 DATA_ONLY, 0x11,
273 DATA_ONLY, 0x00,
274 DATA_ONLY, 0x00,
275 DATA_ONLY, 0x02,
276 ENDDEF, 0x00
277};
278
279static const unsigned short seq_vgl_amp_en[] = {
280 0xF3, 0x37,
281
282 DATA_ONLY, 0x31,
283 DATA_ONLY, 0x00,
284 DATA_ONLY, 0x00,
285 DATA_ONLY, 0x02,
286 ENDDEF, 0x00
287};
288
289static const unsigned short seq_vmos_amp_en[] = {
290 0xF3, 0x37,
291
292 DATA_ONLY, 0xB1,
293 DATA_ONLY, 0x00,
294 DATA_ONLY, 0x00,
295 DATA_ONLY, 0x03,
296 ENDDEF, 0x00
297};
298
299static const unsigned short seq_vint_amp_en[] = {
300 0xF3, 0x37,
301
302 DATA_ONLY, 0xF1,
303 /* DATA_ONLY, 0x71, VMOS/VBL/VBH not used */
304 DATA_ONLY, 0x00,
305 DATA_ONLY, 0x00,
306 DATA_ONLY, 0x03,
307 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
308 ENDDEF, 0x00
309};
310
311static const unsigned short seq_vbh_amp_en[] = {
312 0xF3, 0x37,
313
314 DATA_ONLY, 0xF9,
315 DATA_ONLY, 0x00,
316 DATA_ONLY, 0x00,
317 DATA_ONLY, 0x03,
318 ENDDEF, 0x00
319};
320
321static const unsigned short seq_vbl_amp_en[] = {
322 0xF3, 0x37,
323
324 DATA_ONLY, 0xFD,
325 DATA_ONLY, 0x00,
326 DATA_ONLY, 0x00,
327 DATA_ONLY, 0x03,
328 ENDDEF, 0x00
329};
330
331static const unsigned short seq_gam_amp_en[] = {
332 0xF3, 0x37,
333
334 DATA_ONLY, 0xFF,
335 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
336 DATA_ONLY, 0x00,
337 DATA_ONLY, 0x00,
338 DATA_ONLY, 0x03,
339 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
340 ENDDEF, 0x00
341};
342
343static const unsigned short seq_sd_amp_en[] = {
344 0xF3, 0x37,
345
346 DATA_ONLY, 0xFF,
347 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
348 DATA_ONLY, 0x80,
349 DATA_ONLY, 0x00,
350 DATA_ONLY, 0x03,
351 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
352 ENDDEF, 0x00
353};
354
355static const unsigned short seq_gls_en[] = {
356 0xF3, 0x37,
357
358 DATA_ONLY, 0xFF,
359 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
360 DATA_ONLY, 0x81,
361 DATA_ONLY, 0x00,
362 DATA_ONLY, 0x03,
363 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
364 ENDDEF, 0x00
365};
366
367static const unsigned short seq_els_en[] = {
368 0xF3, 0x37,
369
370 DATA_ONLY, 0xFF,
371 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
372 DATA_ONLY, 0x83,
373 DATA_ONLY, 0x00,
374 DATA_ONLY, 0x03,
375 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
376 ENDDEF, 0x00
377};
378
379static const unsigned short seq_el_on[] = {
380 0xF3, 0x37,
381
382 DATA_ONLY, 0xFF,
383 /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
384 DATA_ONLY, 0x87,
385 DATA_ONLY, 0x00,
386 DATA_ONLY, 0x03,
387 /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
388 ENDDEF, 0x00
389};
390
391static int ld9040_spi_write_byte(struct ld9040 *lcd, int addr, int data)
392{
393 u16 buf[1];
394 struct spi_message msg;
395
396 struct spi_transfer xfer = {
397 .len = 2,
398 .tx_buf = buf,
399 };
400
401 buf[0] = (addr << 8) | data;
402
403 spi_message_init(&msg);
404 spi_message_add_tail(&xfer, &msg);
405
406 return spi_sync(lcd->spi, &msg);
407}
408
409static int ld9040_spi_write(struct ld9040 *lcd, unsigned char address,
410 unsigned char command)
411{
412 int ret = 0;
413
414 if (address != DATA_ONLY)
415 ret = ld9040_spi_write_byte(lcd, 0x0, address);
416 if (command != COMMAND_ONLY)
417 ret = ld9040_spi_write_byte(lcd, 0x1, command);
418
419 return ret;
420}
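/*
 * Note on the framing above: with spi->bits_per_word = 9 (set in probe),
 * each 16-bit buffer entry carries one 9-bit frame whose MSB selects
 * command (0) or data (1).  For example:
 *
 *	ld9040_spi_write(lcd, 0xF0, 0x5A)
 *
 * transmits the frame 0x0F0 (command byte 0xF0) followed by 0x15A
 * (data byte 0x5A).
 */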
421
422static int ld9040_panel_send_sequence(struct ld9040 *lcd,
423 const unsigned short *wbuf)
424{
425 int ret = 0, i = 0;
426
427 while ((wbuf[i] & DEFMASK) != ENDDEF) {
428 if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
429 ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
430 if (ret)
431 break;
432 } else
433 mdelay(wbuf[i+1]);
434 i += 2;
435 }
436
437 return ret;
438}
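/*
 * For illustration, a hypothetical sequence (not part of the panel init
 * set) that writes register 0xF0 with 0x5A, waits 10 ms and terminates
 * would be encoded as:
 *
 *	static const unsigned short seq_example[] = {
 *		0xF0, 0x5A,
 *		SLEEPMSEC, 10,
 *		ENDDEF, 0x00
 *	};
 *
 * ld9040_panel_send_sequence() consumes the entries pairwise until the
 * first word of a pair matches ENDDEF under DEFMASK.
 */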
439
440static int _ld9040_gamma_ctl(struct ld9040 *lcd, const unsigned int *gamma)
441{
442 unsigned int i = 0;
443 int ret = 0;
444
445 /* start gamma table updating. */
446 ret = ld9040_panel_send_sequence(lcd, seq_gamma_start);
447 if (ret) {
448 dev_err(lcd->dev, "failed to start gamma table updating.\n");
449 goto gamma_err;
450 }
451
452 for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
453 ret = ld9040_spi_write(lcd, DATA_ONLY, gamma[i]);
454 if (ret) {
455 dev_err(lcd->dev, "failed to set gamma table.\n");
456 goto gamma_err;
457 }
458 }
459
460 /* update gamma table. */
461 ret = ld9040_panel_send_sequence(lcd, seq_gamma_ctrl);
462 if (ret)
463 dev_err(lcd->dev, "failed to update gamma table.\n");
464
465gamma_err:
466 return ret;
467}
468
469static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
470{
471 int ret = 0;
472
473 ret = _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
474
475 return ret;
476}
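/*
 * Example: a backlight brightness of 24 (MAX_BRIGHTNESS) selects
 * gamma_table.gamma_22_table[24], i.e. the 300cd/m^2 set ld9040_22_300
 * from ld9040_gamma.h, while brightness 0 maps to ld9040_22_50.
 */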
477
478
479static int ld9040_ldi_init(struct ld9040 *lcd)
480{
481 int ret, i;
482 static const unsigned short *init_seq[] = {
483 seq_user_setting,
484 seq_panel_condition,
485 seq_display_ctrl,
486 seq_manual_pwr,
487 seq_elvss_on,
488 seq_gtcon,
489 seq_gamma_set1,
490 seq_gamma_ctrl,
491 seq_sleep_out,
492 };
493
494 for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
495 ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
496 /* workaround: minimum delay time for transferring CMD */
497 udelay(300);
498 if (ret)
499 break;
500 }
501
502 return ret;
503}
504
505static int ld9040_ldi_enable(struct ld9040 *lcd)
506{
507 int ret = 0;
508
509 ret = ld9040_panel_send_sequence(lcd, seq_display_on);
510
511 return ret;
512}
513
514static int ld9040_ldi_disable(struct ld9040 *lcd)
515{
516 int ret;
517
518 ret = ld9040_panel_send_sequence(lcd, seq_display_off);
519 ret = ld9040_panel_send_sequence(lcd, seq_sleep_in);
520
521 return ret;
522}
523
524static int ld9040_power_on(struct ld9040 *lcd)
525{
526 int ret = 0;
527 struct lcd_platform_data *pd = lcd->lcd_pd;
528
529 if (!pd) {
530 dev_err(lcd->dev, "platform data is NULL.\n");
531 return -EFAULT;
532 }
533
534 if (!pd->power_on) {
535 dev_err(lcd->dev, "power_on is NULL.\n");
536 return -EFAULT;
537 } else {
538 pd->power_on(lcd->ld, 1);
539 mdelay(pd->power_on_delay);
540 }
541
542 if (!pd->reset) {
543 dev_err(lcd->dev, "reset is NULL.\n");
544 return -EFAULT;
545 } else {
546 pd->reset(lcd->ld);
547 mdelay(pd->reset_delay);
548 }
549
550 ret = ld9040_ldi_init(lcd);
551 if (ret) {
552 dev_err(lcd->dev, "failed to initialize ldi.\n");
553 return ret;
554 }
555
556 ret = ld9040_ldi_enable(lcd);
557 if (ret) {
558 dev_err(lcd->dev, "failed to enable ldi.\n");
559 return ret;
560 }
561
562 return 0;
563}
564
565static int ld9040_power_off(struct ld9040 *lcd)
566{
567 int ret = 0;
568 struct lcd_platform_data *pd = lcd->lcd_pd;
569
571 if (!pd) {
572 dev_err(lcd->dev, "platform data is NULL.\n");
573 return -EFAULT;
574 }
575
576 ret = ld9040_ldi_disable(lcd);
577 if (ret) {
578 dev_err(lcd->dev, "lcd setting failed.\n");
579 return -EIO;
580 }
581
582 mdelay(pd->power_off_delay);
583
584 if (!pd->power_on) {
585 dev_err(lcd->dev, "power_on is NULL.\n");
586 return -EFAULT;
587 } else
588 pd->power_on(lcd->ld, 0);
589
590 return 0;
591}
592
593static int ld9040_power(struct ld9040 *lcd, int power)
594{
595 int ret = 0;
596
597 if (power_is_on(power) && !power_is_on(lcd->power))
598 ret = ld9040_power_on(lcd);
599 else if (!power_is_on(power) && power_is_on(lcd->power))
600 ret = ld9040_power_off(lcd);
601
602 if (!ret)
603 lcd->power = power;
604
605 return ret;
606}
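/*
 * power_is_on() counts FB_BLANK_UNBLANK (0) and FB_BLANK_NORMAL (1) as
 * "panel on" and anything above, e.g. FB_BLANK_POWERDOWN (4), as "panel
 * off", so only on/off transitions touch the hardware:
 *
 *	ld9040_power(lcd, FB_BLANK_POWERDOWN);	powers the panel off
 *	ld9040_power(lcd, FB_BLANK_NORMAL);	powers it back on
 *	ld9040_power(lcd, FB_BLANK_UNBLANK);	no-op, still counted as on
 */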
607
608static int ld9040_set_power(struct lcd_device *ld, int power)
609{
610 struct ld9040 *lcd = lcd_get_data(ld);
611
612 if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
613 power != FB_BLANK_NORMAL) {
614 dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
615 return -EINVAL;
616 }
617
618 return ld9040_power(lcd, power);
619}
620
621static int ld9040_get_power(struct lcd_device *ld)
622{
623 struct ld9040 *lcd = lcd_get_data(ld);
624
625 return lcd->power;
626}
627
628static int ld9040_get_brightness(struct backlight_device *bd)
629{
630 return bd->props.brightness;
631}
632
633static int ld9040_set_brightness(struct backlight_device *bd)
634{
635 int ret = 0, brightness = bd->props.brightness;
636 struct ld9040 *lcd = bl_get_data(bd);
637
638 if (brightness < MIN_BRIGHTNESS ||
639 brightness > bd->props.max_brightness) {
640 dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
641 MIN_BRIGHTNESS, MAX_BRIGHTNESS);
642 return -EINVAL;
643 }
644
645 ret = ld9040_gamma_ctl(lcd, bd->props.brightness);
646 if (ret) {
647 dev_err(&bd->dev, "lcd brightness setting failed.\n");
648 return -EIO;
649 }
650
651 return ret;
652}
653
654static struct lcd_ops ld9040_lcd_ops = {
655 .set_power = ld9040_set_power,
656 .get_power = ld9040_get_power,
657};
658
659static const struct backlight_ops ld9040_backlight_ops = {
660 .get_brightness = ld9040_get_brightness,
661 .update_status = ld9040_set_brightness,
662};
663
664
665static int ld9040_probe(struct spi_device *spi)
666{
667 int ret = 0;
668 struct ld9040 *lcd = NULL;
669 struct lcd_device *ld = NULL;
670 struct backlight_device *bd = NULL;
671
672 lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
673 if (!lcd)
674 return -ENOMEM;
675
676 /* The ld9040 panel uses a 3-wire, 9-bit SPI interface. */
677 spi->bits_per_word = 9;
678
679 ret = spi_setup(spi);
680 if (ret < 0) {
681 dev_err(&spi->dev, "spi setup failed.\n");
682 goto out_free_lcd;
683 }
684
685 lcd->spi = spi;
686 lcd->dev = &spi->dev;
687
688 lcd->lcd_pd = spi->dev.platform_data;
689 if (!lcd->lcd_pd) {
690 dev_err(&spi->dev, "platform data is NULL.\n");
 ret = -EINVAL;
691 goto out_free_lcd;
692 }
693
694 ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
695 if (IS_ERR(ld)) {
696 ret = PTR_ERR(ld);
697 goto out_free_lcd;
698 }
699
700 lcd->ld = ld;
701
702 bd = backlight_device_register("ld9040-bl", &spi->dev,
703 lcd, &ld9040_backlight_ops, NULL);
704 if (IS_ERR(bd)) {
705 ret = PTR_ERR(bd);
706 goto out_unregister_lcd;
707 }
708
709 bd->props.max_brightness = MAX_BRIGHTNESS;
710 bd->props.brightness = MAX_BRIGHTNESS;
711 lcd->bd = bd;
712
713 /*
714  * If the LCD panel was already enabled by the bootloader (e.g. u-boot),
715  * do not power it on again.
716  */
717 if (!lcd->lcd_pd->lcd_enabled) {
718 /*
719  * The panel was left off by the bootloader, so the current
720  * status is recorded as powerdown before the panel is
721  * enabled below.
722  */
723 lcd->power = FB_BLANK_POWERDOWN;
724
725 ld9040_power(lcd, FB_BLANK_UNBLANK);
726 } else
727 lcd->power = FB_BLANK_UNBLANK;
728
729 dev_set_drvdata(&spi->dev, lcd);
730
731 dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
732 return 0;
733
out_unregister_lcd:
 lcd_device_unregister(ld);
734out_free_lcd:
735 kfree(lcd);
736 return ret;
737}
738
739static int __devexit ld9040_remove(struct spi_device *spi)
740{
741 struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
742
743 ld9040_power(lcd, FB_BLANK_POWERDOWN);
 backlight_device_unregister(lcd->bd);
744 lcd_device_unregister(lcd->ld);
745 kfree(lcd);
746
747 return 0;
748}
749
750#if defined(CONFIG_PM)
751static int ld9040_suspend(struct spi_device *spi, pm_message_t mesg)
752{
753 int ret = 0;
754 struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
755
756 dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
757
758 /*
759  * When the LCD panel is suspended it is powered off
760  * regardless of its current status.
761  */
762 ret = ld9040_power(lcd, FB_BLANK_POWERDOWN);
763
764 return ret;
765}
766
767static int ld9040_resume(struct spi_device *spi)
768{
769 int ret = 0;
770 struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
771
772 lcd->power = FB_BLANK_POWERDOWN;
773
774 ret = ld9040_power(lcd, FB_BLANK_UNBLANK);
775
776 return ret;
777}
778#else
779#define ld9040_suspend NULL
780#define ld9040_resume NULL
781#endif
782
783/* Power down all displays on reboot, poweroff or halt. */
784static void ld9040_shutdown(struct spi_device *spi)
785{
786 struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
787
788 ld9040_power(lcd, FB_BLANK_POWERDOWN);
789}
790
791static struct spi_driver ld9040_driver = {
792 .driver = {
793 .name = "ld9040",
794 .bus = &spi_bus_type,
795 .owner = THIS_MODULE,
796 },
797 .probe = ld9040_probe,
798 .remove = __devexit_p(ld9040_remove),
799 .shutdown = ld9040_shutdown,
800 .suspend = ld9040_suspend,
801 .resume = ld9040_resume,
802};
803
804static int __init ld9040_init(void)
805{
806 return spi_register_driver(&ld9040_driver);
807}
808
809static void __exit ld9040_exit(void)
810{
811 spi_unregister_driver(&ld9040_driver);
812}
813
814module_init(ld9040_init);
815module_exit(ld9040_exit);
816
817MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
818MODULE_DESCRIPTION("ld9040 LCD Driver");
819MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
new file mode 100644
index 000000000000..038d9c86ec03
--- /dev/null
+++ b/drivers/video/backlight/ld9040_gamma.h
@@ -0,0 +1,200 @@
1/*
2 * Gamma level definitions.
3 *
4 * Copyright (c) 2011 Samsung Electronics
5 * InKi Dae <inki.dae@samsung.com>
6 * Donghwa Lee <dh09.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef _LD9040_BRIGHTNESS_H
14#define _LD9040_BRIGHTNESS_H
15
16#define MAX_GAMMA_LEVEL 25
17#define GAMMA_TABLE_COUNT 21
18
19/* gamma value: 2.2 */
20static const unsigned int ld9040_22_300[] = {
21 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91,
22 0x00, 0xb2, 0xb4, 0xaa, 0xbb, 0x00, 0xac,
23 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3
24};
25
26static const unsigned int ld9040_22_290[] = {
27 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89,
28 0x00, 0xb7, 0xb6, 0xa8, 0xba, 0x00, 0xa4,
29 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa
30};
31
32static const unsigned int ld9040_22_280[] = {
33 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86,
34 0x00, 0xb8, 0xb5, 0xa8, 0xbc, 0x00, 0xa0,
35 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7
36};
37
38static const unsigned int ld9040_22_270[] = {
39 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84,
40 0x00, 0xb9, 0xb7, 0xa8, 0xbc, 0x00, 0x9d,
41 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4
42};
43
44static const unsigned int ld9040_22_260[] = {
45 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80,
46 0x00, 0xb8, 0xb6, 0xaa, 0xbc, 0x00, 0x9a,
47 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0
48};
49
50static const unsigned int ld9040_22_250[] = {
51 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d,
52 0x00, 0xb9, 0xb6, 0xaa, 0xbb, 0x00, 0x97,
53 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d
54};
55
56static const unsigned int ld9040_22_240[] = {
57 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a,
58 0x00, 0xb9, 0xb7, 0xaa, 0xbd, 0x00, 0x94,
59 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a
60};
61
62static const unsigned int ld9040_22_230[] = {
63 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77,
64 0x00, 0xb9, 0xb7, 0xab, 0xbe, 0x00, 0x90,
65 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97
66};
67
68static const unsigned int ld9040_22_220[] = {
69 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75,
70 0x00, 0xb9, 0xb8, 0xab, 0xbe, 0x00, 0x8e,
71 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94
72};
73
74static const unsigned int ld9040_22_210[] = {
75 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72,
76 0x00, 0xb8, 0xb8, 0xac, 0xbf, 0x00, 0x8a,
77 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91
78};
79
80static const unsigned int ld9040_22_200[] = {
81 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f,
82 0x00, 0xb8, 0xb8, 0xad, 0xc0, 0x00, 0x86,
83 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d
84};
85
86static const unsigned int ld9040_22_190[] = {
87 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c,
88 0x00, 0xb8, 0xb8, 0xae, 0xc1, 0x00, 0x82,
89 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89
90};
91
92static const unsigned int ld9040_22_180[] = {
93 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69,
94 0x00, 0xb8, 0xb9, 0xae, 0xc1, 0x00, 0x7f,
95 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85
96};
97
98static const unsigned int ld9040_22_170[] = {
99 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65,
100 0x00, 0xb7, 0xb8, 0xaf, 0xc3, 0x00, 0x7a,
101 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81
102};
103
104static const unsigned int ld9040_22_160[] = {
105 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62,
106 0x00, 0xb6, 0xba, 0xaf, 0xc3, 0x00, 0x76,
107 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e
108};
109
110static const unsigned int ld9040_22_150[] = {
111 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f,
112 0x00, 0xb5, 0xba, 0xb0, 0xc3, 0x00, 0x72,
113 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a
114};
115
116static const unsigned int ld9040_22_140[] = {
117 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b,
118 0x00, 0xb5, 0xba, 0xb1, 0xc4, 0x00, 0x6e,
119 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75
120};
121
122static const unsigned int ld9040_22_130[] = {
123 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57,
124 0x00, 0xb5, 0xbb, 0xb0, 0xc5, 0x00, 0x6a,
125 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70
126};
127
128static const unsigned int ld9040_22_120[] = {
129 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53,
130 0x00, 0xb5, 0xbb, 0xb3, 0xc6, 0x00, 0x65,
131 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c
132};
133
134static const unsigned int ld9040_22_110[] = {
135 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f,
136 0x00, 0xb4, 0xbb, 0xb3, 0xc7, 0x00, 0x60,
137 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67
138};
139
140static const unsigned int ld9040_22_100[] = {
141 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b,
142 0x00, 0xb3, 0xbc, 0xb4, 0xc7, 0x00, 0x5c,
143 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62
144};
145
146static const unsigned int ld9040_22_90[] = {
147 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46,
148 0x00, 0xb1, 0xbc, 0xb5, 0xc8, 0x00, 0x56,
149 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d
150};
151
152static const unsigned int ld9040_22_80[] = {
153 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41,
154 0x00, 0xb0, 0xbe, 0xb5, 0xc9, 0x00, 0x51,
155 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57
156};
157
158static const unsigned int ld9040_22_70[] = {
159 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c,
160 0x00, 0xaf, 0xbf, 0xb6, 0xcb, 0x00, 0x4b,
161 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52
162};
163
164static const unsigned int ld9040_22_50[] = {
165 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30,
166 0x00, 0xaf, 0xc0, 0xb8, 0xcd, 0x00, 0x3d,
167 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44
168};
169
170static struct ld9040_gamma {
171 unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
172} gamma_table = {
173 .gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
174 .gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
175 .gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
176 .gamma_22_table[3] = (unsigned int *)&ld9040_22_90,
177 .gamma_22_table[4] = (unsigned int *)&ld9040_22_100,
178 .gamma_22_table[5] = (unsigned int *)&ld9040_22_110,
179 .gamma_22_table[6] = (unsigned int *)&ld9040_22_120,
180 .gamma_22_table[7] = (unsigned int *)&ld9040_22_130,
181 .gamma_22_table[8] = (unsigned int *)&ld9040_22_140,
182 .gamma_22_table[9] = (unsigned int *)&ld9040_22_150,
183 .gamma_22_table[10] = (unsigned int *)&ld9040_22_160,
184 .gamma_22_table[11] = (unsigned int *)&ld9040_22_170,
185 .gamma_22_table[12] = (unsigned int *)&ld9040_22_180,
186 .gamma_22_table[13] = (unsigned int *)&ld9040_22_190,
187 .gamma_22_table[14] = (unsigned int *)&ld9040_22_200,
188 .gamma_22_table[15] = (unsigned int *)&ld9040_22_210,
189 .gamma_22_table[16] = (unsigned int *)&ld9040_22_220,
190 .gamma_22_table[17] = (unsigned int *)&ld9040_22_230,
191 .gamma_22_table[18] = (unsigned int *)&ld9040_22_240,
192 .gamma_22_table[19] = (unsigned int *)&ld9040_22_250,
193 .gamma_22_table[20] = (unsigned int *)&ld9040_22_260,
194 .gamma_22_table[21] = (unsigned int *)&ld9040_22_270,
195 .gamma_22_table[22] = (unsigned int *)&ld9040_22_280,
196 .gamma_22_table[23] = (unsigned int *)&ld9040_22_290,
197 .gamma_22_table[24] = (unsigned int *)&ld9040_22_300,
198};
199
200#endif
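For reference, a minimal sketch of the board-side glue the ld9040 driver above expects through spi->dev.platform_data. The GPIO numbers and helper names are hypothetical; the lcd_platform_data fields are the ones the driver dereferences in ld9040_power_on()/ld9040_power_off():

	static int board_lcd_power_on(struct lcd_device *ld, int enable)
	{
		gpio_set_value(GPIO_LCD_POWER, enable);	/* hypothetical GPIO */
		return 0;
	}

	static int board_lcd_reset(struct lcd_device *ld)
	{
		gpio_set_value(GPIO_LCD_RESET, 0);	/* hypothetical GPIO */
		mdelay(1);
		gpio_set_value(GPIO_LCD_RESET, 1);
		return 0;
	}

	static struct lcd_platform_data ld9040_platform_data = {
		.power_on	 = board_lcd_power_on,
		.reset		 = board_lcd_reset,
		.power_on_delay	 = 20,	/* ms */
		.reset_delay	 = 20,	/* ms */
		.power_off_delay = 120,	/* ms */
		.lcd_enabled	 = 0,	/* panel left off by the bootloader */
	};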
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index d2f59015d517..bbca3127071e 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -184,6 +184,7 @@ static int locomolcd_probe(struct locomo_dev *ldev)
 	local_irq_restore(flags);
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 4;
 	locomolcd_bl_device = backlight_device_register("locomo-bl",
 						&ldev->dev, NULL,
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 209acc105cbc..07e8e273ced0 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -136,6 +136,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev)
 	data->current_brightness = 0;
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = MAX_BRIGHTNESS;
 	bl = backlight_device_register(name, &pdev->dev, data,
 				       &max8925_backlight_ops, &props);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
deleted file mode 100644
index 1485f7345f49..000000000000
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * Backlight Driver for Nvidia 8600 in Macbook Pro
3 *
4 * Copyright (c) Red Hat <mjg@redhat.com>
5 * Based on code from Pommed:
6 * Copyright (C) 2006 Nicolas Boichat <nicolas@boichat.ch>
7 * Copyright (C) 2006 Felipe Alfaro Solana <felipe_alfaro@linuxmail.org>
8 * Copyright (C) 2007 Julien BLACHE <jb@jblache.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This driver triggers SMIs which cause the firmware to change the
15 * backlight brightness. This is icky in many ways, but it's impractical to
16 * get at the firmware code in order to figure out what it's actually doing.
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/platform_device.h>
23#include <linux/backlight.h>
24#include <linux/err.h>
25#include <linux/dmi.h>
26#include <linux/io.h>
27
28static struct backlight_device *mbp_backlight_device;
29
30/* Structure to be passed to the DMI_MATCH function. */
31struct dmi_match_data {
32 /* I/O resource to allocate. */
33 unsigned long iostart;
34 unsigned long iolen;
35 /* Backlight operations structure. */
36 const struct backlight_ops backlight_ops;
37};
38
39/* Module parameters. */
40static int debug;
41module_param_named(debug, debug, int, 0644);
42MODULE_PARM_DESC(debug, "Set to one to enable debugging messages.");
43
44/*
45 * Implementation for MacBooks with Intel chipset.
46 */
47static int intel_chipset_send_intensity(struct backlight_device *bd)
48{
49 int intensity = bd->props.brightness;
50
51 if (debug)
52 printk(KERN_DEBUG "mbp_nvidia_bl: setting brightness to %d\n",
53 intensity);
54
55 outb(0x04 | (intensity << 4), 0xb3);
56 outb(0xbf, 0xb2);
57 return 0;
58}
59
60static int intel_chipset_get_intensity(struct backlight_device *bd)
61{
62 int intensity;
63
64 outb(0x03, 0xb3);
65 outb(0xbf, 0xb2);
66 intensity = inb(0xb3) >> 4;
67
68 if (debug)
69 printk(KERN_DEBUG "mbp_nvidia_bl: read brightness of %d\n",
70 intensity);
71
72 return intensity;
73}
74
75static const struct dmi_match_data intel_chipset_data = {
76 .iostart = 0xb2,
77 .iolen = 2,
78 .backlight_ops = {
79 .options = BL_CORE_SUSPENDRESUME,
80 .get_brightness = intel_chipset_get_intensity,
81 .update_status = intel_chipset_send_intensity,
82 }
83};
84
85/*
86 * Implementation for MacBooks with Nvidia chipset.
87 */
88static int nvidia_chipset_send_intensity(struct backlight_device *bd)
89{
90 int intensity = bd->props.brightness;
91
92 if (debug)
93 printk(KERN_DEBUG "mbp_nvidia_bl: setting brightness to %d\n",
94 intensity);
95
96 outb(0x04 | (intensity << 4), 0x52f);
97 outb(0xbf, 0x52e);
98 return 0;
99}
100
101static int nvidia_chipset_get_intensity(struct backlight_device *bd)
102{
103 int intensity;
104
105 outb(0x03, 0x52f);
106 outb(0xbf, 0x52e);
107 intensity = inb(0x52f) >> 4;
108
109 if (debug)
110 printk(KERN_DEBUG "mbp_nvidia_bl: read brightness of %d\n",
111 intensity);
112
113 return intensity;
114}
115
116static const struct dmi_match_data nvidia_chipset_data = {
117 .iostart = 0x52e,
118 .iolen = 2,
119 .backlight_ops = {
120 .options = BL_CORE_SUSPENDRESUME,
121 .get_brightness = nvidia_chipset_get_intensity,
122 .update_status = nvidia_chipset_send_intensity
123 }
124};
125
126/*
127 * DMI matching.
128 */
129static /* const */ struct dmi_match_data *driver_data;
130
131static int mbp_dmi_match(const struct dmi_system_id *id)
132{
133 driver_data = id->driver_data;
134
135 printk(KERN_INFO "mbp_nvidia_bl: %s detected\n", id->ident);
136 return 1;
137}
138
139static const struct dmi_system_id __initdata mbp_device_table[] = {
140 {
141 .callback = mbp_dmi_match,
142 .ident = "MacBook 1,1",
143 .matches = {
144 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
145 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
146 },
147 .driver_data = (void *)&intel_chipset_data,
148 },
149 {
150 .callback = mbp_dmi_match,
151 .ident = "MacBook 2,1",
152 .matches = {
153 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
154 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
155 },
156 .driver_data = (void *)&intel_chipset_data,
157 },
158 {
159 .callback = mbp_dmi_match,
160 .ident = "MacBook 3,1",
161 .matches = {
162 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
163 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
164 },
165 .driver_data = (void *)&intel_chipset_data,
166 },
167 {
168 .callback = mbp_dmi_match,
169 .ident = "MacBook 4,1",
170 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
172 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
173 },
174 .driver_data = (void *)&intel_chipset_data,
175 },
176 {
177 .callback = mbp_dmi_match,
178 .ident = "MacBook 4,2",
179 .matches = {
180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
181 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
182 },
183 .driver_data = (void *)&intel_chipset_data,
184 },
185 {
186 .callback = mbp_dmi_match,
187 .ident = "MacBookPro 1,1",
188 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
190 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
191 },
192 .driver_data = (void *)&intel_chipset_data,
193 },
194 {
195 .callback = mbp_dmi_match,
196 .ident = "MacBookPro 1,2",
197 .matches = {
198 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
199 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"),
200 },
201 .driver_data = (void *)&intel_chipset_data,
202 },
203 {
204 .callback = mbp_dmi_match,
205 .ident = "MacBookPro 2,1",
206 .matches = {
207 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
208 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"),
209 },
210 .driver_data = (void *)&intel_chipset_data,
211 },
212 {
213 .callback = mbp_dmi_match,
214 .ident = "MacBookPro 2,2",
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
217 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
218 },
219 .driver_data = (void *)&intel_chipset_data,
220 },
221 {
222 .callback = mbp_dmi_match,
223 .ident = "MacBookPro 3,1",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
226 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
227 },
228 .driver_data = (void *)&intel_chipset_data,
229 },
230 {
231 .callback = mbp_dmi_match,
232 .ident = "MacBookPro 3,2",
233 .matches = {
234 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
235 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,2"),
236 },
237 .driver_data = (void *)&intel_chipset_data,
238 },
239 {
240 .callback = mbp_dmi_match,
241 .ident = "MacBookPro 4,1",
242 .matches = {
243 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
244 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro4,1"),
245 },
246 .driver_data = (void *)&intel_chipset_data,
247 },
248 {
249 .callback = mbp_dmi_match,
250 .ident = "MacBookAir 1,1",
251 .matches = {
252 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
253 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"),
254 },
255 .driver_data = (void *)&intel_chipset_data,
256 },
257 {
258 .callback = mbp_dmi_match,
259 .ident = "MacBook 5,1",
260 .matches = {
261 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
262 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,1"),
263 },
264 .driver_data = (void *)&nvidia_chipset_data,
265 },
266 {
267 .callback = mbp_dmi_match,
268 .ident = "MacBook 5,2",
269 .matches = {
270 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
271 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
272 },
273 .driver_data = (void *)&nvidia_chipset_data,
274 },
275 {
276 .callback = mbp_dmi_match,
277 .ident = "MacBook 6,1",
278 .matches = {
279 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
280 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
281 },
282 .driver_data = (void *)&nvidia_chipset_data,
283 },
284 {
285 .callback = mbp_dmi_match,
286 .ident = "MacBookAir 2,1",
287 .matches = {
288 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
289 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir2,1"),
290 },
291 .driver_data = (void *)&nvidia_chipset_data,
292 },
293 {
294 .callback = mbp_dmi_match,
295 .ident = "MacBookPro 5,1",
296 .matches = {
297 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
298 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
299 },
300 .driver_data = (void *)&nvidia_chipset_data,
301 },
302 {
303 .callback = mbp_dmi_match,
304 .ident = "MacBookPro 5,2",
305 .matches = {
306 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
307 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"),
308 },
309 .driver_data = (void *)&nvidia_chipset_data,
310 },
311 {
312 .callback = mbp_dmi_match,
313 .ident = "MacBookPro 5,3",
314 .matches = {
315 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
316 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3"),
317 },
318 .driver_data = (void *)&nvidia_chipset_data,
319 },
320 {
321 .callback = mbp_dmi_match,
322 .ident = "MacBookPro 5,4",
323 .matches = {
324 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
325 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4"),
326 },
327 .driver_data = (void *)&nvidia_chipset_data,
328 },
329 {
330 .callback = mbp_dmi_match,
331 .ident = "MacBookPro 5,5",
332 .matches = {
333 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
334 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"),
335 },
336 .driver_data = (void *)&nvidia_chipset_data,
337 },
338 {
339 .callback = mbp_dmi_match,
340 .ident = "MacBookAir 3,1",
341 .matches = {
342 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
343 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
344 },
345 .driver_data = (void *)&nvidia_chipset_data,
346 },
347 {
348 .callback = mbp_dmi_match,
349 .ident = "MacBookAir 3,2",
350 .matches = {
351 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
352 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
353 },
354 .driver_data = (void *)&nvidia_chipset_data,
355 },
356 { }
357};
358
359static int __init mbp_init(void)
360{
361 struct backlight_properties props;
362 if (!dmi_check_system(mbp_device_table))
363 return -ENODEV;
364
365 if (!request_region(driver_data->iostart, driver_data->iolen,
366 "Macbook Pro backlight"))
367 return -ENXIO;
368
369 memset(&props, 0, sizeof(struct backlight_properties));
370 props.max_brightness = 15;
371 mbp_backlight_device = backlight_device_register("mbp_backlight", NULL,
372 NULL,
373 &driver_data->backlight_ops,
374 &props);
375 if (IS_ERR(mbp_backlight_device)) {
376 release_region(driver_data->iostart, driver_data->iolen);
377 return PTR_ERR(mbp_backlight_device);
378 }
379
380 mbp_backlight_device->props.brightness =
381 driver_data->backlight_ops.get_brightness(mbp_backlight_device);
382 backlight_update_status(mbp_backlight_device);
383
384 return 0;
385}
386
387static void __exit mbp_exit(void)
388{
389 backlight_device_unregister(mbp_backlight_device);
390
391 release_region(driver_data->iostart, driver_data->iolen);
392}
393
394module_init(mbp_init);
395module_exit(mbp_exit);
396
397MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
398MODULE_DESCRIPTION("Nvidia-based Macbook Pro Backlight Driver");
399MODULE_LICENSE("GPL");
400MODULE_DEVICE_TABLE(dmi, mbp_device_table);
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index d3bc56296c8d..08d26a72394c 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -146,6 +146,7 @@ static int omapbl_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = OMAPBL_MAX_INTENSITY;
 	dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops,
 				       &props);
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index 3c424f7efdcc..ef5628d60563 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -112,6 +112,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
 	if (!pcf_bl)
 		return -ENOMEM;
 
+	bl_props.type = BACKLIGHT_RAW;
 	bl_props.max_brightness = 0x3f;
 	bl_props.power = FB_BLANK_UNBLANK;
 
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 809278c90738..6af183d6465e 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -84,6 +84,7 @@ static int progearbl_probe(struct platform_device *pdev)
 	pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20);
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
 	progear_backlight_device = backlight_device_register("progear-bl",
 						&pdev->dev, NULL,
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 21866ec69656..b8f38ec6eb18 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,6 +28,7 @@ struct pwm_bl_data {
 	unsigned int		lth_brightness;
 	int			(*notify)(struct device *,
 					  int brightness);
+	int			(*check_fb)(struct device *, struct fb_info *);
 };
 
 static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -62,9 +63,18 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
 	return bl->props.brightness;
 }
 
+static int pwm_backlight_check_fb(struct backlight_device *bl,
+				  struct fb_info *info)
+{
+	struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
+
+	return !pb->check_fb || pb->check_fb(pb->dev, info);
+}
+
 static const struct backlight_ops pwm_backlight_ops = {
 	.update_status	= pwm_backlight_update_status,
 	.get_brightness	= pwm_backlight_get_brightness,
+	.check_fb	= pwm_backlight_check_fb,
 };
 
 static int pwm_backlight_probe(struct platform_device *pdev)
@@ -95,6 +105,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 
 	pb->period = data->pwm_period_ns;
 	pb->notify = data->notify;
+	pb->check_fb = data->check_fb;
 	pb->lth_brightness = data->lth_brightness *
 		(data->pwm_period_ns / data->max_brightness);
 	pb->dev = &pdev->dev;
@@ -108,6 +119,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 	dev_dbg(&pdev->dev, "got pwm for backlight\n");
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = data->max_brightness;
 	bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
 				       &pwm_backlight_ops, &props);
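The new check_fb hook gives a board a way to tie the PWM backlight to one specific framebuffer. A minimal sketch of the board-side use (the "board-lcd" id and the data values are hypothetical):

	static int board_bl_check_fb(struct device *dev, struct fb_info *info)
	{
		/* claim only the LCD controller's framebuffer */
		return strcmp(info->fix.id, "board-lcd") == 0;
	}

	static struct platform_pwm_backlight_data board_backlight_data = {
		.pwm_id		= 0,
		.max_brightness	= 255,
		.dft_brightness	= 200,
		.pwm_period_ns	= 78770,
		.check_fb	= board_bl_check_fb,
	};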
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 5927db0da999..322040f686c2 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -778,6 +778,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
 
 	bd->props.max_brightness = MAX_BRIGHTNESS;
 	bd->props.brightness = MAX_BRIGHTNESS;
+	bd->props.type = BACKLIGHT_RAW;
 	lcd->bd = bd;
 
 	/*
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2a04b382ec48..425a7365470b 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -102,6 +102,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
 	data->i2c = client;
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 512 - 1;
 	data->bl = backlight_device_register("tosa-bl", &client->dev, data,
 					     &bl_ops, &props);
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 08fd87f3aecc..d4c6eb248ff9 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -193,6 +193,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
 	data->current_brightness = 0;
 	data->isink_reg = isink_reg;
 
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = max_isel;
 	bl = backlight_device_register("wm831x", &pdev->dev, data,
 				       &wm831x_backlight_ops, &props);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index e7d0f525041e..2464b910b590 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -649,6 +649,7 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
 	}
 #ifndef NO_BL_SUPPORT
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 255;
 	bl_dev = backlight_device_register("bf54x-bl", NULL, NULL,
 					   &bfin_lq043fb_bl_ops, &props);
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 3cf77676947c..d8de29f0dd8d 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -545,6 +545,7 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
 	}
 #ifndef NO_BL_SUPPORT
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = 255;
 	bl_dev = backlight_device_register("bf52x-bl", NULL, NULL,
 					   &bfin_lq043fb_bl_ops, &props);
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 69bd4a581d4a..ef72cb483834 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -499,6 +499,7 @@ static void imxfb_init_backlight(struct imxfb_info *fbi)
 
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.max_brightness = 0xff;
+	props.type = BACKLIGHT_RAW;
 	writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
 
 	bl = backlight_device_register("imxfb-bl", &fbi->pdev->dev, fbi,
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 6aac6d1b937b..8471008aa6ff 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -111,6 +111,7 @@ void nvidia_bl_init(struct nvidia_par *par)
 	snprintf(name, sizeof(name), "nvidiabl%d", info->node);
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops,
 				       &props);
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index e77310653207..7e04c921aa2a 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -534,6 +534,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
 
 	props.fb_blank = FB_BLANK_UNBLANK;
 	props.power = FB_BLANK_UNBLANK;
+	props.type = BACKLIGHT_RAW;
 
 	bldev = backlight_device_register("acx565akm", &md->spi->dev,
 					  md, &acx565akm_bl_ops, &props);
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index 9a138f650e05..d2b35d2df2a6 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -99,6 +99,7 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
 
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.max_brightness = dssdev->max_backlight_level;
+	props.type = BACKLIGHT_RAW;
 
 	bl = backlight_device_register("sharp-ls", &dssdev->dev, dssdev,
 				       &sharp_ls_bl_ops, &props);
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 61026f96ad20..c74e8b778ba1 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -729,6 +729,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
 		props.max_brightness = 255;
 	else
 		props.max_brightness = 127;
+
+	props.type = BACKLIGHT_RAW;
 	bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
 					  &taal_bl_ops, &props);
 	if (IS_ERR(bldev)) {
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index da388186d617..d8ab7be4fd6b 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -355,6 +355,7 @@ static void riva_bl_init(struct riva_par *par)
 	snprintf(name, sizeof(name), "rivabl%d", info->node);
 
 	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
 	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
 				       &props);
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/via/viafbdev.h
index d66f963e930e..137996dc547e 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/via/viafbdev.h
@@ -94,9 +94,6 @@ extern int viafb_LCD_ON;
 extern int viafb_DVI_ON;
 extern int viafb_hotplug;
 
-extern int strict_strtoul(const char *cp, unsigned int base,
-			unsigned long *res);
-
 u8 viafb_gpio_i2c_read_lvds(struct lvds_setting_information
 	*plvds_setting_info, struct lvds_chip_information
 	*plvds_chip_info, u8 index);
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 515455296378..33aa116732c8 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -262,7 +262,7 @@ static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name,
 	if (strcmp(name, "") != 0)
 		return -EINVAL;
 
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	v9ses = v9fs_dentry2v9ses(dentry);
 	/*
 	 * We allow set/get/list of acl when access=client is not specified
 	 */
@@ -312,7 +312,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
 	if (strcmp(name, "") != 0)
 		return -EINVAL;
 
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	v9ses = v9fs_dentry2v9ses(dentry);
 	/*
 	 * set the attribute on the remote. Without even looking at the
 	 * xattr value. We leave it to the server to validate
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index cd63e002d826..0ee594569dcc 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -134,7 +134,7 @@ static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry,
 	struct v9fs_session_info *v9ses;
 	struct p9_fid *fid, *old_fid = NULL;
 
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	v9ses = v9fs_dentry2v9ses(dentry);
 	access = v9ses->flags & V9FS_ACCESS_MASK;
 	fid = v9fs_fid_find(dentry, uid, any);
 	if (fid)
@@ -237,7 +237,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
 	int any, access;
 	struct v9fs_session_info *v9ses;
 
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	v9ses = v9fs_dentry2v9ses(dentry);
 	access = v9ses->flags & V9FS_ACCESS_MASK;
 	switch (access) {
 	case V9FS_ACCESS_SINGLE:
@@ -286,9 +286,11 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
 
 struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
 {
-	int err;
+	int err, flags;
 	struct p9_fid *fid;
+	struct v9fs_session_info *v9ses;
 
+	v9ses = v9fs_dentry2v9ses(dentry);
 	fid = v9fs_fid_clone_with_uid(dentry, 0);
 	if (IS_ERR(fid))
 		goto error_out;
@@ -297,8 +299,17 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
 	 * dirty pages. We always request for the open fid in read-write
 	 * mode so that a partial page write which result in page
 	 * read can work.
+	 *
+	 * we don't have a tsyncfs operation for older version
+	 * of protocol. So make sure the write back fid is
+	 * opened in O_SYNC mode.
 	 */
-	err = p9_client_open(fid, O_RDWR);
+	if (!v9fs_proto_dotl(v9ses))
+		flags = O_RDWR | O_SYNC;
+	else
+		flags = O_RDWR;
+
+	err = p9_client_open(fid, flags);
 	if (err < 0) {
 		p9_client_clunk(fid);
 		fid = ERR_PTR(err);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index bd8496db135b..9665c2b840e6 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -130,6 +130,7 @@ struct v9fs_inode {
 #endif
 	unsigned int cache_validity;
 	struct p9_fid *writeback_fid;
+	struct mutex v_mutex;
 	struct inode vfs_inode;
 };
 
@@ -173,6 +174,11 @@ static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
 	return (inode->i_sb->s_fs_info);
 }
 
+static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
+{
+	return dentry->d_sb->s_fs_info;
+}
+
 static inline int v9fs_proto_dotu(struct v9fs_session_info *v9ses)
 {
 	return v9ses->flags & V9FS_PROTO_2000U;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 78bcb97c3425..ffed55817f0c 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -90,7 +90,9 @@ int v9fs_file_open(struct inode *inode, struct file *file)
 	}
 
 	file->private_data = fid;
-	if (v9ses->cache && !v9inode->writeback_fid) {
+	mutex_lock(&v9inode->v_mutex);
+	if (v9ses->cache && !v9inode->writeback_fid &&
+	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
 		/*
 		 * clone a fid and add it to writeback_fid
 		 * we do it during open time instead of
@@ -101,10 +103,12 @@ int v9fs_file_open(struct inode *inode, struct file *file)
 		fid = v9fs_writeback_fid(file->f_path.dentry);
 		if (IS_ERR(fid)) {
 			err = PTR_ERR(fid);
+			mutex_unlock(&v9inode->v_mutex);
 			goto out_error;
 		}
 		v9inode->writeback_fid = (void *) fid;
 	}
+	mutex_unlock(&v9inode->v_mutex);
 #ifdef CONFIG_9P_FSCACHE
 	if (v9ses->cache)
 		v9fs_cache_inode_set_cookie(inode, file);
@@ -504,9 +508,12 @@ v9fs_file_write(struct file *filp, const char __user * data,
 	if (!count)
 		goto out;
 
-	return v9fs_file_write_internal(filp->f_path.dentry->d_inode,
-					filp->private_data,
-					data, count, offset, 1);
+	retval = v9fs_file_write_internal(filp->f_path.dentry->d_inode,
+					  filp->private_data,
+					  data, count, &origin, 1);
+	/* update offset on successful write */
+	if (retval > 0)
+		*offset = origin;
 out:
 	return retval;
 }
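Passing &origin instead of the user-supplied offset pointer keeps the file position untouched when the write fails; the caller now folds the new position back into *offset only after a successful (retval > 0) write.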
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 8a2c232f708a..7f6c67703195 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -221,6 +221,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
 #endif
 	v9inode->writeback_fid = NULL;
 	v9inode->cache_validity = 0;
+	mutex_init(&v9inode->v_mutex);
 	return &v9inode->vfs_inode;
 }
 
@@ -650,7 +651,9 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
 	/* if we are opening a file, assign the open fid to the file */
 	if (nd && nd->flags & LOOKUP_OPEN) {
 		v9inode = V9FS_I(dentry->d_inode);
-		if (v9ses->cache && !v9inode->writeback_fid) {
+		mutex_lock(&v9inode->v_mutex);
+		if (v9ses->cache && !v9inode->writeback_fid &&
+		    ((flags & O_ACCMODE) != O_RDONLY)) {
 			/*
 			 * clone a fid and add it to writeback_fid
 			 * we do it during open time instead of
@@ -661,10 +664,12 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode,
 			inode_fid = v9fs_writeback_fid(dentry);
 			if (IS_ERR(inode_fid)) {
 				err = PTR_ERR(inode_fid);
+				mutex_unlock(&v9inode->v_mutex);
 				goto error;
 			}
 			v9inode->writeback_fid = (void *) inode_fid;
 		}
+		mutex_unlock(&v9inode->v_mutex);
 		filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
 		if (IS_ERR(filp)) {
 			err = PTR_ERR(filp);
@@ -931,7 +936,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 
 	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
 	err = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	v9ses = v9fs_dentry2v9ses(dentry);
 	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
 		generic_fillattr(dentry->d_inode, stat);
 		return 0;
@@ -967,8 +972,12 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
 	struct p9_wstat wstat;
 
 	P9_DPRINTK(P9_DEBUG_VFS, "\n");
+	retval = inode_change_ok(dentry->d_inode, iattr);
+	if (retval)
+		return retval;
+
 	retval = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	v9ses = v9fs_dentry2v9ses(dentry);
 	fid = v9fs_fid_lookup(dentry);
 	if(IS_ERR(fid))
 		return PTR_ERR(fid);
@@ -993,12 +1002,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
 		if (iattr->ia_valid & ATTR_GID)
 			wstat.n_gid = iattr->ia_gid;
 	}
-	if ((iattr->ia_valid & ATTR_SIZE) &&
-	    iattr->ia_size != i_size_read(dentry->d_inode)) {
-		retval = vmtruncate(dentry->d_inode, iattr->ia_size);
-		if (retval)
-			return retval;
-	}
+
 	/* Write all dirty data */
 	if (S_ISREG(dentry->d_inode->i_mode))
 		filemap_write_and_wait(dentry->d_inode->i_mapping);
@@ -1006,6 +1010,11 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
1006 retval = p9_client_wstat(fid, &wstat); 1010 retval = p9_client_wstat(fid, &wstat);
1007 if (retval < 0) 1011 if (retval < 0)
1008 return retval; 1012 return retval;
1013
1014 if ((iattr->ia_valid & ATTR_SIZE) &&
1015 iattr->ia_size != i_size_read(dentry->d_inode))
1016 truncate_setsize(dentry->d_inode, iattr->ia_size);
1017
1009 v9fs_invalidate_inode_attr(dentry->d_inode); 1018 v9fs_invalidate_inode_attr(dentry->d_inode);
1010 1019
1011 setattr_copy(dentry->d_inode, iattr); 1020 setattr_copy(dentry->d_inode, iattr);
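
The setattr hunks reorder truncation around the server round trip: the old code shrank the local inode with vmtruncate() before talking to the server, so a failed p9_client_wstat() left client and server sizes disagreeing, while the new code validates with inode_change_ok() up front and applies truncate_setsize() only after the wstat succeeds. A self-contained model of why the ordering matters (the server/wstat names are illustrative, not 9p API):

#include <stdio.h>

struct server { long size; int fail; };
struct inode  { long size; };

static int wstat(struct server *srv, long newsize)
{
	if (srv->fail)
		return -1;	/* e.g. -EPERM from the fileserver */
	srv->size = newsize;
	return 0;
}

/* new ordering: remote change first, local truncate only on success */
static int setattr(struct server *srv, struct inode *in, long newsize)
{
	int err = wstat(srv, newsize);
	if (err < 0)
		return err;	/* in->size untouched, still matches srv */
	in->size = newsize;
	return 0;
}

int main(void)
{
	struct server srv = { 100, 1 };	/* server that refuses the change */
	struct inode in = { 100 };

	setattr(&srv, &in, 0);	/* fails: both sides still report 100 */
	printf("server=%ld local=%ld\n", srv.size, in.size);
	return 0;
}
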
@@ -1130,7 +1139,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
1130 1139
1131 P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name); 1140 P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
1132 retval = -EPERM; 1141 retval = -EPERM;
1133 v9ses = v9fs_inode2v9ses(dentry->d_inode); 1142 v9ses = v9fs_dentry2v9ses(dentry);
1134 fid = v9fs_fid_lookup(dentry); 1143 fid = v9fs_fid_lookup(dentry);
1135 if (IS_ERR(fid)) 1144 if (IS_ERR(fid))
1136 return PTR_ERR(fid); 1145 return PTR_ERR(fid);
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 67c138e94feb..ffbb113d5f33 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -245,7 +245,9 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
245 v9fs_set_create_acl(dentry, dacl, pacl); 245 v9fs_set_create_acl(dentry, dacl, pacl);
246 246
247 v9inode = V9FS_I(inode); 247 v9inode = V9FS_I(inode);
248 if (v9ses->cache && !v9inode->writeback_fid) { 248 mutex_lock(&v9inode->v_mutex);
249 if (v9ses->cache && !v9inode->writeback_fid &&
250 ((flags & O_ACCMODE) != O_RDONLY)) {
249 /* 251 /*
250 * clone a fid and add it to writeback_fid 252 * clone a fid and add it to writeback_fid
251 * we do it during open time instead of 253 * we do it during open time instead of
@@ -256,10 +258,12 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
256 inode_fid = v9fs_writeback_fid(dentry); 258 inode_fid = v9fs_writeback_fid(dentry);
257 if (IS_ERR(inode_fid)) { 259 if (IS_ERR(inode_fid)) {
258 err = PTR_ERR(inode_fid); 260 err = PTR_ERR(inode_fid);
261 mutex_unlock(&v9inode->v_mutex);
259 goto error; 262 goto error;
260 } 263 }
261 v9inode->writeback_fid = (void *) inode_fid; 264 v9inode->writeback_fid = (void *) inode_fid;
262 } 265 }
266 mutex_unlock(&v9inode->v_mutex);
263 /* Since we are opening a file, assign the open fid to the file */ 267 /* Since we are opening a file, assign the open fid to the file */
264 filp = lookup_instantiate_filp(nd, dentry, generic_file_open); 268 filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
265 if (IS_ERR(filp)) { 269 if (IS_ERR(filp)) {
@@ -391,7 +395,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
391 395
392 P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry); 396 P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
393 err = -EPERM; 397 err = -EPERM;
394 v9ses = v9fs_inode2v9ses(dentry->d_inode); 398 v9ses = v9fs_dentry2v9ses(dentry);
395 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { 399 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
396 generic_fillattr(dentry->d_inode, stat); 400 generic_fillattr(dentry->d_inode, stat);
397 return 0; 401 return 0;
@@ -448,17 +452,11 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
448 p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; 452 p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
449 453
450 retval = -EPERM; 454 retval = -EPERM;
451 v9ses = v9fs_inode2v9ses(dentry->d_inode); 455 v9ses = v9fs_dentry2v9ses(dentry);
452 fid = v9fs_fid_lookup(dentry); 456 fid = v9fs_fid_lookup(dentry);
453 if (IS_ERR(fid)) 457 if (IS_ERR(fid))
454 return PTR_ERR(fid); 458 return PTR_ERR(fid);
455 459
456 if ((iattr->ia_valid & ATTR_SIZE) &&
457 iattr->ia_size != i_size_read(dentry->d_inode)) {
458 retval = vmtruncate(dentry->d_inode, iattr->ia_size);
459 if (retval)
460 return retval;
461 }
462 /* Write all dirty data */ 460 /* Write all dirty data */
463 if (S_ISREG(dentry->d_inode->i_mode)) 461 if (S_ISREG(dentry->d_inode->i_mode))
464 filemap_write_and_wait(dentry->d_inode->i_mapping); 462 filemap_write_and_wait(dentry->d_inode->i_mapping);
@@ -466,8 +464,12 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
466 retval = p9_client_setattr(fid, &p9attr); 464 retval = p9_client_setattr(fid, &p9attr);
467 if (retval < 0) 465 if (retval < 0)
468 return retval; 466 return retval;
469 v9fs_invalidate_inode_attr(dentry->d_inode);
470 467
468 if ((iattr->ia_valid & ATTR_SIZE) &&
469 iattr->ia_size != i_size_read(dentry->d_inode))
470 truncate_setsize(dentry->d_inode, iattr->ia_size);
471
472 v9fs_invalidate_inode_attr(dentry->d_inode);
471 setattr_copy(dentry->d_inode, iattr); 473 setattr_copy(dentry->d_inode, iattr);
472 mark_inode_dirty(dentry->d_inode); 474 mark_inode_dirty(dentry->d_inode);
473 if (iattr->ia_valid & ATTR_MODE) { 475 if (iattr->ia_valid & ATTR_MODE) {
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 09fd08d1606f..f3eed3383e4f 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -262,7 +262,7 @@ static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
262 goto done; 262 goto done;
263 } 263 }
264 264
265 v9ses = v9fs_inode2v9ses(dentry->d_inode); 265 v9ses = v9fs_dentry2v9ses(dentry);
266 if (v9fs_proto_dotl(v9ses)) { 266 if (v9fs_proto_dotl(v9ses)) {
267 res = p9_client_statfs(fid, &rs); 267 res = p9_client_statfs(fid, &rs);
268 if (res == 0) { 268 if (res == 0) {
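
Several hunks in this series swap v9fs_inode2v9ses(dentry->d_inode) for v9fs_dentry2v9ses(dentry): the session hangs off the superblock, so it can be reached from the dentry alone. A sketch of the helper's assumed shape, mirroring the inode variant (not quoted from the patch):

static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
{
	return (struct v9fs_session_info *)dentry->d_sb->s_fs_info;
}
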
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 2ff622f6f547..a8a58d864f96 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -50,6 +50,7 @@ struct adfs_sb_info {
50 gid_t s_gid; /* owner gid */ 50 gid_t s_gid; /* owner gid */
51 umode_t s_owner_mask; /* ADFS owner perm -> unix perm */ 51 umode_t s_owner_mask; /* ADFS owner perm -> unix perm */
52 umode_t s_other_mask; /* ADFS other perm -> unix perm */ 52 umode_t s_other_mask; /* ADFS other perm -> unix perm */
53 int s_ftsuffix; /* ,xyz hex filetype suffix option */
53 54
54 __u32 s_ids_per_zone; /* max. no ids in one zone */ 55 __u32 s_ids_per_zone; /* max. no ids in one zone */
55 __u32 s_idlen; /* length of ID in map */ 56 __u32 s_idlen; /* length of ID in map */
@@ -79,6 +80,10 @@ struct adfs_dir {
79 80
80 int nr_buffers; 81 int nr_buffers;
81 struct buffer_head *bh[4]; 82 struct buffer_head *bh[4];
83
84 /* big directories need allocated buffers */
85 struct buffer_head **bh_fplus;
86
82 unsigned int pos; 87 unsigned int pos;
83 unsigned int parent_id; 88 unsigned int parent_id;
84 89
@@ -89,7 +94,7 @@ struct adfs_dir {
89/* 94/*
90 * This is the overall maximum name length 95 * This is the overall maximum name length
91 */ 96 */
92#define ADFS_MAX_NAME_LEN 256 97#define ADFS_MAX_NAME_LEN (256 + 4) /* +4 for ,xyz hex filetype suffix */
93struct object_info { 98struct object_info {
94 __u32 parent_id; /* parent object id */ 99 __u32 parent_id; /* parent object id */
95 __u32 file_id; /* object id */ 100 __u32 file_id; /* object id */
@@ -97,10 +102,26 @@ struct object_info {
97 __u32 execaddr; /* execution address */ 102 __u32 execaddr; /* execution address */
98 __u32 size; /* size */ 103 __u32 size; /* size */
99 __u8 attr; /* RISC OS attributes */ 104 __u8 attr; /* RISC OS attributes */
100 unsigned char name_len; /* name length */ 105 unsigned int name_len; /* name length */
101 char name[ADFS_MAX_NAME_LEN];/* file name */ 106 char name[ADFS_MAX_NAME_LEN];/* file name */
107
108 /* RISC OS file type (12-bit: derived from loadaddr) */
109 __u16 filetype;
102}; 110};
103 111
112/* RISC OS 12-bit filetype converts to ,xyz hex filename suffix */
113static inline int append_filetype_suffix(char *buf, __u16 filetype)
114{
 115 if (filetype == 0xffff) /* (__u16)-1: no RISC OS filetype */
116 return 0;
117
118 *buf++ = ',';
119 *buf++ = hex_asc_lo(filetype >> 8);
120 *buf++ = hex_asc_lo(filetype >> 4);
121 *buf++ = hex_asc_lo(filetype >> 0);
122 return 4;
123}
124
104struct adfs_dir_ops { 125struct adfs_dir_ops {
105 int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir); 126 int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir);
106 int (*setpos)(struct adfs_dir *dir, unsigned int fpos); 127 int (*setpos)(struct adfs_dir *dir, unsigned int fpos);
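
append_filetype_suffix() turns the 12-bit RISC OS filetype into a four-character ",xyz" name suffix. A runnable userspace rerun of the digit math (hex_lo() re-implements the kernel's hex_asc_lo() lookup; 0xfff is the RISC OS "Text" type):

#include <stdio.h>

static char hex_lo(unsigned int v)	/* userspace stand-in for hex_asc_lo() */
{
	return "0123456789abcdef"[v & 15];
}

int main(void)
{
	unsigned short filetype = 0xfff;	/* RISC OS "Text" */

	printf("file,%c%c%c\n", hex_lo(filetype >> 8),
	       hex_lo(filetype >> 4), hex_lo(filetype >> 0));
	return 0;	/* prints "file,fff"; hence s_namelen grows by 4 */
}
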
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index bafc71222e25..4bbe853ee50a 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -52,7 +52,6 @@ static inline int adfs_readname(char *buf, char *ptr, int maxlen)
52 *buf++ = *ptr; 52 *buf++ = *ptr;
53 ptr++; 53 ptr++;
54 } 54 }
55 *buf = '\0';
56 55
57 return buf - old_buf; 56 return buf - old_buf;
58} 57}
@@ -208,7 +207,8 @@ release_buffers:
208 * convert a disk-based directory entry to a Linux ADFS directory entry 207 * convert a disk-based directory entry to a Linux ADFS directory entry
209 */ 208 */
210static inline void 209static inline void
211adfs_dir2obj(struct object_info *obj, struct adfs_direntry *de) 210adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
211 struct adfs_direntry *de)
212{ 212{
213 obj->name_len = adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN); 213 obj->name_len = adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN);
214 obj->file_id = adfs_readval(de->dirinddiscadd, 3); 214 obj->file_id = adfs_readval(de->dirinddiscadd, 3);
@@ -216,6 +216,23 @@ adfs_dir2obj(struct object_info *obj, struct adfs_direntry *de)
216 obj->execaddr = adfs_readval(de->direxec, 4); 216 obj->execaddr = adfs_readval(de->direxec, 4);
217 obj->size = adfs_readval(de->dirlen, 4); 217 obj->size = adfs_readval(de->dirlen, 4);
218 obj->attr = de->newdiratts; 218 obj->attr = de->newdiratts;
219 obj->filetype = -1;
220
221 /*
222 * object is a file and is filetyped and timestamped?
223 * RISC OS 12-bit filetype is stored in load_address[19:8]
224 */
225 if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
226 (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
227 obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
228
229 /* optionally append the ,xyz hex filetype suffix */
230 if (ADFS_SB(dir->sb)->s_ftsuffix)
231 obj->name_len +=
232 append_filetype_suffix(
233 &obj->name[obj->name_len],
234 obj->filetype);
235 }
219} 236}
220 237
221/* 238/*
@@ -260,7 +277,7 @@ __adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
260 if (!de.dirobname[0]) 277 if (!de.dirobname[0])
261 return -ENOENT; 278 return -ENOENT;
262 279
263 adfs_dir2obj(obj, &de); 280 adfs_dir2obj(dir, obj, &de);
264 281
265 return 0; 282 return 0;
266} 283}
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index 1796bb352d05..d9e3bee4e653 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/buffer_head.h> 10#include <linux/buffer_head.h>
11#include <linux/slab.h>
11#include "adfs.h" 12#include "adfs.h"
12#include "dir_fplus.h" 13#include "dir_fplus.h"
13 14
@@ -22,30 +23,53 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
22 23
23 dir->nr_buffers = 0; 24 dir->nr_buffers = 0;
24 25
26 /* start off using fixed bh set - only alloc for big dirs */
27 dir->bh_fplus = &dir->bh[0];
28
25 block = __adfs_block_map(sb, id, 0); 29 block = __adfs_block_map(sb, id, 0);
26 if (!block) { 30 if (!block) {
27 adfs_error(sb, "dir object %X has a hole at offset 0", id); 31 adfs_error(sb, "dir object %X has a hole at offset 0", id);
28 goto out; 32 goto out;
29 } 33 }
30 34
31 dir->bh[0] = sb_bread(sb, block); 35 dir->bh_fplus[0] = sb_bread(sb, block);
32 if (!dir->bh[0]) 36 if (!dir->bh_fplus[0])
33 goto out; 37 goto out;
34 dir->nr_buffers += 1; 38 dir->nr_buffers += 1;
35 39
36 h = (struct adfs_bigdirheader *)dir->bh[0]->b_data; 40 h = (struct adfs_bigdirheader *)dir->bh_fplus[0]->b_data;
37 size = le32_to_cpu(h->bigdirsize); 41 size = le32_to_cpu(h->bigdirsize);
38 if (size != sz) { 42 if (size != sz) {
39 printk(KERN_WARNING "adfs: adfs_fplus_read: directory header size\n" 43 printk(KERN_WARNING "adfs: adfs_fplus_read:"
40 " does not match directory size\n"); 44 " directory header size %X\n"
45 " does not match directory size %X\n",
46 size, sz);
41 } 47 }
42 48
43 if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 || 49 if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
44 h->bigdirversion[2] != 0 || size & 2047 || 50 h->bigdirversion[2] != 0 || size & 2047 ||
45 h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) 51 h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME)) {
52 printk(KERN_WARNING "adfs: dir object %X has"
53 " malformed dir header\n", id);
46 goto out; 54 goto out;
55 }
47 56
48 size >>= sb->s_blocksize_bits; 57 size >>= sb->s_blocksize_bits;
58 if (size > sizeof(dir->bh)/sizeof(dir->bh[0])) {
59 /* this directory is too big for fixed bh set, must allocate */
60 struct buffer_head **bh_fplus =
61 kzalloc(size * sizeof(struct buffer_head *),
62 GFP_KERNEL);
63 if (!bh_fplus) {
64 adfs_error(sb, "not enough memory for"
65 " dir object %X (%d blocks)", id, size);
66 goto out;
67 }
68 dir->bh_fplus = bh_fplus;
69 /* copy over the pointer to the block that we've already read */
70 dir->bh_fplus[0] = dir->bh[0];
71 }
72
49 for (blk = 1; blk < size; blk++) { 73 for (blk = 1; blk < size; blk++) {
50 block = __adfs_block_map(sb, id, blk); 74 block = __adfs_block_map(sb, id, blk);
51 if (!block) { 75 if (!block) {
@@ -53,25 +77,44 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
53 goto out; 77 goto out;
54 } 78 }
55 79
56 dir->bh[blk] = sb_bread(sb, block); 80 dir->bh_fplus[blk] = sb_bread(sb, block);
57 if (!dir->bh[blk]) 81 if (!dir->bh_fplus[blk]) {
82 adfs_error(sb, "dir object %X failed read for"
83 " offset %d, mapped block %X",
84 id, blk, block);
58 goto out; 85 goto out;
59 dir->nr_buffers = blk; 86 }
87
88 dir->nr_buffers += 1;
60 } 89 }
61 90
62 t = (struct adfs_bigdirtail *)(dir->bh[size - 1]->b_data + (sb->s_blocksize - 8)); 91 t = (struct adfs_bigdirtail *)
92 (dir->bh_fplus[size - 1]->b_data + (sb->s_blocksize - 8));
63 93
64 if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) || 94 if (t->bigdirendname != cpu_to_le32(BIGDIRENDNAME) ||
65 t->bigdirendmasseq != h->startmasseq || 95 t->bigdirendmasseq != h->startmasseq ||
66 t->reserved[0] != 0 || t->reserved[1] != 0) 96 t->reserved[0] != 0 || t->reserved[1] != 0) {
97 printk(KERN_WARNING "adfs: dir object %X has "
98 "malformed dir end\n", id);
67 goto out; 99 goto out;
100 }
68 101
69 dir->parent_id = le32_to_cpu(h->bigdirparent); 102 dir->parent_id = le32_to_cpu(h->bigdirparent);
70 dir->sb = sb; 103 dir->sb = sb;
71 return 0; 104 return 0;
105
72out: 106out:
73 for (i = 0; i < dir->nr_buffers; i++) 107 if (dir->bh_fplus) {
74 brelse(dir->bh[i]); 108 for (i = 0; i < dir->nr_buffers; i++)
109 brelse(dir->bh_fplus[i]);
110
111 if (&dir->bh[0] != dir->bh_fplus)
112 kfree(dir->bh_fplus);
113
114 dir->bh_fplus = NULL;
115 }
116
117 dir->nr_buffers = 0;
75 dir->sb = NULL; 118 dir->sb = NULL;
76 return ret; 119 return ret;
77} 120}
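
adfs_fplus_read() now starts on the four inline buffer_head slots and switches to a heap array only for big directories, freeing it only when it really was allocated. The shape of that small-buffer optimisation, as a self-contained userspace sketch (demo_* names are illustrative):

#include <stdlib.h>

#define NR_FIXED 4

struct demo_dir {
	void *bh[NR_FIXED];	/* inline slots cover the common case */
	void **bh_big;		/* aims at bh[] or at a heap array */
};

static int demo_setup(struct demo_dir *d, size_t nblocks)
{
	d->bh_big = &d->bh[0];
	if (nblocks > NR_FIXED) {	/* too big: allocate, like the kzalloc() above */
		d->bh_big = calloc(nblocks, sizeof(*d->bh_big));
		if (!d->bh_big)
			return -1;
	}
	return 0;
}

static void demo_free(struct demo_dir *d)
{
	if (d->bh_big && d->bh_big != &d->bh[0])
		free(d->bh_big);	/* free only the heap case */
	d->bh_big = NULL;
}

int main(void)
{
	struct demo_dir d;

	if (demo_setup(&d, 12) == 0)	/* 12 > NR_FIXED: heap path */
		demo_free(&d);
	return 0;
}
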
@@ -79,7 +122,8 @@ out:
79static int 122static int
80adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos) 123adfs_fplus_setpos(struct adfs_dir *dir, unsigned int fpos)
81{ 124{
82 struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data; 125 struct adfs_bigdirheader *h =
126 (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
83 int ret = -ENOENT; 127 int ret = -ENOENT;
84 128
85 if (fpos <= le32_to_cpu(h->bigdirentries)) { 129 if (fpos <= le32_to_cpu(h->bigdirentries)) {
@@ -102,21 +146,27 @@ dir_memcpy(struct adfs_dir *dir, unsigned int offset, void *to, int len)
102 partial = sb->s_blocksize - offset; 146 partial = sb->s_blocksize - offset;
103 147
104 if (partial >= len) 148 if (partial >= len)
105 memcpy(to, dir->bh[buffer]->b_data + offset, len); 149 memcpy(to, dir->bh_fplus[buffer]->b_data + offset, len);
106 else { 150 else {
107 char *c = (char *)to; 151 char *c = (char *)to;
108 152
109 remainder = len - partial; 153 remainder = len - partial;
110 154
111 memcpy(c, dir->bh[buffer]->b_data + offset, partial); 155 memcpy(c,
112 memcpy(c + partial, dir->bh[buffer + 1]->b_data, remainder); 156 dir->bh_fplus[buffer]->b_data + offset,
157 partial);
158
159 memcpy(c + partial,
160 dir->bh_fplus[buffer + 1]->b_data,
161 remainder);
113 } 162 }
114} 163}
115 164
116static int 165static int
117adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj) 166adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
118{ 167{
119 struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data; 168 struct adfs_bigdirheader *h =
169 (struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
120 struct adfs_bigdirentry bde; 170 struct adfs_bigdirentry bde;
121 unsigned int offset; 171 unsigned int offset;
122 int i, ret = -ENOENT; 172 int i, ret = -ENOENT;
@@ -147,6 +197,24 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
147 if (obj->name[i] == '/') 197 if (obj->name[i] == '/')
148 obj->name[i] = '.'; 198 obj->name[i] = '.';
149 199
200 obj->filetype = -1;
201
202 /*
203 * object is a file and is filetyped and timestamped?
204 * RISC OS 12-bit filetype is stored in load_address[19:8]
205 */
206 if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
207 (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
208 obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
209
210 /* optionally append the ,xyz hex filetype suffix */
211 if (ADFS_SB(dir->sb)->s_ftsuffix)
212 obj->name_len +=
213 append_filetype_suffix(
214 &obj->name[obj->name_len],
215 obj->filetype);
216 }
217
150 dir->pos += 1; 218 dir->pos += 1;
151 ret = 0; 219 ret = 0;
152out: 220out:
@@ -160,7 +228,7 @@ adfs_fplus_sync(struct adfs_dir *dir)
160 int i; 228 int i;
161 229
162 for (i = dir->nr_buffers - 1; i >= 0; i--) { 230 for (i = dir->nr_buffers - 1; i >= 0; i--) {
163 struct buffer_head *bh = dir->bh[i]; 231 struct buffer_head *bh = dir->bh_fplus[i];
164 sync_dirty_buffer(bh); 232 sync_dirty_buffer(bh);
165 if (buffer_req(bh) && !buffer_uptodate(bh)) 233 if (buffer_req(bh) && !buffer_uptodate(bh))
166 err = -EIO; 234 err = -EIO;
@@ -174,8 +242,17 @@ adfs_fplus_free(struct adfs_dir *dir)
174{ 242{
175 int i; 243 int i;
176 244
177 for (i = 0; i < dir->nr_buffers; i++) 245 if (dir->bh_fplus) {
178 brelse(dir->bh[i]); 246 for (i = 0; i < dir->nr_buffers; i++)
247 brelse(dir->bh_fplus[i]);
248
249 if (&dir->bh[0] != dir->bh_fplus)
250 kfree(dir->bh_fplus);
251
252 dir->bh_fplus = NULL;
253 }
254
255 dir->nr_buffers = 0;
179 dir->sb = NULL; 256 dir->sb = NULL;
180} 257}
181 258
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 09fe40198d1c..92444e94f842 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -78,26 +78,13 @@ static const struct address_space_operations adfs_aops = {
78 .bmap = _adfs_bmap 78 .bmap = _adfs_bmap
79}; 79};
80 80
81static inline unsigned int
82adfs_filetype(struct inode *inode)
83{
84 unsigned int type;
85
86 if (ADFS_I(inode)->stamped)
87 type = (ADFS_I(inode)->loadaddr >> 8) & 0xfff;
88 else
89 type = (unsigned int) -1;
90
91 return type;
92}
93
94/* 81/*
95 * Convert ADFS attributes and filetype to Linux permission. 82 * Convert ADFS attributes and filetype to Linux permission.
96 */ 83 */
97static umode_t 84static umode_t
98adfs_atts2mode(struct super_block *sb, struct inode *inode) 85adfs_atts2mode(struct super_block *sb, struct inode *inode)
99{ 86{
100 unsigned int filetype, attr = ADFS_I(inode)->attr; 87 unsigned int attr = ADFS_I(inode)->attr;
101 umode_t mode, rmask; 88 umode_t mode, rmask;
102 struct adfs_sb_info *asb = ADFS_SB(sb); 89 struct adfs_sb_info *asb = ADFS_SB(sb);
103 90
@@ -106,9 +93,7 @@ adfs_atts2mode(struct super_block *sb, struct inode *inode)
106 return S_IFDIR | S_IXUGO | mode; 93 return S_IFDIR | S_IXUGO | mode;
107 } 94 }
108 95
109 filetype = adfs_filetype(inode); 96 switch (ADFS_I(inode)->filetype) {
110
111 switch (filetype) {
112 case 0xfc0: /* LinkFS */ 97 case 0xfc0: /* LinkFS */
113 return S_IFLNK|S_IRWXUGO; 98 return S_IFLNK|S_IRWXUGO;
114 99
@@ -174,50 +159,48 @@ adfs_mode2atts(struct super_block *sb, struct inode *inode)
174 159
175/* 160/*
176 * Convert an ADFS time to Unix time. ADFS has a 40-bit centi-second time 161 * Convert an ADFS time to Unix time. ADFS has a 40-bit centi-second time
177 * referenced to 1 Jan 1900 (til 2248) 162 * referenced to 1 Jan 1900 (til 2248) so we need to discard 2208988800 seconds
163 * of time to convert from RISC OS epoch to Unix epoch.
178 */ 164 */
179static void 165static void
180adfs_adfs2unix_time(struct timespec *tv, struct inode *inode) 166adfs_adfs2unix_time(struct timespec *tv, struct inode *inode)
181{ 167{
182 unsigned int high, low; 168 unsigned int high, low;
169 /* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since
170 * 01 Jan 1900 00:00:00 (RISC OS epoch)
171 */
172 static const s64 nsec_unix_epoch_diff_risc_os_epoch =
173 2208988800000000000LL;
174 s64 nsec;
183 175
184 if (ADFS_I(inode)->stamped == 0) 176 if (ADFS_I(inode)->stamped == 0)
185 goto cur_time; 177 goto cur_time;
186 178
187 high = ADFS_I(inode)->loadaddr << 24; 179 high = ADFS_I(inode)->loadaddr & 0xFF; /* top 8 bits of timestamp */
188 low = ADFS_I(inode)->execaddr; 180 low = ADFS_I(inode)->execaddr; /* bottom 32 bits of timestamp */
189 181
190 high |= low >> 8; 182 /* convert 40-bit centi-seconds to 32-bit seconds
191 low &= 255; 183 * going via nanoseconds to retain precision
184 */
185 nsec = (((s64) high << 32) | (s64) low) * 10000000; /* cs to ns */
192 186
193 /* Files dated pre 01 Jan 1970 00:00:00. */ 187 /* Files dated pre 01 Jan 1970 00:00:00. */
194 if (high < 0x336e996a) 188 if (nsec < nsec_unix_epoch_diff_risc_os_epoch)
195 goto too_early; 189 goto too_early;
196 190
197 /* Files dated post 18 Jan 2038 03:14:05. */ 191 /* convert from RISC OS to Unix epoch */
198 if (high >= 0x656e9969) 192 nsec -= nsec_unix_epoch_diff_risc_os_epoch;
199 goto too_late;
200 193
201 /* discard 2208988800 (0x336e996a00) seconds of time */ 194 *tv = ns_to_timespec(nsec);
202 high -= 0x336e996a;
203
204 /* convert 40-bit centi-seconds to 32-bit seconds */
205 tv->tv_sec = (((high % 100) << 8) + low) / 100 + (high / 100 << 8);
206 tv->tv_nsec = 0;
207 return; 195 return;
208 196
209 cur_time: 197 cur_time:
210 *tv = CURRENT_TIME_SEC; 198 *tv = CURRENT_TIME;
211 return; 199 return;
212 200
213 too_early: 201 too_early:
214 tv->tv_sec = tv->tv_nsec = 0; 202 tv->tv_sec = tv->tv_nsec = 0;
215 return; 203 return;
216
217 too_late:
218 tv->tv_sec = 0x7ffffffd;
219 tv->tv_nsec = 0;
220 return;
221} 204}
222 205
223/* 206/*
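
The rewritten adfs_adfs2unix_time() widens the 40-bit centi-second stamp (top 8 bits from loadaddr, low 32 bits from execaddr) to signed 64-bit nanoseconds before subtracting the 2208988800 seconds between the 1900 and 1970 epochs, which is what lets it drop the hand-rolled pre-1970/post-2038 boundary constants. A runnable check of the arithmetic, fed with the root-directory stamp that fs/adfs/super.c sets further down:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t high = 0xfff0003f & 0xff;	/* loadaddr: top 8 stamp bits */
	uint32_t low = 0xec22c000;		/* execaddr: low 32 stamp bits */
	int64_t nsec = (((int64_t)high << 32) | low) * 10000000LL; /* cs -> ns */
	int64_t epoch_diff = 2208988800LL * 1000000000LL; /* 1900 -> 1970 */

	if (nsec < epoch_diff)
		puts("pre-1970 stamp");
	else	/* prints 536457600, i.e. 01 Jan 1987 00:00:00 UTC */
		printf("%lld s since the Unix epoch\n",
		       (long long)((nsec - epoch_diff) / 1000000000LL));
	return 0;
}
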
@@ -279,7 +262,8 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
279 ADFS_I(inode)->loadaddr = obj->loadaddr; 262 ADFS_I(inode)->loadaddr = obj->loadaddr;
280 ADFS_I(inode)->execaddr = obj->execaddr; 263 ADFS_I(inode)->execaddr = obj->execaddr;
281 ADFS_I(inode)->attr = obj->attr; 264 ADFS_I(inode)->attr = obj->attr;
282 ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000); 265 ADFS_I(inode)->filetype = obj->filetype;
266 ADFS_I(inode)->stamped = ((obj->loadaddr & 0xfff00000) == 0xfff00000);
283 267
284 inode->i_mode = adfs_atts2mode(sb, inode); 268 inode->i_mode = adfs_atts2mode(sb, inode);
285 adfs_adfs2unix_time(&inode->i_mtime, inode); 269 adfs_adfs2unix_time(&inode->i_mtime, inode);
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 06d7388b477b..c8bf36a1996a 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -138,17 +138,20 @@ static int adfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
138 seq_printf(seq, ",ownmask=%o", asb->s_owner_mask); 138 seq_printf(seq, ",ownmask=%o", asb->s_owner_mask);
139 if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK) 139 if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK)
140 seq_printf(seq, ",othmask=%o", asb->s_other_mask); 140 seq_printf(seq, ",othmask=%o", asb->s_other_mask);
141 if (asb->s_ftsuffix != 0)
142 seq_printf(seq, ",ftsuffix=%u", asb->s_ftsuffix);
141 143
142 return 0; 144 return 0;
143} 145}
144 146
145enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_err}; 147enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_ftsuffix, Opt_err};
146 148
147static const match_table_t tokens = { 149static const match_table_t tokens = {
148 {Opt_uid, "uid=%u"}, 150 {Opt_uid, "uid=%u"},
149 {Opt_gid, "gid=%u"}, 151 {Opt_gid, "gid=%u"},
150 {Opt_ownmask, "ownmask=%o"}, 152 {Opt_ownmask, "ownmask=%o"},
151 {Opt_othmask, "othmask=%o"}, 153 {Opt_othmask, "othmask=%o"},
154 {Opt_ftsuffix, "ftsuffix=%u"},
152 {Opt_err, NULL} 155 {Opt_err, NULL}
153}; 156};
154 157
@@ -189,6 +192,11 @@ static int parse_options(struct super_block *sb, char *options)
189 return -EINVAL; 192 return -EINVAL;
190 asb->s_other_mask = option; 193 asb->s_other_mask = option;
191 break; 194 break;
195 case Opt_ftsuffix:
196 if (match_int(args, &option))
197 return -EINVAL;
198 asb->s_ftsuffix = option;
199 break;
192 default: 200 default:
193 printk("ADFS-fs: unrecognised mount option \"%s\" " 201 printk("ADFS-fs: unrecognised mount option \"%s\" "
194 "or missing value\n", p); 202 "or missing value\n", p);
@@ -366,6 +374,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
366 asb->s_gid = 0; 374 asb->s_gid = 0;
367 asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK; 375 asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
368 asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK; 376 asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
377 asb->s_ftsuffix = 0;
369 378
370 if (parse_options(sb, data)) 379 if (parse_options(sb, data))
371 goto error; 380 goto error;
@@ -445,11 +454,13 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
445 454
446 root_obj.parent_id = root_obj.file_id = le32_to_cpu(dr->root); 455 root_obj.parent_id = root_obj.file_id = le32_to_cpu(dr->root);
447 root_obj.name_len = 0; 456 root_obj.name_len = 0;
448 root_obj.loadaddr = 0; 457 /* Set root object date as 01 Jan 1987 00:00:00 */
449 root_obj.execaddr = 0; 458 root_obj.loadaddr = 0xfff0003f;
459 root_obj.execaddr = 0xec22c000;
450 root_obj.size = ADFS_NEWDIR_SIZE; 460 root_obj.size = ADFS_NEWDIR_SIZE;
451 root_obj.attr = ADFS_NDA_DIRECTORY | ADFS_NDA_OWNER_READ | 461 root_obj.attr = ADFS_NDA_DIRECTORY | ADFS_NDA_OWNER_READ |
452 ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ; 462 ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ;
463 root_obj.filetype = -1;
453 464
454 /* 465 /*
455 * If this is a F+ disk with variable length directories, 466 * If this is a F+ disk with variable length directories,
@@ -463,6 +474,12 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
463 asb->s_dir = &adfs_f_dir_ops; 474 asb->s_dir = &adfs_f_dir_ops;
464 asb->s_namelen = ADFS_F_NAME_LEN; 475 asb->s_namelen = ADFS_F_NAME_LEN;
465 } 476 }
477 /*
478 * ,xyz hex filetype suffix may be added by driver
479 * to files that have valid RISC OS filetype
480 */
481 if (asb->s_ftsuffix)
482 asb->s_namelen += 4;
466 483
467 sb->s_d_op = &adfs_dentry_operations; 484 sb->s_d_op = &adfs_dentry_operations;
468 root = adfs_iget(sb, &root_obj); 485 root = adfs_iget(sb, &root_obj);
diff --git a/fs/aio.c b/fs/aio.c
index 7f54f43b8f7c..ebb6a22e4e1b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -520,7 +520,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
520 ctx->reqs_active--; 520 ctx->reqs_active--;
521 521
522 if (unlikely(!ctx->reqs_active && ctx->dead)) 522 if (unlikely(!ctx->reqs_active && ctx->dead))
523 wake_up(&ctx->wait); 523 wake_up_all(&ctx->wait);
524} 524}
525 525
526static void aio_fput_routine(struct work_struct *data) 526static void aio_fput_routine(struct work_struct *data)
@@ -1229,7 +1229,7 @@ static void io_destroy(struct kioctx *ioctx)
1229 * by other CPUs at this point. Right now, we rely on the 1229 * by other CPUs at this point. Right now, we rely on the
1230 * locking done by the above calls to ensure this consistency. 1230 * locking done by the above calls to ensure this consistency.
1231 */ 1231 */
1232 wake_up(&ioctx->wait); 1232 wake_up_all(&ioctx->wait);
1233 put_ioctx(ioctx); /* once for the lookup */ 1233 put_ioctx(ioctx); /* once for the lookup */
1234} 1234}
1235 1235
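
The aio hunks widen the teardown wakeups because the io_getevents() path waits exclusively on ctx->wait, and wake_up() wakes at most one exclusive waiter; with several tasks sleeping there, only one would notice ctx->dead, so teardown must use wake_up_all(). A rough pthread analogy of the two wakeup widths (illustrative only, not the kernel API):

#include <pthread.h>

static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;

void wake_one(void)		/* like wake_up(): one waiter proceeds */
{
	pthread_cond_signal(&waitq);
}

void wake_everyone(void)	/* like wake_up_all(): teardown needs this */
{
	pthread_cond_broadcast(&waitq);
}

int main(void)
{
	wake_one();
	wake_everyone();
	return 0;
}
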
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d5b640ba6cb1..b2fae009a4b7 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -570,7 +570,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
570 unsigned long elf_entry; 570 unsigned long elf_entry;
571 unsigned long interp_load_addr = 0; 571 unsigned long interp_load_addr = 0;
572 unsigned long start_code, end_code, start_data, end_data; 572 unsigned long start_code, end_code, start_data, end_data;
573 unsigned long reloc_func_desc = 0; 573 unsigned long reloc_func_desc __maybe_unused = 0;
574 int executable_stack = EXSTACK_DEFAULT; 574 int executable_stack = EXSTACK_DEFAULT;
575 unsigned long def_flags = 0; 575 unsigned long def_flags = 0;
576 struct { 576 struct {
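
On many configurations nothing ends up reading reloc_func_desc, so gcc's "set but not used" warning fired; __maybe_unused keeps the bookkeeping and silences it, at no run-time cost. A minimal demonstration (the macro expansion matches the kernel's <linux/compiler-gcc.h>):

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

int main(void)
{
	unsigned long reloc_func_desc __maybe_unused = 0;

	/* builds warning-free even with -Wall -Wextra */
	puts("no 'set but not used' warning");
	return 0;
}
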
diff --git a/fs/bio.c b/fs/bio.c
index 4bd454fa844e..4cf2a52fbc54 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -111,7 +111,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
111 if (!slab) 111 if (!slab)
112 goto out_unlock; 112 goto out_unlock;
113 113
114 printk("bio: create slab <%s> at %d\n", bslab->name, entry); 114 printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
115 bslab->slab = slab; 115 bslab->slab = slab;
116 bslab->slab_ref = 1; 116 bslab->slab_ref = 1;
117 bslab->slab_size = sz; 117 bslab->slab_size = sz;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index f5ec2d44150d..faccd47c6c46 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -57,7 +57,8 @@ static struct list_head *zlib_alloc_workspace(void)
57 if (!workspace) 57 if (!workspace)
58 return ERR_PTR(-ENOMEM); 58 return ERR_PTR(-ENOMEM);
59 59
60 workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); 60 workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize(
61 MAX_WBITS, MAX_MEM_LEVEL));
61 workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); 62 workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
62 workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); 63 workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS);
63 if (!workspace->def_strm.workspace || 64 if (!workspace->def_strm.workspace ||
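
zlib_deflate_workspacesize() now takes the window and memory-level bounds, so callers that use smaller windows can allocate smaller workspaces; btrfs passes MAX_WBITS/MAX_MEM_LEVEL to keep its old sizing. Userspace zlib exposes the same two knobs through deflateInit2(); a hedged illustration of those parameters (ordinary zlib, not the kernel wrapper):

#include <stdio.h>
#include <zlib.h>

int main(void)
{
	z_stream s = { 0 };
	/* 15 == MAX_WBITS; memLevel 9 == MAX_MEM_LEVEL (8 is the default) */
	int rc = deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      15, 9, Z_DEFAULT_STRATEGY);

	printf("deflateInit2: %s\n", rc == Z_OK ? "ok" : "failed");
	deflateEnd(&s);
	return 0;
}
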
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 08f65faac112..0dba6915712b 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -210,8 +210,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
210 if (!fsc->debugfs_congestion_kb) 210 if (!fsc->debugfs_congestion_kb)
211 goto out; 211 goto out;
212 212
213 dout("a\n");
214
215 snprintf(name, sizeof(name), "../../bdi/%s", 213 snprintf(name, sizeof(name), "../../bdi/%s",
216 dev_name(fsc->backing_dev_info.dev)); 214 dev_name(fsc->backing_dev_info.dev));
217 fsc->debugfs_bdi = 215 fsc->debugfs_bdi =
@@ -221,7 +219,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
221 if (!fsc->debugfs_bdi) 219 if (!fsc->debugfs_bdi)
222 goto out; 220 goto out;
223 221
224 dout("b\n");
225 fsc->debugfs_mdsmap = debugfs_create_file("mdsmap", 222 fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
226 0600, 223 0600,
227 fsc->client->debugfs_dir, 224 fsc->client->debugfs_dir,
@@ -230,7 +227,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
230 if (!fsc->debugfs_mdsmap) 227 if (!fsc->debugfs_mdsmap)
231 goto out; 228 goto out;
232 229
233 dout("ca\n");
234 fsc->debugfs_mdsc = debugfs_create_file("mdsc", 230 fsc->debugfs_mdsc = debugfs_create_file("mdsc",
235 0600, 231 0600,
236 fsc->client->debugfs_dir, 232 fsc->client->debugfs_dir,
@@ -239,7 +235,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
239 if (!fsc->debugfs_mdsc) 235 if (!fsc->debugfs_mdsc)
240 goto out; 236 goto out;
241 237
242 dout("da\n");
243 fsc->debugfs_caps = debugfs_create_file("caps", 238 fsc->debugfs_caps = debugfs_create_file("caps",
244 0400, 239 0400,
245 fsc->client->debugfs_dir, 240 fsc->client->debugfs_dir,
@@ -248,7 +243,6 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
248 if (!fsc->debugfs_caps) 243 if (!fsc->debugfs_caps)
249 goto out; 244 goto out;
250 245
251 dout("ea\n");
252 fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru", 246 fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
253 0600, 247 0600,
254 fsc->client->debugfs_dir, 248 fsc->client->debugfs_dir,
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ebafa65a29b6..1a867a3601ae 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -161,7 +161,7 @@ more:
161 filp->f_pos = di->offset; 161 filp->f_pos = di->offset;
162 err = filldir(dirent, dentry->d_name.name, 162 err = filldir(dirent, dentry->d_name.name,
163 dentry->d_name.len, di->offset, 163 dentry->d_name.len, di->offset,
164 dentry->d_inode->i_ino, 164 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
165 dentry->d_inode->i_mode >> 12); 165 dentry->d_inode->i_mode >> 12);
166 166
167 if (last) { 167 if (last) {
@@ -245,15 +245,17 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
245 245
246 dout("readdir off 0 -> '.'\n"); 246 dout("readdir off 0 -> '.'\n");
247 if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0), 247 if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
248 inode->i_ino, inode->i_mode >> 12) < 0) 248 ceph_translate_ino(inode->i_sb, inode->i_ino),
249 inode->i_mode >> 12) < 0)
249 return 0; 250 return 0;
250 filp->f_pos = 1; 251 filp->f_pos = 1;
251 off = 1; 252 off = 1;
252 } 253 }
253 if (filp->f_pos == 1) { 254 if (filp->f_pos == 1) {
255 ino_t ino = filp->f_dentry->d_parent->d_inode->i_ino;
254 dout("readdir off 1 -> '..'\n"); 256 dout("readdir off 1 -> '..'\n");
255 if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1), 257 if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
256 filp->f_dentry->d_parent->d_inode->i_ino, 258 ceph_translate_ino(inode->i_sb, ino),
257 inode->i_mode >> 12) < 0) 259 inode->i_mode >> 12) < 0)
258 return 0; 260 return 0;
259 filp->f_pos = 2; 261 filp->f_pos = 2;
@@ -377,7 +379,8 @@ more:
377 if (filldir(dirent, 379 if (filldir(dirent,
378 rinfo->dir_dname[off - fi->offset], 380 rinfo->dir_dname[off - fi->offset],
379 rinfo->dir_dname_len[off - fi->offset], 381 rinfo->dir_dname_len[off - fi->offset],
380 pos, ino, ftype) < 0) { 382 pos,
383 ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
381 dout("filldir stopping us...\n"); 384 dout("filldir stopping us...\n");
382 return 0; 385 return 0;
383 } 386 }
@@ -1024,14 +1027,13 @@ out_touch:
1024} 1027}
1025 1028
1026/* 1029/*
1027 * When a dentry is released, clear the dir I_COMPLETE if it was part 1030 * Release our ceph_dentry_info.
1028 * of the current dir gen or if this is in the snapshot namespace.
1029 */ 1031 */
1030static void ceph_dentry_release(struct dentry *dentry) 1032static void ceph_d_release(struct dentry *dentry)
1031{ 1033{
1032 struct ceph_dentry_info *di = ceph_dentry(dentry); 1034 struct ceph_dentry_info *di = ceph_dentry(dentry);
1033 1035
1034 dout("dentry_release %p\n", dentry); 1036 dout("d_release %p\n", dentry);
1035 if (di) { 1037 if (di) {
1036 ceph_dentry_lru_del(dentry); 1038 ceph_dentry_lru_del(dentry);
1037 if (di->lease_session) 1039 if (di->lease_session)
@@ -1256,14 +1258,14 @@ const struct inode_operations ceph_dir_iops = {
1256 1258
1257const struct dentry_operations ceph_dentry_ops = { 1259const struct dentry_operations ceph_dentry_ops = {
1258 .d_revalidate = ceph_d_revalidate, 1260 .d_revalidate = ceph_d_revalidate,
1259 .d_release = ceph_dentry_release, 1261 .d_release = ceph_d_release,
1260}; 1262};
1261 1263
1262const struct dentry_operations ceph_snapdir_dentry_ops = { 1264const struct dentry_operations ceph_snapdir_dentry_ops = {
1263 .d_revalidate = ceph_snapdir_d_revalidate, 1265 .d_revalidate = ceph_snapdir_d_revalidate,
1264 .d_release = ceph_dentry_release, 1266 .d_release = ceph_d_release,
1265}; 1267};
1266 1268
1267const struct dentry_operations ceph_snap_dentry_ops = { 1269const struct dentry_operations ceph_snap_dentry_ops = {
1268 .d_release = ceph_dentry_release, 1270 .d_release = ceph_d_release,
1269}; 1271};
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 7d0e4a82d898..159b512d5a27 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -564,11 +564,19 @@ more:
564 * start_request so that a tid has been assigned. 564 * start_request so that a tid has been assigned.
565 */ 565 */
566 spin_lock(&ci->i_unsafe_lock); 566 spin_lock(&ci->i_unsafe_lock);
567 list_add(&req->r_unsafe_item, &ci->i_unsafe_writes); 567 list_add_tail(&req->r_unsafe_item,
568 &ci->i_unsafe_writes);
568 spin_unlock(&ci->i_unsafe_lock); 569 spin_unlock(&ci->i_unsafe_lock);
569 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 570 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
570 } 571 }
572
571 ret = ceph_osdc_wait_request(&fsc->client->osdc, req); 573 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
574 if (ret < 0 && req->r_safe_callback) {
575 spin_lock(&ci->i_unsafe_lock);
576 list_del_init(&req->r_unsafe_item);
577 spin_unlock(&ci->i_unsafe_lock);
578 ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
579 }
572 } 580 }
573 581
574 if (file->f_flags & O_DIRECT) 582 if (file->f_flags & O_DIRECT)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 193bfa5e9cbd..b54c97da1c43 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -36,6 +36,13 @@ static void ceph_vmtruncate_work(struct work_struct *work);
36/* 36/*
37 * find or create an inode, given the ceph ino number 37 * find or create an inode, given the ceph ino number
38 */ 38 */
39static int ceph_set_ino_cb(struct inode *inode, void *data)
40{
41 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
42 inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
43 return 0;
44}
45
39struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) 46struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
40{ 47{
41 struct inode *inode; 48 struct inode *inode;
@@ -1030,9 +1037,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1030 dout("fill_trace doing d_move %p -> %p\n", 1037 dout("fill_trace doing d_move %p -> %p\n",
1031 req->r_old_dentry, dn); 1038 req->r_old_dentry, dn);
1032 1039
1033 /* d_move screws up d_subdirs order */
1034 ceph_i_clear(dir, CEPH_I_COMPLETE);
1035
1036 d_move(req->r_old_dentry, dn); 1040 d_move(req->r_old_dentry, dn);
1037 dout(" src %p '%.*s' dst %p '%.*s'\n", 1041 dout(" src %p '%.*s' dst %p '%.*s'\n",
1038 req->r_old_dentry, 1042 req->r_old_dentry,
@@ -1044,12 +1048,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1044 rehashing bug in vfs_rename_dir */ 1048 rehashing bug in vfs_rename_dir */
1045 ceph_invalidate_dentry_lease(dn); 1049 ceph_invalidate_dentry_lease(dn);
1046 1050
1047 /* take overwritten dentry's readdir offset */ 1051 /*
1048 dout("dn %p gets %p offset %lld (old offset %lld)\n", 1052 * d_move() puts the renamed dentry at the end of
1049 req->r_old_dentry, dn, ceph_dentry(dn)->offset, 1053 * d_subdirs. We need to assign it an appropriate
1054 * directory offset so we can behave when holding
1055 * I_COMPLETE.
1056 */
1057 ceph_set_dentry_offset(req->r_old_dentry);
1058 dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1050 ceph_dentry(req->r_old_dentry)->offset); 1059 ceph_dentry(req->r_old_dentry)->offset);
1051 ceph_dentry(req->r_old_dentry)->offset =
1052 ceph_dentry(dn)->offset;
1053 1060
1054 dn = req->r_old_dentry; /* use old_dentry */ 1061 dn = req->r_old_dentry; /* use old_dentry */
1055 in = dn->d_inode; 1062 in = dn->d_inode;
@@ -1809,7 +1816,7 @@ int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1809 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); 1816 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1810 if (!err) { 1817 if (!err) {
1811 generic_fillattr(inode, stat); 1818 generic_fillattr(inode, stat);
1812 stat->ino = inode->i_ino; 1819 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
1813 if (ceph_snap(inode) != CEPH_NOSNAP) 1820 if (ceph_snap(inode) != CEPH_NOSNAP)
1814 stat->dev = ceph_snap(inode); 1821 stat->dev = ceph_snap(inode);
1815 else 1822 else
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9c5085465a63..a9e78b4a258c 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -131,6 +131,7 @@ enum {
131 Opt_rbytes, 131 Opt_rbytes,
132 Opt_norbytes, 132 Opt_norbytes,
133 Opt_noasyncreaddir, 133 Opt_noasyncreaddir,
134 Opt_ino32,
134}; 135};
135 136
136static match_table_t fsopt_tokens = { 137static match_table_t fsopt_tokens = {
@@ -150,6 +151,7 @@ static match_table_t fsopt_tokens = {
150 {Opt_rbytes, "rbytes"}, 151 {Opt_rbytes, "rbytes"},
151 {Opt_norbytes, "norbytes"}, 152 {Opt_norbytes, "norbytes"},
152 {Opt_noasyncreaddir, "noasyncreaddir"}, 153 {Opt_noasyncreaddir, "noasyncreaddir"},
154 {Opt_ino32, "ino32"},
153 {-1, NULL} 155 {-1, NULL}
154}; 156};
155 157
@@ -225,6 +227,9 @@ static int parse_fsopt_token(char *c, void *private)
225 case Opt_noasyncreaddir: 227 case Opt_noasyncreaddir:
226 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR; 228 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
227 break; 229 break;
230 case Opt_ino32:
231 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
232 break;
228 default: 233 default:
229 BUG_ON(token); 234 BUG_ON(token);
230 } 235 }
@@ -288,7 +293,7 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
288 fsopt->sb_flags = flags; 293 fsopt->sb_flags = flags;
289 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT; 294 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
290 295
291 fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT; 296 fsopt->rsize = CEPH_RSIZE_DEFAULT;
292 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); 297 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
293 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; 298 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
294 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; 299 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
@@ -370,7 +375,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
370 375
371 if (fsopt->wsize) 376 if (fsopt->wsize)
372 seq_printf(m, ",wsize=%d", fsopt->wsize); 377 seq_printf(m, ",wsize=%d", fsopt->wsize);
373 if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT) 378 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
374 seq_printf(m, ",rsize=%d", fsopt->rsize); 379 seq_printf(m, ",rsize=%d", fsopt->rsize);
375 if (fsopt->congestion_kb != default_congestion_kb()) 380 if (fsopt->congestion_kb != default_congestion_kb())
376 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb); 381 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 20b907d76ae2..619fe719968f 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -27,6 +27,7 @@
27#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */ 27#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
28#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */ 28#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
29#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */ 29#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
30#define CEPH_MOUNT_OPT_INO32 (1<<8) /* 32 bit inos */
30 31
31#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES) 32#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES)
32 33
@@ -35,6 +36,7 @@
35#define ceph_test_mount_opt(fsc, opt) \ 36#define ceph_test_mount_opt(fsc, opt) \
36 (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt)) 37 (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
37 38
39#define CEPH_RSIZE_DEFAULT (512*1024) /* readahead */
38#define CEPH_MAX_READDIR_DEFAULT 1024 40#define CEPH_MAX_READDIR_DEFAULT 1024
39#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024) 41#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
40#define CEPH_SNAPDIRNAME_DEFAULT ".snap" 42#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
@@ -319,6 +321,16 @@ static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
319 return container_of(inode, struct ceph_inode_info, vfs_inode); 321 return container_of(inode, struct ceph_inode_info, vfs_inode);
320} 322}
321 323
324static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
325{
326 return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
327}
328
329static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
330{
331 return (struct ceph_fs_client *)sb->s_fs_info;
332}
333
322static inline struct ceph_vino ceph_vino(struct inode *inode) 334static inline struct ceph_vino ceph_vino(struct inode *inode)
323{ 335{
324 return ceph_inode(inode)->i_vino; 336 return ceph_inode(inode)->i_vino;
@@ -327,19 +339,49 @@ static inline struct ceph_vino ceph_vino(struct inode *inode)
327/* 339/*
328 * ino_t is <64 bits on many architectures, blech. 340 * ino_t is <64 bits on many architectures, blech.
329 * 341 *
330 * don't include snap in ino hash, at least for now. 342 * i_ino (kernel inode) st_ino (userspace)
343 * i386 32 32
344 * x86_64+ino32 64 32
345 * x86_64 64 64
346 */
347static inline u32 ceph_ino_to_ino32(ino_t ino)
348{
349 ino ^= ino >> (sizeof(ino) * 8 - 32);
350 if (!ino)
351 ino = 1;
352 return ino;
353}
354
355/*
356 * kernel i_ino value
331 */ 357 */
332static inline ino_t ceph_vino_to_ino(struct ceph_vino vino) 358static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
333{ 359{
334 ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */ 360 ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
335#if BITS_PER_LONG == 32 361#if BITS_PER_LONG == 32
336 ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8; 362 ino = ceph_ino_to_ino32(ino);
337 if (!ino)
338 ino = 1;
339#endif 363#endif
340 return ino; 364 return ino;
341} 365}
342 366
367/*
368 * user-visible ino (stat, filldir)
369 */
370#if BITS_PER_LONG == 32
371static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
372{
373 return ino;
374}
375#else
376static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino)
377{
378 if (ceph_test_mount_opt(ceph_sb_to_client(sb), INO32))
379 ino = ceph_ino_to_ino32(ino);
380 return ino;
381}
382#endif
383
384
343/* for printf-style formatting */ 385/* for printf-style formatting */
344#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap 386#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
345 387
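
ceph_ino_to_ino32() folds a 64-bit inode number into 32 bits by xor-ing the high half into the low half, reserving 0. A runnable userspace copy of the fold (same logic, fixed-width types):

#include <stdio.h>
#include <stdint.h>

static uint32_t ino_to_ino32(uint64_t ino)
{
	ino ^= ino >> 32;	/* sizeof(ino) * 8 - 32 == 32 for a 64-bit ino_t */
	if (!ino)
		ino = 1;	/* 0 is reserved */
	return (uint32_t)ino;
}

int main(void)
{
	/* 0x500000007: high half 5 folds into low half 7 -> prints 2 */
	printf("%u\n", ino_to_ino32(0x500000007ULL));
	return 0;
}
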
@@ -428,13 +470,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
428 return ((loff_t)frag << 32) | (loff_t)off; 470 return ((loff_t)frag << 32) | (loff_t)off;
429} 471}
430 472
431static inline int ceph_set_ino_cb(struct inode *inode, void *data)
432{
433 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
434 inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
435 return 0;
436}
437
438/* 473/*
439 * caps helpers 474 * caps helpers
440 */ 475 */
@@ -503,15 +538,6 @@ extern void ceph_reservation_status(struct ceph_fs_client *client,
503 int *total, int *avail, int *used, 538 int *total, int *avail, int *used,
504 int *reserved, int *min); 539 int *reserved, int *min);
505 540
506static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
507{
508 return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
509}
510
511static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
512{
513 return (struct ceph_fs_client *)sb->s_fs_info;
514}
515 541
516 542
517/* 543/*
diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c
index c6405ce3c50e..06d27a41807f 100644
--- a/fs/coda/sysctl.c
+++ b/fs/coda/sysctl.c
@@ -13,7 +13,6 @@
13 13
14#ifdef CONFIG_SYSCTL 14#ifdef CONFIG_SYSCTL
15static struct ctl_table_header *fs_table_header; 15static struct ctl_table_header *fs_table_header;
16#endif
17 16
18static ctl_table coda_table[] = { 17static ctl_table coda_table[] = {
19 { 18 {
@@ -40,7 +39,6 @@ static ctl_table coda_table[] = {
40 {} 39 {}
41}; 40};
42 41
43#ifdef CONFIG_SYSCTL
44static ctl_table fs_table[] = { 42static ctl_table fs_table[] = {
45 { 43 {
46 .procname = "coda", 44 .procname = "coda",
@@ -49,22 +47,18 @@ static ctl_table fs_table[] = {
49 }, 47 },
50 {} 48 {}
51}; 49};
52#endif
53 50
54void coda_sysctl_init(void) 51void coda_sysctl_init(void)
55{ 52{
56#ifdef CONFIG_SYSCTL
57 if ( !fs_table_header ) 53 if ( !fs_table_header )
58 fs_table_header = register_sysctl_table(fs_table); 54 fs_table_header = register_sysctl_table(fs_table);
59#endif
60} 55}
61 56
62void coda_sysctl_clean(void) 57void coda_sysctl_clean(void)
63{ 58{
64#ifdef CONFIG_SYSCTL
65 if ( fs_table_header ) { 59 if ( fs_table_header ) {
66 unregister_sysctl_table(fs_table_header); 60 unregister_sysctl_table(fs_table_header);
67 fs_table_header = NULL; 61 fs_table_header = NULL;
68 } 62 }
69#endif
70} 63}
64#endif
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index c6bd815dc794..2f27e578d466 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -502,7 +502,7 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
502 mutex_lock(&root->d_inode->i_mutex); 502 mutex_lock(&root->d_inode->i_mutex);
503 503
504 dentry = d_alloc_name(root, s); 504 dentry = d_alloc_name(root, s);
505 if (!IS_ERR(dentry)) { 505 if (dentry) {
506 d_add(dentry, inode); 506 d_add(dentry, inode);
507 fsnotify_create(root->d_inode, dentry); 507 fsnotify_create(root->d_inode, dentry);
508 } else { 508 } else {
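
The one-character devpts fix matters because d_alloc_name() reports failure with NULL, not with an ERR_PTR() encoding, and IS_ERR(NULL) is false, so the old check treated allocation failure as success. A runnable model of the two failure conventions (IS_ERR/ERR_PTR re-implemented along the lines of the kernel's err.h):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define ERR_PTR(e)	((void *)(long)(e))

int main(void)
{
	void *err = ERR_PTR(-ENOMEM);	/* error encoded in the pointer */
	void *nul = NULL;		/* what d_alloc_name() returns on failure */

	printf("IS_ERR(ERR_PTR(-ENOMEM)) = %d\n", (int)IS_ERR(err)); /* 1 */
	printf("IS_ERR(NULL)             = %d\n", (int)IS_ERR(nul)); /* 0 */
	return 0;
}
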
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ff12f7ac73ef..ed38801b57a7 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -316,6 +316,19 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
316} 316}
317 317
318/** 318/**
319 * ep_events_available - Checks if ready events might be available.
320 *
321 * @ep: Pointer to the eventpoll context.
322 *
 323 * Returns: a value other than zero if ready events are available,
324 * or zero otherwise.
325 */
326static inline int ep_events_available(struct eventpoll *ep)
327{
328 return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
329}
330
331/**
319 * ep_call_nested - Perform a bound (possibly) nested call, by checking 332 * ep_call_nested - Perform a bound (possibly) nested call, by checking
320 * that the recursion limit is not exceeded, and that 333 * that the recursion limit is not exceeded, and that
321 * the same nested call (by the meaning of same cookie) is 334 * the same nested call (by the meaning of same cookie) is
@@ -1135,12 +1148,29 @@ static inline struct timespec ep_set_mstimeout(long ms)
1135 return timespec_add_safe(now, ts); 1148 return timespec_add_safe(now, ts);
1136} 1149}
1137 1150
1151/**
1152 * ep_poll - Retrieves ready events, and delivers them to the caller supplied
1153 * event buffer.
1154 *
1155 * @ep: Pointer to the eventpoll context.
1156 * @events: Pointer to the userspace buffer where the ready events should be
1157 * stored.
1158 * @maxevents: Size (in terms of number of events) of the caller event buffer.
1159 * @timeout: Maximum timeout for the ready events fetch operation, in
1160 * milliseconds. If the @timeout is zero, the function will not block,
1161 * while if the @timeout is less than zero, the function will block
1162 * until at least one event has been retrieved (or an error
1163 * occurred).
1164 *
 1165 * Returns: the number of ready events which have been fetched, or an
1166 * error code, in case of error.
1167 */
1138static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, 1168static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1139 int maxevents, long timeout) 1169 int maxevents, long timeout)
1140{ 1170{
1141 int res, eavail, timed_out = 0; 1171 int res = 0, eavail, timed_out = 0;
1142 unsigned long flags; 1172 unsigned long flags;
1143 long slack; 1173 long slack = 0;
1144 wait_queue_t wait; 1174 wait_queue_t wait;
1145 ktime_t expires, *to = NULL; 1175 ktime_t expires, *to = NULL;
1146 1176
@@ -1151,14 +1181,19 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1151 to = &expires; 1181 to = &expires;
1152 *to = timespec_to_ktime(end_time); 1182 *to = timespec_to_ktime(end_time);
1153 } else if (timeout == 0) { 1183 } else if (timeout == 0) {
1184 /*
1185 * Avoid the unnecessary trip to the wait queue loop, if the
 1186 * caller specified a non-blocking operation.
1187 */
1154 timed_out = 1; 1188 timed_out = 1;
1189 spin_lock_irqsave(&ep->lock, flags);
1190 goto check_events;
1155 } 1191 }
1156 1192
1157retry: 1193fetch_events:
1158 spin_lock_irqsave(&ep->lock, flags); 1194 spin_lock_irqsave(&ep->lock, flags);
1159 1195
1160 res = 0; 1196 if (!ep_events_available(ep)) {
1161 if (list_empty(&ep->rdllist)) {
1162 /* 1197 /*
1163 * We don't have any available event to return to the caller. 1198 * We don't have any available event to return to the caller.
 1164 * We need to sleep here, and we will be woken up by 1199
@@ -1174,7 +1209,7 @@ retry:
1174 * to TASK_INTERRUPTIBLE before doing the checks. 1209 * to TASK_INTERRUPTIBLE before doing the checks.
1175 */ 1210 */
1176 set_current_state(TASK_INTERRUPTIBLE); 1211 set_current_state(TASK_INTERRUPTIBLE);
1177 if (!list_empty(&ep->rdllist) || timed_out) 1212 if (ep_events_available(ep) || timed_out)
1178 break; 1213 break;
1179 if (signal_pending(current)) { 1214 if (signal_pending(current)) {
1180 res = -EINTR; 1215 res = -EINTR;
@@ -1191,8 +1226,9 @@ retry:
1191 1226
1192 set_current_state(TASK_RUNNING); 1227 set_current_state(TASK_RUNNING);
1193 } 1228 }
1229check_events:
1194 /* Is it worth to try to dig for events ? */ 1230 /* Is it worth to try to dig for events ? */
1195 eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR; 1231 eavail = ep_events_available(ep);
1196 1232
1197 spin_unlock_irqrestore(&ep->lock, flags); 1233 spin_unlock_irqrestore(&ep->lock, flags);
1198 1234
@@ -1203,7 +1239,7 @@ retry:
1203 */ 1239 */
1204 if (!res && eavail && 1240 if (!res && eavail &&
1205 !(res = ep_send_events(ep, events, maxevents)) && !timed_out) 1241 !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
1206 goto retry; 1242 goto fetch_events;
1207 1243
1208 return res; 1244 return res;
1209} 1245}
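
The eventpoll rework funnels every readiness test through ep_events_available(), which also consults ovflist, and gives timeout == 0 a fast path that checks readiness once under the lock instead of entering the wait loop. From userspace that fast path is simply a zero-timeout poll; a runnable example:

#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN }, out;
	int ep = epoll_create1(0);
	int n;

	ev.data.fd = STDIN_FILENO;
	epoll_ctl(ep, EPOLL_CTL_ADD, STDIN_FILENO, &ev);

	/* timeout 0: never sleeps, reports only what is ready right now */
	n = epoll_wait(ep, &out, 1, 0);
	printf("ready events: %d\n", n);

	close(ep);
	return 0;
}
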
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 7c39b885f969..b6cca47f7b07 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -305,7 +305,7 @@ static void cuse_gendev_release(struct device *dev)
305static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) 305static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
306{ 306{
307 struct cuse_conn *cc = fc_to_cc(fc); 307 struct cuse_conn *cc = fc_to_cc(fc);
308 struct cuse_init_out *arg = &req->misc.cuse_init_out; 308 struct cuse_init_out *arg = req->out.args[0].value;
309 struct page *page = req->pages[0]; 309 struct page *page = req->pages[0];
310 struct cuse_devinfo devinfo = { }; 310 struct cuse_devinfo devinfo = { };
311 struct device *dev; 311 struct device *dev;
@@ -384,6 +384,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
384 dev_set_uevent_suppress(dev, 0); 384 dev_set_uevent_suppress(dev, 0);
385 kobject_uevent(&dev->kobj, KOBJ_ADD); 385 kobject_uevent(&dev->kobj, KOBJ_ADD);
386out: 386out:
387 kfree(arg);
387 __free_page(page); 388 __free_page(page);
388 return; 389 return;
389 390
@@ -405,6 +406,7 @@ static int cuse_send_init(struct cuse_conn *cc)
405 struct page *page; 406 struct page *page;
406 struct fuse_conn *fc = &cc->fc; 407 struct fuse_conn *fc = &cc->fc;
407 struct cuse_init_in *arg; 408 struct cuse_init_in *arg;
409 void *outarg;
408 410
409 BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); 411 BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
410 412
@@ -419,6 +421,10 @@ static int cuse_send_init(struct cuse_conn *cc)
419 if (!page) 421 if (!page)
420 goto err_put_req; 422 goto err_put_req;
421 423
424 outarg = kzalloc(sizeof(struct cuse_init_out), GFP_KERNEL);
425 if (!outarg)
426 goto err_free_page;
427
422 arg = &req->misc.cuse_init_in; 428 arg = &req->misc.cuse_init_in;
423 arg->major = FUSE_KERNEL_VERSION; 429 arg->major = FUSE_KERNEL_VERSION;
424 arg->minor = FUSE_KERNEL_MINOR_VERSION; 430 arg->minor = FUSE_KERNEL_MINOR_VERSION;
@@ -429,7 +435,7 @@ static int cuse_send_init(struct cuse_conn *cc)
429 req->in.args[0].value = arg; 435 req->in.args[0].value = arg;
430 req->out.numargs = 2; 436 req->out.numargs = 2;
431 req->out.args[0].size = sizeof(struct cuse_init_out); 437 req->out.args[0].size = sizeof(struct cuse_init_out);
432 req->out.args[0].value = &req->misc.cuse_init_out; 438 req->out.args[0].value = outarg;
433 req->out.args[1].size = CUSE_INIT_INFO_MAX; 439 req->out.args[1].size = CUSE_INIT_INFO_MAX;
434 req->out.argvar = 1; 440 req->out.argvar = 1;
435 req->out.argpages = 1; 441 req->out.argpages = 1;
@@ -440,6 +446,8 @@ static int cuse_send_init(struct cuse_conn *cc)
440 446
441 return 0; 447 return 0;
442 448
449err_free_page:
450 __free_page(page);
443err_put_req: 451err_put_req:
444 fuse_put_request(fc, req); 452 fuse_put_request(fc, req);
445err: 453err:
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index cf8d28d1fbad..640fc229df10 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -737,14 +737,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
737 if (WARN_ON(PageMlocked(oldpage))) 737 if (WARN_ON(PageMlocked(oldpage)))
738 goto out_fallback_unlock; 738 goto out_fallback_unlock;
739 739
740 remove_from_page_cache(oldpage); 740 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
741 page_cache_release(oldpage);
742
743 err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
744 if (err) { 741 if (err) {
745 printk(KERN_WARNING "fuse_try_move_page: failed to add page"); 742 unlock_page(newpage);
746 goto out_fallback_unlock; 743 return err;
747 } 744 }
745
748 page_cache_get(newpage); 746 page_cache_get(newpage);
749 747
750 if (!(buf->flags & PIPE_BUF_FLAG_LRU)) 748 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
@@ -1910,6 +1908,21 @@ __acquires(fc->lock)
1910 kfree(dequeue_forget(fc, 1, NULL)); 1908 kfree(dequeue_forget(fc, 1, NULL));
1911} 1909}
1912 1910
1911static void end_polls(struct fuse_conn *fc)
1912{
1913 struct rb_node *p;
1914
1915 p = rb_first(&fc->polled_files);
1916
1917 while (p) {
1918 struct fuse_file *ff;
1919 ff = rb_entry(p, struct fuse_file, polled_node);
1920 wake_up_interruptible_all(&ff->poll_wait);
1921
1922 p = rb_next(p);
1923 }
1924}
1925
1913/* 1926/*
1914 * Abort all requests. 1927 * Abort all requests.
1915 * 1928 *
@@ -1937,6 +1950,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
1937 fc->blocked = 0; 1950 fc->blocked = 0;
1938 end_io_requests(fc); 1951 end_io_requests(fc);
1939 end_queued_requests(fc); 1952 end_queued_requests(fc);
1953 end_polls(fc);
1940 wake_up_all(&fc->waitq); 1954 wake_up_all(&fc->waitq);
1941 wake_up_all(&fc->blocked_waitq); 1955 wake_up_all(&fc->blocked_waitq);
1942 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 1956 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1953,6 +1967,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
1953 fc->connected = 0; 1967 fc->connected = 0;
1954 fc->blocked = 0; 1968 fc->blocked = 0;
1955 end_queued_requests(fc); 1969 end_queued_requests(fc);
1970 end_polls(fc);
1956 wake_up_all(&fc->blocked_waitq); 1971 wake_up_all(&fc->blocked_waitq);
1957 spin_unlock(&fc->lock); 1972 spin_unlock(&fc->lock);
1958 fuse_conn_put(fc); 1973 fuse_conn_put(fc);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8bd0ef9286c3..c6ba49bd95b3 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -158,10 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
158{ 158{
159 struct inode *inode; 159 struct inode *inode;
160 160
161 if (nd && nd->flags & LOOKUP_RCU) 161 inode = ACCESS_ONCE(entry->d_inode);
162 return -ECHILD;
163
164 inode = entry->d_inode;
165 if (inode && is_bad_inode(inode)) 162 if (inode && is_bad_inode(inode))
166 return 0; 163 return 0;
167 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 164 else if (fuse_dentry_time(entry) < get_jiffies_64()) {
@@ -177,6 +174,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
177 if (!inode) 174 if (!inode)
178 return 0; 175 return 0;
179 176
177 if (nd->flags & LOOKUP_RCU)
178 return -ECHILD;
179
180 fc = get_fuse_conn(inode); 180 fc = get_fuse_conn(inode);
181 req = fuse_get_req(fc); 181 req = fuse_get_req(fc);
182 if (IS_ERR(req)) 182 if (IS_ERR(req))
@@ -970,6 +970,14 @@ static int fuse_access(struct inode *inode, int mask)
970 return err; 970 return err;
971} 971}
972 972
973static int fuse_perm_getattr(struct inode *inode, int flags)
974{
975 if (flags & IPERM_FLAG_RCU)
976 return -ECHILD;
977
978 return fuse_do_getattr(inode, NULL, NULL);
979}
980
973/* 981/*
974 * Check permission. The two basic access models of FUSE are: 982 * Check permission. The two basic access models of FUSE are:
975 * 983 *
@@ -989,9 +997,6 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
989 bool refreshed = false; 997 bool refreshed = false;
990 int err = 0; 998 int err = 0;
991 999
992 if (flags & IPERM_FLAG_RCU)
993 return -ECHILD;
994
995 if (!fuse_allow_task(fc, current)) 1000 if (!fuse_allow_task(fc, current))
996 return -EACCES; 1001 return -EACCES;
997 1002
@@ -1000,9 +1005,15 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
1000 */ 1005 */
1001 if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) || 1006 if ((fc->flags & FUSE_DEFAULT_PERMISSIONS) ||
1002 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { 1007 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1003 err = fuse_update_attributes(inode, NULL, NULL, &refreshed); 1008 struct fuse_inode *fi = get_fuse_inode(inode);
1004 if (err) 1009
1005 return err; 1010 if (fi->i_time < get_jiffies_64()) {
1011 refreshed = true;
1012
1013 err = fuse_perm_getattr(inode, flags);
1014 if (err)
1015 return err;
1016 }
1006 } 1017 }
1007 1018
1008 if (fc->flags & FUSE_DEFAULT_PERMISSIONS) { 1019 if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
@@ -1012,7 +1023,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
1012 attributes. This is also needed, because the root 1023 attributes. This is also needed, because the root
1013 node will at first have no permissions */ 1024 node will at first have no permissions */
1014 if (err == -EACCES && !refreshed) { 1025 if (err == -EACCES && !refreshed) {
1015 err = fuse_do_getattr(inode, NULL, NULL); 1026 err = fuse_perm_getattr(inode, flags);
1016 if (!err) 1027 if (!err)
1017 err = generic_permission(inode, mask, 1028 err = generic_permission(inode, mask,
1018 flags, NULL); 1029 flags, NULL);
@@ -1023,13 +1034,16 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags)
1023 noticed immediately, only after the attribute 1034 noticed immediately, only after the attribute
1024 timeout has expired */ 1035 timeout has expired */
1025 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { 1036 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
1037 if (flags & IPERM_FLAG_RCU)
1038 return -ECHILD;
1039
1026 err = fuse_access(inode, mask); 1040 err = fuse_access(inode, mask);
1027 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) { 1041 } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
1028 if (!(inode->i_mode & S_IXUGO)) { 1042 if (!(inode->i_mode & S_IXUGO)) {
1029 if (refreshed) 1043 if (refreshed)
1030 return -EACCES; 1044 return -EACCES;
1031 1045
1032 err = fuse_do_getattr(inode, NULL, NULL); 1046 err = fuse_perm_getattr(inode, flags);
1033 if (!err && !(inode->i_mode & S_IXUGO)) 1047 if (!err && !(inode->i_mode & S_IXUGO))
1034 return -EACCES; 1048 return -EACCES;
1035 } 1049 }
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9e0832dbb1e3..6ea00734984e 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -222,7 +222,7 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
222 rb_erase(&ff->polled_node, &fc->polled_files); 222 rb_erase(&ff->polled_node, &fc->polled_files);
223 spin_unlock(&fc->lock); 223 spin_unlock(&fc->lock);
224 224
225 wake_up_interruptible_sync(&ff->poll_wait); 225 wake_up_interruptible_all(&ff->poll_wait);
226 226
227 inarg->fh = ff->fh; 227 inarg->fh = ff->fh;
228 inarg->flags = flags; 228 inarg->flags = flags;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d4286947bc2c..b788becada76 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -272,7 +272,6 @@ struct fuse_req {
272 struct fuse_init_in init_in; 272 struct fuse_init_in init_in;
273 struct fuse_init_out init_out; 273 struct fuse_init_out init_out;
274 struct cuse_init_in cuse_init_in; 274 struct cuse_init_in cuse_init_in;
275 struct cuse_init_out cuse_init_out;
276 struct { 275 struct {
277 struct fuse_read_in in; 276 struct fuse_read_in in;
278 u64 attr_ver; 277 u64 attr_ver;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9885082b470f..b9eeb1cd03ff 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -332,8 +332,7 @@ static void truncate_huge_page(struct page *page)
332{ 332{
333 cancel_dirty_page(page, /* No IO accounting for huge pages? */0); 333 cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
334 ClearPageUptodate(page); 334 ClearPageUptodate(page);
335 remove_from_page_cache(page); 335 delete_from_page_cache(page);
336 put_page(page);
337} 336}
338 337
339static void truncate_hugepages(struct inode *inode, loff_t lstart) 338static void truncate_hugepages(struct inode *inode, loff_t lstart)
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index fd05a0b9431d..5a001020c542 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -40,12 +40,13 @@ static z_stream inf_strm, def_strm;
40 40
41static int __init alloc_workspaces(void) 41static int __init alloc_workspaces(void)
42{ 42{
43 def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); 43 def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
44 MAX_MEM_LEVEL));
44 if (!def_strm.workspace) { 45 if (!def_strm.workspace) {
45 printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize()); 46 printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
46 return -ENOMEM; 47 return -ENOMEM;
47 } 48 }
48 D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize())); 49 D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)));
49 inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); 50 inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
50 if (!inf_strm.workspace) { 51 if (!inf_strm.workspace) {
51 printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize()); 52 printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
diff --git a/fs/logfs/compr.c b/fs/logfs/compr.c
index 44bbfd249abc..961f02b86d97 100644
--- a/fs/logfs/compr.c
+++ b/fs/logfs/compr.c
@@ -81,7 +81,7 @@ error:
81 81
82int __init logfs_compr_init(void) 82int __init logfs_compr_init(void)
83{ 83{
84 size_t size = max(zlib_deflate_workspacesize(), 84 size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
85 zlib_inflate_workspacesize()); 85 zlib_inflate_workspacesize());
86 stream.workspace = vmalloc(size); 86 stream.workspace = vmalloc(size);
87 if (!stream.workspace) 87 if (!stream.workspace)
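
Both call sites switch to the two-argument zlib_deflate_workspacesize(windowBits, memLevel); passing MAX_WBITS and MAX_MEM_LEVEL keeps the old worst-case allocation. A hedged sketch of the payoff for a hypothetical caller deflating with a smaller window (the 11/8 values are illustrative, not from this series):

	/* sketch: size the workspace for an 11-bit window and memLevel 8
	 * instead of the MAX_WBITS/MAX_MEM_LEVEL worst case */
	stream.workspace = vmalloc(zlib_deflate_workspacesize(11, 8));
	if (!stream.workspace)
		return -ENOMEM;
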
diff --git a/fs/namespace.c b/fs/namespace.c
index 9263995bf6a1..7dba2ed03429 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2701,7 +2701,7 @@ void __init mnt_init(void)
2701 if (!mount_hashtable) 2701 if (!mount_hashtable)
2702 panic("Failed to allocate mount hash table\n"); 2702 panic("Failed to allocate mount hash table\n");
2703 2703
2704 printk("Mount-cache hash table entries: %lu\n", HASH_SIZE); 2704 printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
2705 2705
2706 for (u = 0; u < HASH_SIZE; u++) 2706 for (u = 0; u < HASH_SIZE; u++)
2707 INIT_LIST_HEAD(&mount_hashtable[u]); 2707 INIT_LIST_HEAD(&mount_hashtable[u]);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b914860f81..93381aae9363 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,5 +1,6 @@
1#include <linux/mm.h> 1#include <linux/mm.h>
2#include <linux/hugetlb.h> 2#include <linux/hugetlb.h>
3#include <linux/huge_mm.h>
3#include <linux/mount.h> 4#include <linux/mount.h>
4#include <linux/seq_file.h> 5#include <linux/seq_file.h>
5#include <linux/highmem.h> 6#include <linux/highmem.h>
@@ -7,6 +8,7 @@
7#include <linux/slab.h> 8#include <linux/slab.h>
8#include <linux/pagemap.h> 9#include <linux/pagemap.h>
9#include <linux/mempolicy.h> 10#include <linux/mempolicy.h>
11#include <linux/rmap.h>
10#include <linux/swap.h> 12#include <linux/swap.h>
11#include <linux/swapops.h> 13#include <linux/swapops.h>
12 14
@@ -329,58 +331,86 @@ struct mem_size_stats {
329 unsigned long private_dirty; 331 unsigned long private_dirty;
330 unsigned long referenced; 332 unsigned long referenced;
331 unsigned long anonymous; 333 unsigned long anonymous;
334 unsigned long anonymous_thp;
332 unsigned long swap; 335 unsigned long swap;
333 u64 pss; 336 u64 pss;
334}; 337};
335 338
336static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 339
337 struct mm_walk *walk) 340static void smaps_pte_entry(pte_t ptent, unsigned long addr,
341 unsigned long ptent_size, struct mm_walk *walk)
338{ 342{
339 struct mem_size_stats *mss = walk->private; 343 struct mem_size_stats *mss = walk->private;
340 struct vm_area_struct *vma = mss->vma; 344 struct vm_area_struct *vma = mss->vma;
341 pte_t *pte, ptent;
342 spinlock_t *ptl;
343 struct page *page; 345 struct page *page;
344 int mapcount; 346 int mapcount;
345 347
346 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 348 if (is_swap_pte(ptent)) {
347 for (; addr != end; pte++, addr += PAGE_SIZE) { 349 mss->swap += ptent_size;
348 ptent = *pte; 350 return;
349 351 }
350 if (is_swap_pte(ptent)) {
351 mss->swap += PAGE_SIZE;
352 continue;
353 }
354 352
355 if (!pte_present(ptent)) 353 if (!pte_present(ptent))
356 continue; 354 return;
355
356 page = vm_normal_page(vma, addr, ptent);
357 if (!page)
358 return;
359
360 if (PageAnon(page))
361 mss->anonymous += ptent_size;
362
363 mss->resident += ptent_size;
364 /* Accumulate the size in pages that have been accessed. */
365 if (pte_young(ptent) || PageReferenced(page))
366 mss->referenced += ptent_size;
367 mapcount = page_mapcount(page);
368 if (mapcount >= 2) {
369 if (pte_dirty(ptent) || PageDirty(page))
370 mss->shared_dirty += ptent_size;
371 else
372 mss->shared_clean += ptent_size;
373 mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
374 } else {
375 if (pte_dirty(ptent) || PageDirty(page))
376 mss->private_dirty += ptent_size;
377 else
378 mss->private_clean += ptent_size;
379 mss->pss += (ptent_size << PSS_SHIFT);
380 }
381}
357 382
358 page = vm_normal_page(vma, addr, ptent); 383static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
359 if (!page) 384 struct mm_walk *walk)
360 continue; 385{
386 struct mem_size_stats *mss = walk->private;
387 struct vm_area_struct *vma = mss->vma;
388 pte_t *pte;
389 spinlock_t *ptl;
361 390
362 if (PageAnon(page)) 391 spin_lock(&walk->mm->page_table_lock);
363 mss->anonymous += PAGE_SIZE; 392 if (pmd_trans_huge(*pmd)) {
364 393 if (pmd_trans_splitting(*pmd)) {
365 mss->resident += PAGE_SIZE; 394 spin_unlock(&walk->mm->page_table_lock);
366 /* Accumulate the size in pages that have been accessed. */ 395 wait_split_huge_page(vma->anon_vma, pmd);
367 if (pte_young(ptent) || PageReferenced(page))
368 mss->referenced += PAGE_SIZE;
369 mapcount = page_mapcount(page);
370 if (mapcount >= 2) {
371 if (pte_dirty(ptent) || PageDirty(page))
372 mss->shared_dirty += PAGE_SIZE;
373 else
374 mss->shared_clean += PAGE_SIZE;
375 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
376 } else { 396 } else {
377 if (pte_dirty(ptent) || PageDirty(page)) 397 smaps_pte_entry(*(pte_t *)pmd, addr,
378 mss->private_dirty += PAGE_SIZE; 398 HPAGE_PMD_SIZE, walk);
379 else 399 spin_unlock(&walk->mm->page_table_lock);
380 mss->private_clean += PAGE_SIZE; 400 mss->anonymous_thp += HPAGE_PMD_SIZE;
381 mss->pss += (PAGE_SIZE << PSS_SHIFT); 401 return 0;
382 } 402 }
403 } else {
404 spin_unlock(&walk->mm->page_table_lock);
383 } 405 }
406 /*
407 * The mmap_sem held all the way back in m_start() is what
408 * keeps khugepaged out of here and from collapsing things
409 * in here.
410 */
411 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
412 for (; addr != end; pte++, addr += PAGE_SIZE)
413 smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
384 pte_unmap_unlock(pte - 1, ptl); 414 pte_unmap_unlock(pte - 1, ptl);
385 cond_resched(); 415 cond_resched();
386 return 0; 416 return 0;
@@ -416,6 +446,7 @@ static int show_smap(struct seq_file *m, void *v)
416 "Private_Dirty: %8lu kB\n" 446 "Private_Dirty: %8lu kB\n"
417 "Referenced: %8lu kB\n" 447 "Referenced: %8lu kB\n"
418 "Anonymous: %8lu kB\n" 448 "Anonymous: %8lu kB\n"
449 "AnonHugePages: %8lu kB\n"
419 "Swap: %8lu kB\n" 450 "Swap: %8lu kB\n"
420 "KernelPageSize: %8lu kB\n" 451 "KernelPageSize: %8lu kB\n"
421 "MMUPageSize: %8lu kB\n" 452 "MMUPageSize: %8lu kB\n"
@@ -429,6 +460,7 @@ static int show_smap(struct seq_file *m, void *v)
429 mss.private_dirty >> 10, 460 mss.private_dirty >> 10,
430 mss.referenced >> 10, 461 mss.referenced >> 10,
431 mss.anonymous >> 10, 462 mss.anonymous >> 10,
463 mss.anonymous_thp >> 10,
432 mss.swap >> 10, 464 mss.swap >> 10,
433 vma_kernel_pagesize(vma) >> 10, 465 vma_kernel_pagesize(vma) >> 10,
434 vma_mmu_pagesize(vma) >> 10, 466 vma_mmu_pagesize(vma) >> 10,
@@ -467,6 +499,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
467 spinlock_t *ptl; 499 spinlock_t *ptl;
468 struct page *page; 500 struct page *page;
469 501
502 split_huge_page_pmd(walk->mm, pmd);
503
470 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 504 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
471 for (; addr != end; pte++, addr += PAGE_SIZE) { 505 for (; addr != end; pte++, addr += PAGE_SIZE) {
472 ptent = *pte; 506 ptent = *pte;
@@ -623,6 +657,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
623 pte_t *pte; 657 pte_t *pte;
624 int err = 0; 658 int err = 0;
625 659
660 split_huge_page_pmd(walk->mm, pmd);
661
626 /* find the first VMA at or above 'addr' */ 662 /* find the first VMA at or above 'addr' */
627 vma = find_vma(walk->mm, addr); 663 vma = find_vma(walk->mm, addr);
628 for (; addr != end; addr += PAGE_SIZE) { 664 for (; addr != end; addr += PAGE_SIZE) {
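
With smaps_pte_entry() taking an explicit ptent_size, a transparent huge pmd is accounted in one HPAGE_PMD_SIZE step and surfaced as a new AnonHugePages field. Illustrative /proc/<pid>/smaps excerpt (values hypothetical):

	Anonymous:          4096 kB
	AnonHugePages:      2048 kB
	Swap:                  0 kB
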
diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h
index fba7d33ca3f2..7a0f69e6c618 100644
--- a/include/asm-generic/types.h
+++ b/include/asm-generic/types.h
@@ -12,31 +12,4 @@ typedef unsigned short umode_t;
12 12
13#endif /* __ASSEMBLY__ */ 13#endif /* __ASSEMBLY__ */
14 14
15/*
16 * These aren't exported outside the kernel to avoid name space clashes
17 */
18#ifdef __KERNEL__
19#ifndef __ASSEMBLY__
20/*
21 * DMA addresses may be very different from physical addresses
22 * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit
23 * systems, while sparc64 uses 32 bit DMA addresses for 64 bit
24 * physical addresses.
25 * This default defines dma_addr_t to have the same size as
26 * phys_addr_t, which is the most common way.
27 * Do not define the dma64_addr_t type, which never really
28 * worked.
29 */
30#ifndef dma_addr_t
31#ifdef CONFIG_PHYS_ADDR_T_64BIT
32typedef u64 dma_addr_t;
33#else
34typedef u32 dma_addr_t;
35#endif /* CONFIG_PHYS_ADDR_T_64BIT */
36#endif /* dma_addr_t */
37
38#endif /* __ASSEMBLY__ */
39
40#endif /* __KERNEL__ */
41
42#endif /* _ASM_GENERIC_TYPES_H */ 15#endif /* _ASM_GENERIC_TYPES_H */
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 176b825add52..07c40d5149de 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -652,7 +652,7 @@ __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
652__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) 652__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
653#define __NR_clock_adjtime 266 653#define __NR_clock_adjtime 266
654__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime) 654__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
655#define __NR_syncfs 264 655#define __NR_syncfs 267
656__SYSCALL(__NR_syncfs, sys_syncfs) 656__SYSCALL(__NR_syncfs, sys_syncfs)
657 657
658#undef __NR_syscalls 658#undef __NR_syscalls
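
The hunk moves __NR_syncfs from 264, a slot already taken earlier in the table, to 267, the next free number after __NR_clock_adjtime. A minimal userspace check, assuming an asm-generic architecture and headers that define __NR_syncfs (libc may not wrap it yet):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/tmp", O_RDONLY);

		if (fd < 0)
			return 1;
		/* 267 on asm-generic after this fix */
		if (syscall(__NR_syncfs, fd) < 0) {
			perror("syncfs");
			return 1;
		}
		puts("syncfs ok");
		return 0;
	}
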
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 4a3d52e545e1..5ffc6dda4675 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -32,6 +32,13 @@ enum backlight_update_reason {
32 BACKLIGHT_UPDATE_SYSFS, 32 BACKLIGHT_UPDATE_SYSFS,
33}; 33};
34 34
35enum backlight_type {
36 BACKLIGHT_RAW = 1,
37 BACKLIGHT_PLATFORM,
38 BACKLIGHT_FIRMWARE,
39 BACKLIGHT_TYPE_MAX,
40};
41
35struct backlight_device; 42struct backlight_device;
36struct fb_info; 43struct fb_info;
37 44
@@ -62,6 +69,8 @@ struct backlight_properties {
62 /* FB Blanking active? (values as for power) */ 69 /* FB Blanking active? (values as for power) */
63 /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ 70 /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
64 int fb_blank; 71 int fb_blank;
72 /* Backlight type */
73 enum backlight_type type;
65 /* Flags used to signal drivers of state changes */ 74 /* Flags used to signal drivers of state changes */
66 /* Upper 4 bits are reserved for driver internal use */ 75 /* Upper 4 bits are reserved for driver internal use */
67 unsigned int state; 76 unsigned int state;
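
The new enum lets the backlight core report how a device is driven (raw register access, platform hook, or firmware/ACPI). A hedged sketch of a driver filling it in, assuming the registration variant from this era that takes a properties argument; foo_bl and foo_bl_ops are hypothetical names:

	struct backlight_properties props;
	struct backlight_device *bd;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_PLATFORM;	/* board hook, not raw hardware */
	props.max_brightness = 100;
	bd = backlight_device_register("foo_bl", &pdev->dev, NULL,
				       &foo_bl_ops, &props);
	if (IS_ERR(bd))
		return PTR_ERR(bd);
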
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 09dcc0c2ffd5..b8e995fbd867 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -136,9 +136,18 @@ struct ceph_dir_layout {
136 136
137 137
138/* osd */ 138/* osd */
139#define CEPH_MSG_OSD_MAP 41 139#define CEPH_MSG_OSD_MAP 41
140#define CEPH_MSG_OSD_OP 42 140#define CEPH_MSG_OSD_OP 42
141#define CEPH_MSG_OSD_OPREPLY 43 141#define CEPH_MSG_OSD_OPREPLY 43
142#define CEPH_MSG_WATCH_NOTIFY 44
143
144
145/* watch-notify operations */
146enum {
147 WATCH_NOTIFY = 1, /* notifying watcher */
148 WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */
149};
150
142 151
143/* pool operations */ 152/* pool operations */
144enum { 153enum {
@@ -213,8 +222,10 @@ struct ceph_client_mount {
213 struct ceph_mon_request_header monhdr; 222 struct ceph_mon_request_header monhdr;
214} __attribute__ ((packed)); 223} __attribute__ ((packed));
215 224
225#define CEPH_SUBSCRIBE_ONETIME 1 /* I want only 1 update after have */
226
216struct ceph_mon_subscribe_item { 227struct ceph_mon_subscribe_item {
217 __le64 have_version; __le64 have; 228 __le64 have_version; __le64 have;
218 __u8 onetime; 229 __u8 onetime;
219} __attribute__ ((packed)); 230} __attribute__ ((packed));
220 231
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 72c72bfccb88..0d2e0fffb470 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -71,7 +71,6 @@ struct ceph_options {
71#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ 71#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
72#define CEPH_OSD_KEEPALIVE_DEFAULT 5 72#define CEPH_OSD_KEEPALIVE_DEFAULT 5
73#define CEPH_OSD_IDLE_TTL_DEFAULT 60 73#define CEPH_OSD_IDLE_TTL_DEFAULT 60
74#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
75 74
76#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) 75#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
77#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) 76#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index a1af29648fb5..f88eacb111d4 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -32,6 +32,7 @@ struct ceph_osd {
32 struct rb_node o_node; 32 struct rb_node o_node;
33 struct ceph_connection o_con; 33 struct ceph_connection o_con;
34 struct list_head o_requests; 34 struct list_head o_requests;
35 struct list_head o_linger_requests;
35 struct list_head o_osd_lru; 36 struct list_head o_osd_lru;
36 struct ceph_authorizer *o_authorizer; 37 struct ceph_authorizer *o_authorizer;
37 void *o_authorizer_buf, *o_authorizer_reply_buf; 38 void *o_authorizer_buf, *o_authorizer_reply_buf;
@@ -47,6 +48,8 @@ struct ceph_osd_request {
47 struct rb_node r_node; 48 struct rb_node r_node;
48 struct list_head r_req_lru_item; 49 struct list_head r_req_lru_item;
49 struct list_head r_osd_item; 50 struct list_head r_osd_item;
51 struct list_head r_linger_item;
52 struct list_head r_linger_osd;
50 struct ceph_osd *r_osd; 53 struct ceph_osd *r_osd;
51 struct ceph_pg r_pgid; 54 struct ceph_pg r_pgid;
52 int r_pg_osds[CEPH_PG_MAX_SIZE]; 55 int r_pg_osds[CEPH_PG_MAX_SIZE];
@@ -59,6 +62,7 @@ struct ceph_osd_request {
59 int r_flags; /* any additional flags for the osd */ 62 int r_flags; /* any additional flags for the osd */
60 u32 r_sent; /* >0 if r_request is sending/sent */ 63 u32 r_sent; /* >0 if r_request is sending/sent */
61 int r_got_reply; 64 int r_got_reply;
65 int r_linger;
62 66
63 struct ceph_osd_client *r_osdc; 67 struct ceph_osd_client *r_osdc;
64 struct kref r_kref; 68 struct kref r_kref;
@@ -74,7 +78,6 @@ struct ceph_osd_request {
74 char r_oid[40]; /* object name */ 78 char r_oid[40]; /* object name */
75 int r_oid_len; 79 int r_oid_len;
76 unsigned long r_stamp; /* send OR check time */ 80 unsigned long r_stamp; /* send OR check time */
77 bool r_resend; /* msg send failed, needs retry */
78 81
79 struct ceph_file_layout r_file_layout; 82 struct ceph_file_layout r_file_layout;
80 struct ceph_snap_context *r_snapc; /* snap context for writes */ 83 struct ceph_snap_context *r_snapc; /* snap context for writes */
@@ -90,6 +93,26 @@ struct ceph_osd_request {
90 struct ceph_pagelist *r_trail; /* trailing part of the data */ 93 struct ceph_pagelist *r_trail; /* trailing part of the data */
91}; 94};
92 95
96struct ceph_osd_event {
97 u64 cookie;
98 int one_shot;
99 struct ceph_osd_client *osdc;
100 void (*cb)(u64, u64, u8, void *);
101 void *data;
102 struct rb_node node;
103 struct list_head osd_node;
104 struct kref kref;
105 struct completion completion;
106};
107
108struct ceph_osd_event_work {
109 struct work_struct work;
110 struct ceph_osd_event *event;
111 u64 ver;
112 u64 notify_id;
113 u8 opcode;
114};
115
93struct ceph_osd_client { 116struct ceph_osd_client {
94 struct ceph_client *client; 117 struct ceph_client *client;
95 118
@@ -104,7 +127,10 @@ struct ceph_osd_client {
104 u64 timeout_tid; /* tid of timeout triggering rq */ 127 u64 timeout_tid; /* tid of timeout triggering rq */
105 u64 last_tid; /* tid of last request */ 128 u64 last_tid; /* tid of last request */
106 struct rb_root requests; /* pending requests */ 129 struct rb_root requests; /* pending requests */
107 struct list_head req_lru; /* pending requests lru */ 130 struct list_head req_lru; /* in-flight lru */
131 struct list_head req_unsent; /* unsent/need-resend queue */
132 struct list_head req_notarget; /* map to no osd */
133 struct list_head req_linger; /* lingering requests */
108 int num_requests; 134 int num_requests;
109 struct delayed_work timeout_work; 135 struct delayed_work timeout_work;
110 struct delayed_work osds_timeout_work; 136 struct delayed_work osds_timeout_work;
@@ -116,6 +142,12 @@ struct ceph_osd_client {
116 142
117 struct ceph_msgpool msgpool_op; 143 struct ceph_msgpool msgpool_op;
118 struct ceph_msgpool msgpool_op_reply; 144 struct ceph_msgpool msgpool_op_reply;
145
146 spinlock_t event_lock;
147 struct rb_root event_tree;
148 u64 event_count;
149
150 struct workqueue_struct *notify_wq;
119}; 151};
120 152
121struct ceph_osd_req_op { 153struct ceph_osd_req_op {
@@ -150,6 +182,13 @@ struct ceph_osd_req_op {
150 struct { 182 struct {
151 u64 snapid; 183 u64 snapid;
152 } snap; 184 } snap;
185 struct {
186 u64 cookie;
187 u64 ver;
188 __u8 flag;
189 u32 prot_ver;
190 u32 timeout;
191 } watch;
153 }; 192 };
154 u32 payload_len; 193 u32 payload_len;
155}; 194};
@@ -198,6 +237,11 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
198 bool use_mempool, int num_reply, 237 bool use_mempool, int num_reply,
199 int page_align); 238 int page_align);
200 239
240extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
241 struct ceph_osd_request *req);
242extern void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
243 struct ceph_osd_request *req);
244
201static inline void ceph_osdc_get_request(struct ceph_osd_request *req) 245static inline void ceph_osdc_get_request(struct ceph_osd_request *req)
202{ 246{
203 kref_get(&req->r_kref); 247 kref_get(&req->r_kref);
@@ -233,5 +277,14 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
233 struct page **pages, int nr_pages, 277 struct page **pages, int nr_pages,
234 int flags, int do_sync, bool nofail); 278 int flags, int do_sync, bool nofail);
235 279
280/* watch/notify events */
281extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
282 void (*event_cb)(u64, u64, u8, void *),
283 int one_shot, void *data,
284 struct ceph_osd_event **pevent);
285extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
286extern int ceph_osdc_wait_event(struct ceph_osd_event *event,
287 unsigned long timeout);
288extern void ceph_osdc_put_event(struct ceph_osd_event *event);
236#endif 289#endif
237 290
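
The watch/notify exports give callers an event object keyed by cookie. A usage sketch assembled only from the declarations above; the callback body and timeout value are hypothetical, and error handling is trimmed:

	static void foo_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
	{
		/* hypothetical: react to a notify on a watched object */
	}

	struct ceph_osd_event *event;
	int ret;

	ret = ceph_osdc_create_event(osdc, foo_notify_cb, 1 /* one_shot */,
				     data, &event);
	if (!ret) {
		ret = ceph_osdc_wait_event(event, timeout);
		ceph_osdc_cancel_event(event);
	}
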
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 6d5247f2e81b..0a99099801a4 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -12,9 +12,9 @@
12 * osdmap encoding versions 12 * osdmap encoding versions
13 */ 13 */
14#define CEPH_OSDMAP_INC_VERSION 5 14#define CEPH_OSDMAP_INC_VERSION 5
15#define CEPH_OSDMAP_INC_VERSION_EXT 5 15#define CEPH_OSDMAP_INC_VERSION_EXT 6
16#define CEPH_OSDMAP_VERSION 5 16#define CEPH_OSDMAP_VERSION 5
17#define CEPH_OSDMAP_VERSION_EXT 5 17#define CEPH_OSDMAP_VERSION_EXT 6
18 18
19/* 19/*
20 * fs id 20 * fs id
@@ -181,9 +181,17 @@ enum {
181 /* read */ 181 /* read */
182 CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1, 182 CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
183 CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2, 183 CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
184 CEPH_OSD_OP_MAPEXT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 3,
184 185
185 /* fancy read */ 186 /* fancy read */
186 CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4, 187 CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
188 CEPH_OSD_OP_SPARSE_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 5,
189
190 CEPH_OSD_OP_NOTIFY = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 6,
191 CEPH_OSD_OP_NOTIFY_ACK = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 7,
192
193 /* versioning */
194 CEPH_OSD_OP_ASSERT_VER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 8,
187 195
188 /* write */ 196 /* write */
189 CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1, 197 CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
@@ -205,6 +213,8 @@ enum {
205 CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13, 213 CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
206 CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14, 214 CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14,
207 215
216 CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
217
208 /** attrs **/ 218 /** attrs **/
209 /* read */ 219 /* read */
210 CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1, 220 CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
@@ -218,11 +228,14 @@ enum {
218 CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4, 228 CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
219 229
220 /** subop **/ 230 /** subop **/
221 CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1, 231 CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
222 CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2, 232 CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
223 CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3, 233 CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
224 CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4, 234 CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
225 CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5, 235 CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
236 CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6,
237 CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
238 CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8,
226 239
227 /** lock **/ 240 /** lock **/
228 CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1, 241 CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
@@ -328,6 +341,8 @@ enum {
328 CEPH_OSD_CMPXATTR_MODE_U64 = 2 341 CEPH_OSD_CMPXATTR_MODE_U64 = 2
329}; 342};
330 343
344#define RADOS_NOTIFY_VER 1
345
331/* 346/*
332 * an individual object operation. each may be accompanied by some data 347 * an individual object operation. each may be accompanied by some data
333 * payload 348 * payload
@@ -359,7 +374,12 @@ struct ceph_osd_op {
359 struct { 374 struct {
360 __le64 snapid; 375 __le64 snapid;
361 } __attribute__ ((packed)) snap; 376 } __attribute__ ((packed)) snap;
362 }; 377 struct {
378 __le64 cookie;
379 __le64 ver;
380 __u8 flag; /* 0 = unwatch, 1 = watch */
381 } __attribute__ ((packed)) watch;
382};
363 __le32 payload_len; 383 __le32 payload_len;
364} __attribute__ ((packed)); 384} __attribute__ ((packed));
365 385
@@ -402,4 +422,5 @@ struct ceph_osd_reply_head {
402} __attribute__ ((packed)); 422} __attribute__ ((packed));
403 423
404 424
425
405#endif 426#endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index dfa2ed4c0d26..cc9f7a428649 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -11,9 +11,6 @@
11/* The full zone was compacted */ 11/* The full zone was compacted */
12#define COMPACT_COMPLETE 3 12#define COMPACT_COMPLETE 3
13 13
14#define COMPACT_MODE_DIRECT_RECLAIM 0
15#define COMPACT_MODE_KSWAPD 1
16
17#ifdef CONFIG_COMPACTION 14#ifdef CONFIG_COMPACTION
18extern int sysctl_compact_memory; 15extern int sysctl_compact_memory;
19extern int sysctl_compaction_handler(struct ctl_table *table, int write, 16extern int sysctl_compaction_handler(struct ctl_table *table, int write,
@@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
28 bool sync); 25 bool sync);
29extern unsigned long compaction_suitable(struct zone *zone, int order); 26extern unsigned long compaction_suitable(struct zone *zone, int order);
30extern unsigned long compact_zone_order(struct zone *zone, int order, 27extern unsigned long compact_zone_order(struct zone *zone, int order,
31 gfp_t gfp_mask, bool sync, 28 gfp_t gfp_mask, bool sync);
32 int compact_mode);
33 29
34/* Do not skip compaction more than 64 times */ 30/* Do not skip compaction more than 64 times */
35#define COMPACT_MAX_DEFER_SHIFT 6 31#define COMPACT_MAX_DEFER_SHIFT 6
@@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
74} 70}
75 71
76static inline unsigned long compact_zone_order(struct zone *zone, int order, 72static inline unsigned long compact_zone_order(struct zone *zone, int order,
77 gfp_t gfp_mask, bool sync, 73 gfp_t gfp_mask, bool sync)
78 int compact_mode)
79{ 74{
80 return COMPACT_CONTINUE; 75 return COMPACT_CONTINUE;
81} 76}
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 16508bcddacc..cb4c1eb7778e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -92,3 +92,11 @@
92#if !defined(__noclone) 92#if !defined(__noclone)
93#define __noclone /* not needed */ 93#define __noclone /* not needed */
94#endif 94#endif
95
96/*
97 * A trick to suppress uninitialized variable warning without generating any
98 * code
99 */
100#define uninitialized_var(x) x = x
101
102#define __always_inline inline __attribute__((always_inline))
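
uninitialized_var() and __always_inline move into the common gcc header; the gcc3/gcc4 hunks below drop their per-version copies. Usage is unchanged; the self-assignment silences a false "may be used uninitialized" warning without generating code:

	unsigned long uninitialized_var(flags);	/* expands to: unsigned long flags = flags; */

	spin_lock_irqsave(&lock, flags);
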
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
index b721129e0469..37d412436d0f 100644
--- a/include/linux/compiler-gcc3.h
+++ b/include/linux/compiler-gcc3.h
@@ -21,11 +21,3 @@
21# error "GCOV profiling support for gcc versions below 3.4 not included" 21# error "GCOV profiling support for gcc versions below 3.4 not included"
22# endif /* __GNUC_MINOR__ */ 22# endif /* __GNUC_MINOR__ */
23#endif /* CONFIG_GCOV_KERNEL */ 23#endif /* CONFIG_GCOV_KERNEL */
24
25/*
26 * A trick to suppress uninitialized variable warning without generating any
27 * code
28 */
29#define uninitialized_var(x) x = x
30
31#define __always_inline inline __attribute__((always_inline))
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index fcfa5b9a4317..64b7c003fd7a 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -12,13 +12,6 @@
12#define __used __attribute__((__used__)) 12#define __used __attribute__((__used__))
13#define __must_check __attribute__((warn_unused_result)) 13#define __must_check __attribute__((warn_unused_result))
14#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) 14#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
15#define __always_inline inline __attribute__((always_inline))
16
17/*
18 * A trick to suppress uninitialized variable warning without generating any
19 * code
20 */
21#define uninitialized_var(x) x = x
22 15
23#if __GNUC_MINOR__ >= 3 16#if __GNUC_MINOR__ >= 3
24/* Mark functions as cold. gcc will assume any path leading to a call 17/* Mark functions as cold. gcc will assume any path leading to a call
@@ -53,7 +46,6 @@
53#define __noclone __attribute__((__noclone__)) 46#define __noclone __attribute__((__noclone__))
54 47
55#endif 48#endif
56
57#endif 49#endif
58 50
59#if __GNUC_MINOR__ > 0 51#if __GNUC_MINOR__ > 0
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index e20dd1f9b40a..391a259b2cc9 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -11,7 +11,7 @@
11extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len); 11extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
12extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); 12extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
13 13
14#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)data, length) 14#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
15 15
16/* 16/*
17 * Helpers for hash table generation of ethernet nics: 17 * Helpers for hash table generation of ethernet nics:
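
The added parentheses around (data) matter once the argument is an expression: a cast binds tighter than the conditional operator, so the old macro cast only the condition. Illustrative sketch (cond, buf_a, buf_b and len are hypothetical):

	/* old expansion: (unsigned char const *)cond ? buf_a : buf_b
	 * new expansion: (unsigned char const *)(cond ? buf_a : buf_b) */
	u32 crc = crc32(~0U, cond ? buf_a : buf_b, len);
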
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index c8aad713a046..6998d9376ef9 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -16,9 +16,18 @@
16/** 16/**
17 * struct dw_dma_platform_data - Controller configuration parameters 17 * struct dw_dma_platform_data - Controller configuration parameters
18 * @nr_channels: Number of channels supported by hardware (max 8) 18 * @nr_channels: Number of channels supported by hardware (max 8)
19 * @is_private: The device channels should be marked as private and not for
 20 * use by the general purpose DMA channel allocator.
19 */ 21 */
20struct dw_dma_platform_data { 22struct dw_dma_platform_data {
21 unsigned int nr_channels; 23 unsigned int nr_channels;
24 bool is_private;
25#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
26#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
27 unsigned char chan_allocation_order;
28#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
29#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
30 unsigned char chan_priority;
22}; 31};
23 32
24/** 33/**
@@ -33,6 +42,30 @@ enum dw_dma_slave_width {
33 DW_DMA_SLAVE_WIDTH_32BIT, 42 DW_DMA_SLAVE_WIDTH_32BIT,
34}; 43};
35 44
45/* bursts size */
46enum dw_dma_msize {
47 DW_DMA_MSIZE_1,
48 DW_DMA_MSIZE_4,
49 DW_DMA_MSIZE_8,
50 DW_DMA_MSIZE_16,
51 DW_DMA_MSIZE_32,
52 DW_DMA_MSIZE_64,
53 DW_DMA_MSIZE_128,
54 DW_DMA_MSIZE_256,
55};
56
57/* flow controller */
58enum dw_dma_fc {
59 DW_DMA_FC_D_M2M,
60 DW_DMA_FC_D_M2P,
61 DW_DMA_FC_D_P2M,
62 DW_DMA_FC_D_P2P,
63 DW_DMA_FC_P_P2M,
64 DW_DMA_FC_SP_P2P,
65 DW_DMA_FC_P_M2P,
66 DW_DMA_FC_DP_P2P,
67};
68
36/** 69/**
37 * struct dw_dma_slave - Controller-specific information about a slave 70 * struct dw_dma_slave - Controller-specific information about a slave
38 * 71 *
@@ -44,6 +77,11 @@ enum dw_dma_slave_width {
44 * @reg_width: peripheral register width 77 * @reg_width: peripheral register width
45 * @cfg_hi: Platform-specific initializer for the CFG_HI register 78 * @cfg_hi: Platform-specific initializer for the CFG_HI register
46 * @cfg_lo: Platform-specific initializer for the CFG_LO register 79 * @cfg_lo: Platform-specific initializer for the CFG_LO register
80 * @src_master: src master for transfers on allocated channel.
81 * @dst_master: dest master for transfers on allocated channel.
82 * @src_msize: src burst size.
83 * @dst_msize: dest burst size.
84 * @fc: flow controller for DMA transfer
47 */ 85 */
48struct dw_dma_slave { 86struct dw_dma_slave {
49 struct device *dma_dev; 87 struct device *dma_dev;
@@ -52,6 +90,11 @@ struct dw_dma_slave {
52 enum dw_dma_slave_width reg_width; 90 enum dw_dma_slave_width reg_width;
53 u32 cfg_hi; 91 u32 cfg_hi;
54 u32 cfg_lo; 92 u32 cfg_lo;
93 u8 src_master;
94 u8 dst_master;
95 u8 src_msize;
96 u8 dst_msize;
97 u8 fc;
55}; 98};
56 99
57/* Platform-configurable bits in CFG_HI */ 100/* Platform-configurable bits in CFG_HI */
@@ -62,7 +105,6 @@ struct dw_dma_slave {
62#define DWC_CFGH_DST_PER(x) ((x) << 11) 105#define DWC_CFGH_DST_PER(x) ((x) << 11)
63 106
64/* Platform-configurable bits in CFG_LO */ 107/* Platform-configurable bits in CFG_LO */
65#define DWC_CFGL_PRIO(x) ((x) << 5) /* priority */
66#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */ 108#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
67#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12) 109#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
68#define DWC_CFGL_LOCK_CH_XACT (2 << 12) 110#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
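
Per-channel priority leaves the slave CFG_LO bits (DWC_CFGL_PRIO() is deleted) and becomes a controller-wide chan_priority policy, while slaves gain master, burst-size and flow-controller fields. A hedged platform-data sketch using only fields declared above; the values are illustrative:

	static struct dw_dma_platform_data foo_dma_pdata = {
		.nr_channels		= 8,
		.is_private		= true,
		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
		.chan_priority		= CHAN_PRIORITY_DESCENDING,
	};

	static struct dw_dma_slave foo_tx_slave = {
		.src_master	= 0,
		.dst_master	= 1,
		.src_msize	= DW_DMA_MSIZE_16,
		.dst_msize	= DW_DMA_MSIZE_16,
		.fc		= DW_DMA_FC_D_M2P,	/* DMAC flow control, mem->periph */
	};
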
diff --git a/include/linux/err.h b/include/linux/err.h
index 448afc12c78a..f2edce25a76b 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -52,6 +52,14 @@ static inline void * __must_check ERR_CAST(const void *ptr)
52 return (void *) ptr; 52 return (void *) ptr;
53} 53}
54 54
55static inline int __must_check PTR_RET(const void *ptr)
56{
57 if (IS_ERR(ptr))
58 return PTR_ERR(ptr);
59 else
60 return 0;
61}
62
55#endif 63#endif
56 64
57#endif /* _LINUX_ERR_H */ 65#endif /* _LINUX_ERR_H */
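
PTR_RET() collapses the common "return the error from an ERR_PTR, or 0 on success" tail. Sketch with a hypothetical ERR_PTR-returning helper:

	struct dentry *d;

	d = foo_create(parent);		/* hypothetical helper returning ERR_PTR() */
	return PTR_RET(d);		/* PTR_ERR(d) on error, 0 otherwise */
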
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index aac3e2eeb4fd..b297f288f6eb 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -13,6 +13,9 @@
13#ifndef _LINUX_ETHTOOL_H 13#ifndef _LINUX_ETHTOOL_H
14#define _LINUX_ETHTOOL_H 14#define _LINUX_ETHTOOL_H
15 15
16#ifdef __KERNEL__
17#include <linux/compat.h>
18#endif
16#include <linux/types.h> 19#include <linux/types.h>
17#include <linux/if_ether.h> 20#include <linux/if_ether.h>
18 21
@@ -450,6 +453,37 @@ struct ethtool_rxnfc {
450 __u32 rule_locs[0]; 453 __u32 rule_locs[0];
451}; 454};
452 455
456#ifdef __KERNEL__
457#ifdef CONFIG_COMPAT
458
459struct compat_ethtool_rx_flow_spec {
460 u32 flow_type;
461 union {
462 struct ethtool_tcpip4_spec tcp_ip4_spec;
463 struct ethtool_tcpip4_spec udp_ip4_spec;
464 struct ethtool_tcpip4_spec sctp_ip4_spec;
465 struct ethtool_ah_espip4_spec ah_ip4_spec;
466 struct ethtool_ah_espip4_spec esp_ip4_spec;
467 struct ethtool_usrip4_spec usr_ip4_spec;
468 struct ethhdr ether_spec;
469 u8 hdata[72];
470 } h_u, m_u;
471 compat_u64 ring_cookie;
472 u32 location;
473};
474
475struct compat_ethtool_rxnfc {
476 u32 cmd;
477 u32 flow_type;
478 compat_u64 data;
479 struct compat_ethtool_rx_flow_spec fs;
480 u32 rule_cnt;
481 u32 rule_locs[0];
482};
483
484#endif /* CONFIG_COMPAT */
485#endif /* __KERNEL__ */
486
453/** 487/**
454 * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection 488 * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
455 * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR 489 * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7061a8587ee3..12529e966350 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -662,9 +662,9 @@ struct address_space {
662 662
663struct block_device { 663struct block_device {
664 dev_t bd_dev; /* not a kdev_t - it's a search key */ 664 dev_t bd_dev; /* not a kdev_t - it's a search key */
665 int bd_openers;
665 struct inode * bd_inode; /* will die */ 666 struct inode * bd_inode; /* will die */
666 struct super_block * bd_super; 667 struct super_block * bd_super;
667 int bd_openers;
668 struct mutex bd_mutex; /* open/close mutex */ 668 struct mutex bd_mutex; /* open/close mutex */
669 struct list_head bd_inodes; 669 struct list_head bd_inodes;
670 void * bd_claiming; 670 void * bd_claiming;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index dca31761b311..bfb8f934521e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -35,6 +35,7 @@ struct vm_area_struct;
35#define ___GFP_NOTRACK 0 35#define ___GFP_NOTRACK 0
36#endif 36#endif
37#define ___GFP_NO_KSWAPD 0x400000u 37#define ___GFP_NO_KSWAPD 0x400000u
38#define ___GFP_OTHER_NODE 0x800000u
38 39
39/* 40/*
40 * GFP bitmasks.. 41 * GFP bitmasks..
@@ -83,6 +84,7 @@ struct vm_area_struct;
83#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ 84#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
84 85
85#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) 86#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
87#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
86 88
87/* 89/*
88 * This may seem redundant, but it's a way of annotating false positives vs. 90 * This may seem redundant, but it's a way of annotating false positives vs.
diff --git a/arch/arm/plat-pxa/include/plat/i2c.h b/include/linux/i2c/pxa-i2c.h
index 1a9f65e6ec0f..1a9f65e6ec0f 100644
--- a/arch/arm/plat-pxa/include/plat/i2c.h
+++ b/include/linux/i2c/pxa-i2c.h
diff --git a/include/linux/if_ppp.h b/include/linux/if_ppp.h
index fcef103aa3f6..c9ad38322576 100644
--- a/include/linux/if_ppp.h
+++ b/include/linux/if_ppp.h
@@ -114,14 +114,14 @@ struct pppol2tp_ioc_stats {
114 __u16 tunnel_id; /* redundant */ 114 __u16 tunnel_id; /* redundant */
115 __u16 session_id; /* if zero, get tunnel stats */ 115 __u16 session_id; /* if zero, get tunnel stats */
116 __u32 using_ipsec:1; /* valid only for session_id == 0 */ 116 __u32 using_ipsec:1; /* valid only for session_id == 0 */
117 aligned_u64 tx_packets; 117 __aligned_u64 tx_packets;
118 aligned_u64 tx_bytes; 118 __aligned_u64 tx_bytes;
119 aligned_u64 tx_errors; 119 __aligned_u64 tx_errors;
120 aligned_u64 rx_packets; 120 __aligned_u64 rx_packets;
121 aligned_u64 rx_bytes; 121 __aligned_u64 rx_bytes;
122 aligned_u64 rx_seq_discards; 122 __aligned_u64 rx_seq_discards;
123 aligned_u64 rx_oos_packets; 123 __aligned_u64 rx_oos_packets;
124 aligned_u64 rx_errors; 124 __aligned_u64 rx_errors;
125}; 125};
126 126
127#define ifr__name b.ifr_ifrn.ifrn_name 127#define ifr__name b.ifr_ifrn.ifrn_name
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 4b0761cc7dd9..ec2d17bc1f1e 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -159,7 +159,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
159 if (t->buf.tail != NULL) 159 if (t->buf.tail != NULL)
160 t->buf.tail->commit = t->buf.tail->used; 160 t->buf.tail->commit = t->buf.tail->used;
161 spin_unlock_irqrestore(&t->buf.lock, flags); 161 spin_unlock_irqrestore(&t->buf.lock, flags);
162 schedule_delayed_work(&t->buf.work, 0); 162 schedule_work(&t->buf.work);
163} 163}
164 164
165#endif 165#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2fe6e84894a4..00cec4dc0ae2 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -187,14 +187,76 @@ NORET_TYPE void do_exit(long error_code)
187 ATTRIB_NORET; 187 ATTRIB_NORET;
188NORET_TYPE void complete_and_exit(struct completion *, long) 188NORET_TYPE void complete_and_exit(struct completion *, long)
189 ATTRIB_NORET; 189 ATTRIB_NORET;
190
191/* Internal, do not use. */
192int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
193int __must_check _kstrtol(const char *s, unsigned int base, long *res);
194
195int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
196int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
197static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
198{
199 /*
200 * We want to shortcut function call, but
201 * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
202 */
203 if (sizeof(unsigned long) == sizeof(unsigned long long) &&
204 __alignof__(unsigned long) == __alignof__(unsigned long long))
205 return kstrtoull(s, base, (unsigned long long *)res);
206 else
207 return _kstrtoul(s, base, res);
208}
209
210static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
211{
212 /*
213 * We want to shortcut function call, but
214 * __builtin_types_compatible_p(long, long long) = 0.
215 */
216 if (sizeof(long) == sizeof(long long) &&
217 __alignof__(long) == __alignof__(long long))
218 return kstrtoll(s, base, (long long *)res);
219 else
220 return _kstrtol(s, base, res);
221}
222
223int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
224int __must_check kstrtoint(const char *s, unsigned int base, int *res);
225
226static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
227{
228 return kstrtoull(s, base, res);
229}
230
231static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
232{
233 return kstrtoll(s, base, res);
234}
235
236static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
237{
238 return kstrtouint(s, base, res);
239}
240
241static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
242{
243 return kstrtoint(s, base, res);
244}
245
246int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
247int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
248int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
249int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
250
190extern unsigned long simple_strtoul(const char *,char **,unsigned int); 251extern unsigned long simple_strtoul(const char *,char **,unsigned int);
191extern long simple_strtol(const char *,char **,unsigned int); 252extern long simple_strtol(const char *,char **,unsigned int);
192extern unsigned long long simple_strtoull(const char *,char **,unsigned int); 253extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
193extern long long simple_strtoll(const char *,char **,unsigned int); 254extern long long simple_strtoll(const char *,char **,unsigned int);
194extern int __must_check strict_strtoul(const char *, unsigned int, unsigned long *); 255#define strict_strtoul kstrtoul
195extern int __must_check strict_strtol(const char *, unsigned int, long *); 256#define strict_strtol kstrtol
196extern int __must_check strict_strtoull(const char *, unsigned int, unsigned long long *); 257#define strict_strtoull kstrtoull
197extern int __must_check strict_strtoll(const char *, unsigned int, long long *); 258#define strict_strtoll kstrtoll
259
198extern int sprintf(char * buf, const char * fmt, ...) 260extern int sprintf(char * buf, const char * fmt, ...)
199 __attribute__ ((format (printf, 2, 3))); 261 __attribute__ ((format (printf, 2, 3)));
200extern int vsprintf(char *buf, const char *, va_list) 262extern int vsprintf(char *buf, const char *, va_list)
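
strict_strtoul() and friends become aliases for the new kstrto* family, which rejects trailing garbage and out-of-range values instead of silently truncating. A hedged sketch of the intended call pattern in a sysfs store method (foo_store and the device update are hypothetical):

	static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		unsigned long val;
		int err;

		err = kstrtoul(buf, 10, &val);	/* 0, -EINVAL or -ERANGE */
		if (err)
			return err;
		/* hypothetical: apply val to the device here */
		return count;
	}
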
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 7ff16f7d3ed4..1e923e5e88e8 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -4,10 +4,15 @@
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/sched.h> 5#include <linux/sched.h>
6 6
7struct task_struct *kthread_create(int (*threadfn)(void *data), 7struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
8 void *data, 8 void *data,
9 const char namefmt[], ...) 9 int node,
10 __attribute__((format(printf, 3, 4))); 10 const char namefmt[], ...)
11 __attribute__((format(printf, 4, 5)));
12
13#define kthread_create(threadfn, data, namefmt, arg...) \
14 kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
15
11 16
12/** 17/**
13 * kthread_run - create and wake a thread. 18 * kthread_run - create and wake a thread.
@@ -34,6 +39,7 @@ void *kthread_data(struct task_struct *k);
34 39
35int kthreadd(void *unused); 40int kthreadd(void *unused);
36extern struct task_struct *kthreadd_task; 41extern struct task_struct *kthreadd_task;
42extern int tsk_fork_get_node(struct task_struct *tsk);
37 43
38/* 44/*
39 * Simple work processor based on kthread. 45 * Simple work processor based on kthread.
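
kthread_create() is now a wrapper passing node -1 (no NUMA preference) to kthread_create_on_node(). A sketch of a caller that keeps the thread's task and stack allocations on a chosen node; worker_fn and data are hypothetical:

	struct task_struct *task;

	task = kthread_create_on_node(worker_fn, data, cpu_to_node(cpu),
				      "foo/%d", cpu);
	if (!IS_ERR(task))
		wake_up_process(task);
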
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
new file mode 100644
index 000000000000..bb69d20da0dc
--- /dev/null
+++ b/include/linux/led-lm3530.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright (C) 2011 ST-Ericsson SA.
3 * Copyright (C) 2009 Motorola, Inc.
4 *
5 * License Terms: GNU General Public License v2
6 *
 7 * Simple driver for National Semiconductor LM3530 Backlight driver chip
8 *
9 * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
10 * based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com>
11 */
12
13#ifndef _LINUX_LED_LM3530_H__
14#define _LINUX_LED_LM3530_H__
15
16#define LM3530_FS_CURR_5mA (0) /* Full Scale Current */
17#define LM3530_FS_CURR_8mA (1)
18#define LM3530_FS_CURR_12mA (2)
19#define LM3530_FS_CURR_15mA (3)
20#define LM3530_FS_CURR_19mA (4)
21#define LM3530_FS_CURR_22mA (5)
22#define LM3530_FS_CURR_26mA (6)
23#define LM3530_FS_CURR_29mA (7)
24
25#define LM3530_ALS_AVRG_TIME_32ms (0) /* ALS Averaging Time */
26#define LM3530_ALS_AVRG_TIME_64ms (1)
27#define LM3530_ALS_AVRG_TIME_128ms (2)
28#define LM3530_ALS_AVRG_TIME_256ms (3)
29#define LM3530_ALS_AVRG_TIME_512ms (4)
30#define LM3530_ALS_AVRG_TIME_1024ms (5)
31#define LM3530_ALS_AVRG_TIME_2048ms (6)
32#define LM3530_ALS_AVRG_TIME_4096ms (7)
33
 34#define LM3530_RAMP_TIME_1ms (0) /* Brightness Ramp Time */
35#define LM3530_RAMP_TIME_130ms (1) /* Max to 0 and vice versa */
36#define LM3530_RAMP_TIME_260ms (2)
37#define LM3530_RAMP_TIME_520ms (3)
38#define LM3530_RAMP_TIME_1s (4)
39#define LM3530_RAMP_TIME_2s (5)
40#define LM3530_RAMP_TIME_4s (6)
41#define LM3530_RAMP_TIME_8s (7)
42
43/* ALS Resistor Select */
 44#define LM3530_ALS_IMPD_Z (0x00) /* ALS Impedance */
45#define LM3530_ALS_IMPD_13_53kOhm (0x01)
46#define LM3530_ALS_IMPD_9_01kOhm (0x02)
47#define LM3530_ALS_IMPD_5_41kOhm (0x03)
48#define LM3530_ALS_IMPD_2_27kOhm (0x04)
49#define LM3530_ALS_IMPD_1_94kOhm (0x05)
50#define LM3530_ALS_IMPD_1_81kOhm (0x06)
51#define LM3530_ALS_IMPD_1_6kOhm (0x07)
52#define LM3530_ALS_IMPD_1_138kOhm (0x08)
53#define LM3530_ALS_IMPD_1_05kOhm (0x09)
54#define LM3530_ALS_IMPD_1_011kOhm (0x0A)
55#define LM3530_ALS_IMPD_941Ohm (0x0B)
56#define LM3530_ALS_IMPD_759Ohm (0x0C)
57#define LM3530_ALS_IMPD_719Ohm (0x0D)
58#define LM3530_ALS_IMPD_700Ohm (0x0E)
59#define LM3530_ALS_IMPD_667Ohm (0x0F)
60
61enum lm3530_mode {
62 LM3530_BL_MODE_MANUAL = 0, /* "man" */
63 LM3530_BL_MODE_ALS, /* "als" */
64 LM3530_BL_MODE_PWM, /* "pwm" */
65};
66
67/* ALS input select */
68enum lm3530_als_mode {
69 LM3530_INPUT_AVRG = 0, /* ALS1 and ALS2 input average */
70 LM3530_INPUT_ALS1, /* ALS1 Input */
71 LM3530_INPUT_ALS2, /* ALS2 Input */
72 LM3530_INPUT_CEIL, /* Max of ALS1 and ALS2 */
73};
74
75/**
76 * struct lm3530_platform_data
77 * @mode: mode of operation i.e. Manual, ALS or PWM
78 * @als_input_mode: select source of ALS input - ALS1/2 or average
79 * @max_current: full scale LED current
80 * @pwm_pol_hi: PWM input polarity - active high/active low
81 * @als_avrg_time: ALS input averaging time
82 * @brt_ramp_law: brightness mapping mode - exponential/linear
83 * @brt_ramp_fall: rate of fall of led current
84 * @brt_ramp_rise: rate of rise of led current
85 * @als1_resistor_sel: internal resistance from ALS1 input to ground
86 * @als2_resistor_sel: internal resistance from ALS2 input to ground
87 * @brt_val: brightness value (0-255)
88 */
89struct lm3530_platform_data {
90 enum lm3530_mode mode;
91 enum lm3530_als_mode als_input_mode;
92
93 u8 max_current;
94 bool pwm_pol_hi;
95 u8 als_avrg_time;
96
97 bool brt_ramp_law;
98 u8 brt_ramp_fall;
99 u8 brt_ramp_rise;
100
101 u8 als1_resistor_sel;
102 u8 als2_resistor_sel;
103
104 u8 brt_val;
105};
106
107#endif /* _LINUX_LED_LM3530_H__ */
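
A minimal sketch of how a board file might consume this header, assuming the
usual I2C board-info registration path; the device name "lm3530-led", the
address 0x36 and all field values are illustrative assumptions, not taken
from this patch:

#include <linux/i2c.h>
#include <linux/led-lm3530.h>

static struct lm3530_platform_data example_lm3530_pdata = {
	.mode              = LM3530_BL_MODE_MANUAL,
	.als_input_mode    = LM3530_INPUT_AVRG,
	.max_current       = LM3530_FS_CURR_26mA,
	.pwm_pol_hi        = true,
	.als_avrg_time     = LM3530_ALS_AVRG_TIME_512ms,
	.brt_ramp_law      = true,                      /* exponential mapping */
	.brt_ramp_fall     = LM3530_RAMP_TIME_8s,
	.brt_ramp_rise     = LM3530_RAMP_TIME_8s,
	.als1_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
	.als2_resistor_sel = LM3530_ALS_IMPD_13_53kOhm,
	.brt_val           = 0x7f,
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("lm3530-led", 0x36),     /* name/address assumed */
		.platform_data = &example_lm3530_pdata,
	},
};
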
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 0f19df9e37b0..383811d9af83 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -194,11 +194,11 @@ struct gpio_led {
194 194
195struct gpio_led_platform_data { 195struct gpio_led_platform_data {
196 int num_leds; 196 int num_leds;
197 struct gpio_led *leds; 197 const struct gpio_led *leds;
198 198
199#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ 199#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
200#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ 200#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
201#define GPIO_LED_BLINK 2 /* Plase, blink */ 201#define GPIO_LED_BLINK 2 /* Please, blink */
202 int (*gpio_blink_set)(unsigned gpio, int state, 202 int (*gpio_blink_set)(unsigned gpio, int state,
203 unsigned long *delay_on, 203 unsigned long *delay_on,
204 unsigned long *delay_off); 204 unsigned long *delay_off);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f512e189be5a..5bb7be2628ca 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -62,6 +62,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
62 gfp_t gfp_mask); 62 gfp_t gfp_mask);
63extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru); 63extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
64extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru); 64extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
65extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
65extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru); 66extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
66extern void mem_cgroup_del_lru(struct page *page); 67extern void mem_cgroup_del_lru(struct page *page);
67extern void mem_cgroup_move_lists(struct page *page, 68extern void mem_cgroup_move_lists(struct page *page,
@@ -96,7 +97,7 @@ extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
96 97
97extern int 98extern int
98mem_cgroup_prepare_migration(struct page *page, 99mem_cgroup_prepare_migration(struct page *page,
99 struct page *newpage, struct mem_cgroup **ptr); 100 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
100extern void mem_cgroup_end_migration(struct mem_cgroup *mem, 101extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
101 struct page *oldpage, struct page *newpage, bool migration_ok); 102 struct page *oldpage, struct page *newpage, bool migration_ok);
102 103
@@ -211,6 +212,11 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
211 return ; 212 return ;
212} 213}
213 214
 215static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
216{
217 return ;
218}
219
214static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru) 220static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
215{ 221{
216 return ; 222 return ;
@@ -249,7 +255,7 @@ static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
249 255
250static inline int 256static inline int
251mem_cgroup_prepare_migration(struct page *page, struct page *newpage, 257mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
252 struct mem_cgroup **ptr) 258 struct mem_cgroup **ptr, gfp_t gfp_mask)
253{ 259{
254 return 0; 260 return 0;
255} 261}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 581703d86fbd..294104e0891d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -151,6 +151,7 @@ extern pgprot_t protection_map[16];
151#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ 151#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
152#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ 152#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
153#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ 153#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
154#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
154 155
155/* 156/*
156 * This interface is used by x86 PAT code to identify a pfn mapping that is 157 * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -859,7 +860,14 @@ extern void pagefault_out_of_memory(void);
859 860
860#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) 861#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
861 862
863/*
864 * Flags passed to __show_mem() and __show_free_areas() to suppress output in
865 * various contexts.
866 */
867#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
868
862extern void show_free_areas(void); 869extern void show_free_areas(void);
870extern void __show_free_areas(unsigned int flags);
863 871
864int shmem_lock(struct file *file, int lock, struct user_struct *user); 872int shmem_lock(struct file *file, int lock, struct user_struct *user);
865struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); 873struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
@@ -906,6 +914,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
906 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry 914 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
907 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry 915 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
908 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry 916 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
917 * this handler is required to be able to handle
918 * pmd_trans_huge() pmds. They may simply choose to
919 * split_huge_page() instead of handling it explicitly.
909 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry 920 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
910 * @pte_hole: if set, called for each hole at all levels 921 * @pte_hole: if set, called for each hole at all levels
911 * @hugetlb_entry: if set, called for each hugetlb entry 922 * @hugetlb_entry: if set, called for each hugetlb entry
@@ -1348,6 +1359,7 @@ extern void calculate_zone_inactive_ratio(struct zone *zone);
1348extern void mem_init(void); 1359extern void mem_init(void);
1349extern void __init mmap_init(void); 1360extern void __init mmap_init(void);
1350extern void show_mem(void); 1361extern void show_mem(void);
1362extern void __show_mem(unsigned int flags);
1351extern void si_meminfo(struct sysinfo * val); 1363extern void si_meminfo(struct sysinfo * val);
1352extern void si_meminfo_node(struct sysinfo *val, int nid); 1364extern void si_meminfo_node(struct sysinfo *val, int nid);
1353extern int after_bootmem; 1365extern int after_bootmem;
@@ -1537,6 +1549,8 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
1537#define FOLL_GET 0x04 /* do get_page on page */ 1549#define FOLL_GET 0x04 /* do get_page on page */
1538#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ 1550#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
1539#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ 1551#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
1552#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
1553 * and return without waiting upon it */
1540#define FOLL_MLOCK 0x40 /* mark page as mlocked */ 1554#define FOLL_MLOCK 0x40 /* mark page as mlocked */
1541#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ 1555#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
1542#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 1556#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 26bc4e2cd275..02aa5619709b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -237,8 +237,9 @@ struct mm_struct {
237 atomic_t mm_users; /* How many users with user space? */ 237 atomic_t mm_users; /* How many users with user space? */
238 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ 238 atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
239 int map_count; /* number of VMAs */ 239 int map_count; /* number of VMAs */
240 struct rw_semaphore mmap_sem; 240
241 spinlock_t page_table_lock; /* Protects page tables and some counters */ 241 spinlock_t page_table_lock; /* Protects page tables and some counters */
242 struct rw_semaphore mmap_sem;
242 243
243 struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung 244 struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
244 * together off init_mm.mmlist, and are protected 245 * together off init_mm.mmlist, and are protected
@@ -281,6 +282,9 @@ struct mm_struct {
281 unsigned int token_priority; 282 unsigned int token_priority;
282 unsigned int last_interval; 283 unsigned int last_interval;
283 284
285 /* How many tasks sharing this mm are OOM_DISABLE */
286 atomic_t oom_disable_count;
287
284 unsigned long flags; /* Must use atomic bitops to access the bits */ 288 unsigned long flags; /* Must use atomic bitops to access the bits */
285 289
286 struct core_state *core_state; /* coredumping support */ 290 struct core_state *core_state; /* coredumping support */
@@ -313,8 +317,6 @@ struct mm_struct {
313#ifdef CONFIG_TRANSPARENT_HUGEPAGE 317#ifdef CONFIG_TRANSPARENT_HUGEPAGE
314 pgtable_t pmd_huge_pte; /* protected by page_table_lock */ 318 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
315#endif 319#endif
316 /* How many tasks sharing this mm are OOM_DISABLE */
317 atomic_t oom_disable_count;
318}; 320};
319 321
320/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ 322/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
index 3882a81a3b3c..5aebd170f899 100644
--- a/include/linux/netfilter/ipset/ip_set_getport.h
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -18,4 +18,14 @@ static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
18extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, 18extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
19 __be16 *port); 19 __be16 *port);
20 20
21static inline bool ip_set_proto_with_ports(u8 proto)
22{
23 switch (proto) {
24 case IPPROTO_TCP:
25 case IPPROTO_UDP:
26 return true;
27 }
28 return false;
29}
30
21#endif /*_IP_SET_GETPORT_H*/ 31#endif /*_IP_SET_GETPORT_H*/
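
A minimal sketch of a caller using the new helper to gate port extraction on
the protocol; the wrapper function is illustrative and assumes an IPv4 caller
(NFPROTO_IPV4), everything else comes from the declarations above:

static bool example_get_port(const struct sk_buff *skb, u8 proto,
			     bool src, __be16 *port)
{
	/* only TCP and UDP are treated as port-carrying protocols here */
	if (!ip_set_proto_with_ports(proto))
		return false;
	return ip_set_get_ip_port(skb, NFPROTO_IPV4, src, port);
}
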
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
index ea9b8d380527..90c2c9575bac 100644
--- a/include/linux/netfilter/nfnetlink_log.h
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -28,8 +28,8 @@ struct nfulnl_msg_packet_hw {
28}; 28};
29 29
30struct nfulnl_msg_packet_timestamp { 30struct nfulnl_msg_packet_timestamp {
31 aligned_be64 sec; 31 __aligned_be64 sec;
32 aligned_be64 usec; 32 __aligned_be64 usec;
33}; 33};
34 34
35enum nfulnl_attr_type { 35enum nfulnl_attr_type {
diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 2455fe5f4e01..af94e0014ebd 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -25,8 +25,8 @@ struct nfqnl_msg_packet_hw {
25}; 25};
26 26
27struct nfqnl_msg_packet_timestamp { 27struct nfqnl_msg_packet_timestamp {
28 aligned_be64 sec; 28 __aligned_be64 sec;
29 aligned_be64 usec; 29 __aligned_be64 usec;
30}; 30};
31 31
32enum nfqnl_attr_type { 32enum nfqnl_attr_type {
diff --git a/include/linux/netfilter/xt_connbytes.h b/include/linux/netfilter/xt_connbytes.h
index 92fcbb0d193e..f1d6c15bd9e3 100644
--- a/include/linux/netfilter/xt_connbytes.h
+++ b/include/linux/netfilter/xt_connbytes.h
@@ -17,8 +17,8 @@ enum xt_connbytes_direction {
17 17
18struct xt_connbytes_info { 18struct xt_connbytes_info {
19 struct { 19 struct {
20 aligned_u64 from; /* count to be matched */ 20 __aligned_u64 from; /* count to be matched */
21 aligned_u64 to; /* count to be matched */ 21 __aligned_u64 to; /* count to be matched */
22 } count; 22 } count;
23 __u8 what; /* ipt_connbytes_what */ 23 __u8 what; /* ipt_connbytes_what */
24 __u8 direction; /* ipt_connbytes_direction */ 24 __u8 direction; /* ipt_connbytes_direction */
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
index ca6e03e47a17..9314723f39ca 100644
--- a/include/linux/netfilter/xt_quota.h
+++ b/include/linux/netfilter/xt_quota.h
@@ -13,7 +13,7 @@ struct xt_quota_priv;
13struct xt_quota_info { 13struct xt_quota_info {
14 __u32 flags; 14 __u32 flags;
15 __u32 pad; 15 __u32 pad;
16 aligned_u64 quota; 16 __aligned_u64 quota;
17 17
18 /* Used internally by the kernel */ 18 /* Used internally by the kernel */
19 struct xt_quota_priv *master; 19 struct xt_quota_priv *master;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0db8037e2725..811183de1ef5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -196,7 +196,7 @@ static inline int __TestClearPage##uname(struct page *page) { return 0; }
196 196
197struct page; /* forward declaration */ 197struct page; /* forward declaration */
198 198
199TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) 199TESTPAGEFLAG(Locked, locked)
200PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error) 200PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
201PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) 201PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
202PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) 202PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9c66e994540f..29ebba54c238 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -455,8 +455,9 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
455 pgoff_t index, gfp_t gfp_mask); 455 pgoff_t index, gfp_t gfp_mask);
456int add_to_page_cache_lru(struct page *page, struct address_space *mapping, 456int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
457 pgoff_t index, gfp_t gfp_mask); 457 pgoff_t index, gfp_t gfp_mask);
458extern void remove_from_page_cache(struct page *page); 458extern void delete_from_page_cache(struct page *page);
459extern void __remove_from_page_cache(struct page *page); 459extern void __delete_from_page_cache(struct page *page);
460int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
460 461
461/* 462/*
462 * Like add_to_page_cache_locked, but used to add newly allocated pages: 463 * Like add_to_page_cache_locked, but used to add newly allocated pages:
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index e031e1a486d9..5e3e25a3c9c3 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -4,6 +4,8 @@
4#ifndef __LINUX_PWM_BACKLIGHT_H 4#ifndef __LINUX_PWM_BACKLIGHT_H
5#define __LINUX_PWM_BACKLIGHT_H 5#define __LINUX_PWM_BACKLIGHT_H
6 6
7#include <linux/backlight.h>
8
7struct platform_pwm_backlight_data { 9struct platform_pwm_backlight_data {
8 int pwm_id; 10 int pwm_id;
9 unsigned int max_brightness; 11 unsigned int max_brightness;
@@ -13,6 +15,7 @@ struct platform_pwm_backlight_data {
13 int (*init)(struct device *dev); 15 int (*init)(struct device *dev);
14 int (*notify)(struct device *dev, int brightness); 16 int (*notify)(struct device *dev, int brightness);
15 void (*exit)(struct device *dev); 17 void (*exit)(struct device *dev);
18 int (*check_fb)(struct device *dev, struct fb_info *info);
16}; 19};
17 20
18#endif 21#endif
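
A sketch of the new check_fb() hook for a board that wants the backlight
bound to a single framebuffer; the "my-lcdc" id string and the field values
are illustrative assumptions:

#include <linux/fb.h>
#include <linux/string.h>
#include <linux/pwm_backlight.h>

static int example_check_fb(struct device *dev, struct fb_info *info)
{
	/* claim only the LCD controller's framebuffer */
	return strcmp(info->fix.id, "my-lcdc") == 0;
}

static struct platform_pwm_backlight_data example_bl_data = {
	.pwm_id         = 0,
	.max_brightness = 255,
	.dft_brightness = 128,
	.check_fb       = example_check_fb,
};
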
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index e9fd04ca1e51..830e65dc01ee 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -27,18 +27,15 @@
27struct anon_vma { 27struct anon_vma {
28 struct anon_vma *root; /* Root of this anon_vma tree */ 28 struct anon_vma *root; /* Root of this anon_vma tree */
29 spinlock_t lock; /* Serialize access to vma list */ 29 spinlock_t lock; /* Serialize access to vma list */
30#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
31
32 /* 30 /*
33 * The external_refcount is taken by either KSM or page migration 31 * The refcount is taken on an anon_vma when there is no
34 * to take a reference to an anon_vma when there is no
35 * guarantee that the vma of page tables will exist for 32 * guarantee that the vma of page tables will exist for
36 * the duration of the operation. A caller that takes 33 * the duration of the operation. A caller that takes
37 * the reference is responsible for clearing up the 34 * the reference is responsible for clearing up the
38 * anon_vma if they are the last user on release 35 * anon_vma if they are the last user on release
39 */ 36 */
40 atomic_t external_refcount; 37 atomic_t refcount;
41#endif 38
42 /* 39 /*
43 * NOTE: the LSB of the head.next is set by 40 * NOTE: the LSB of the head.next is set by
44 * mm_take_all_locks() _after_ taking the above lock. So the 41 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -71,42 +68,19 @@ struct anon_vma_chain {
71}; 68};
72 69
73#ifdef CONFIG_MMU 70#ifdef CONFIG_MMU
74#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
75static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
76{
77 atomic_set(&anon_vma->external_refcount, 0);
78}
79
80static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
81{
82 return atomic_read(&anon_vma->external_refcount);
83}
84
85static inline void get_anon_vma(struct anon_vma *anon_vma) 71static inline void get_anon_vma(struct anon_vma *anon_vma)
86{ 72{
87 atomic_inc(&anon_vma->external_refcount); 73 atomic_inc(&anon_vma->refcount);
88} 74}
89 75
90void drop_anon_vma(struct anon_vma *); 76void __put_anon_vma(struct anon_vma *anon_vma);
91#else
92static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
93{
94}
95 77
96static inline int anonvma_external_refcount(struct anon_vma *anon_vma) 78static inline void put_anon_vma(struct anon_vma *anon_vma)
97{
98 return 0;
99}
100
101static inline void get_anon_vma(struct anon_vma *anon_vma)
102{ 79{
80 if (atomic_dec_and_test(&anon_vma->refcount))
81 __put_anon_vma(anon_vma);
103} 82}
104 83
105static inline void drop_anon_vma(struct anon_vma *anon_vma)
106{
107}
108#endif /* CONFIG_KSM */
109
110static inline struct anon_vma *page_anon_vma(struct page *page) 84static inline struct anon_vma *page_anon_vma(struct page *page)
111{ 85{
112 if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 86 if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
@@ -148,7 +122,6 @@ void unlink_anon_vmas(struct vm_area_struct *);
148int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); 122int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
149int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); 123int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
150void __anon_vma_link(struct vm_area_struct *); 124void __anon_vma_link(struct vm_area_struct *);
151void anon_vma_free(struct anon_vma *);
152 125
153static inline void anon_vma_merge(struct vm_area_struct *vma, 126static inline void anon_vma_merge(struct vm_area_struct *vma,
154 struct vm_area_struct *next) 127 struct vm_area_struct *next)
@@ -157,6 +130,8 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
157 unlink_anon_vmas(next); 130 unlink_anon_vmas(next);
158} 131}
159 132
133struct anon_vma *page_get_anon_vma(struct page *page);
134
160/* 135/*
161 * rmap interfaces called when adding or removing pte of page 136 * rmap interfaces called when adding or removing pte of page
162 */ 137 */
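
With the refactor, anon_vma lifetime follows a plain get/put pattern in every
config, and the new page_get_anon_vma() hands back an already-elevated
reference. A minimal sketch of a caller (the function name is illustrative):

static void example_use_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;
	/* ... work that must not assume the page's vma still exists ... */
	put_anon_vma(anon_vma);	/* final ref drops into __put_anon_vma() */
}
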
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c15936fe998b..4b601be3dace 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1471,6 +1471,7 @@ struct task_struct {
1471#ifdef CONFIG_NUMA 1471#ifdef CONFIG_NUMA
1472 struct mempolicy *mempolicy; /* Protected by alloc_lock */ 1472 struct mempolicy *mempolicy; /* Protected by alloc_lock */
1473 short il_next; 1473 short il_next;
1474 short pref_node_fork;
1474#endif 1475#endif
1475 atomic_t fs_excl; /* holding fs exclusive resources */ 1476 atomic_t fs_excl; /* holding fs exclusive resources */
1476 struct rcu_head rcu; 1477 struct rcu_head rcu;
diff --git a/include/linux/sigma.h b/include/linux/sigma.h
new file mode 100644
index 000000000000..e2accb3164d8
--- /dev/null
+++ b/include/linux/sigma.h
@@ -0,0 +1,60 @@
1/*
2 * Load firmware files from Analog Devices SigmaStudio
3 *
4 * Copyright 2009-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __SIGMA_FIRMWARE_H__
10#define __SIGMA_FIRMWARE_H__
11
12#include <linux/firmware.h>
13#include <linux/types.h>
14
15struct i2c_client;
16
17#define SIGMA_MAGIC "ADISIGM"
18
19struct sigma_firmware {
20 const struct firmware *fw;
21 size_t pos;
22};
23
24struct sigma_firmware_header {
25 unsigned char magic[7];
26 u8 version;
27 u32 crc;
28};
29
30enum {
31 SIGMA_ACTION_WRITEXBYTES = 0,
32 SIGMA_ACTION_WRITESINGLE,
33 SIGMA_ACTION_WRITESAFELOAD,
34 SIGMA_ACTION_DELAY,
35 SIGMA_ACTION_PLLWAIT,
36 SIGMA_ACTION_NOOP,
37 SIGMA_ACTION_END,
38};
39
40struct sigma_action {
41 u8 instr;
42 u8 len_hi;
43 u16 len;
44 u16 addr;
45 unsigned char payload[];
46};
47
48static inline u32 sigma_action_len(struct sigma_action *sa)
49{
50 return (sa->len_hi << 16) | sa->len;
51}
52
53static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
54{
55 return sizeof(*sa) + payload_len + (payload_len % 2);
56}
57
58extern int process_sigma_firmware(struct i2c_client *client, const char *name);
59
60#endif
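
The two inline helpers pin down the record layout: a 24-bit length
reassembled from len_hi/len, and records padded to an even byte count. A
minimal sketch using only those helpers (whether the encoded length also
covers the addr field is not settled by this header, so treat the payload
accounting as an assumption):

static size_t example_action_footprint(struct sigma_action *sa)
{
	/* header + payload, padded to an even total by the helper */
	return sigma_action_size(sa, sigma_action_len(sa));
}
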
diff --git a/include/linux/slab.h b/include/linux/slab.h
index fa9086647eb7..ad4dd1c8d30a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -105,7 +105,6 @@ void kmem_cache_destroy(struct kmem_cache *);
105int kmem_cache_shrink(struct kmem_cache *); 105int kmem_cache_shrink(struct kmem_cache *);
106void kmem_cache_free(struct kmem_cache *, void *); 106void kmem_cache_free(struct kmem_cache *, void *);
107unsigned int kmem_cache_size(struct kmem_cache *); 107unsigned int kmem_cache_size(struct kmem_cache *);
108const char *kmem_cache_name(struct kmem_cache *);
109 108
110/* 109/*
111 * Please use this macro to create slab caches. Simply specify the 110 * Please use this macro to create slab caches. Simply specify the
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 8b6e8ae5d5ca..45ca123e8002 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -32,10 +32,14 @@ enum stat_item {
32 DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */ 32 DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
33 DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */ 33 DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
34 ORDER_FALLBACK, /* Number of times fallback was necessary */ 34 ORDER_FALLBACK, /* Number of times fallback was necessary */
35 CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
35 NR_SLUB_STAT_ITEMS }; 36 NR_SLUB_STAT_ITEMS };
36 37
37struct kmem_cache_cpu { 38struct kmem_cache_cpu {
38 void **freelist; /* Pointer to first free per cpu object */ 39 void **freelist; /* Pointer to next available object */
40#ifdef CONFIG_CMPXCHG_LOCAL
41 unsigned long tid; /* Globally unique transaction id */
42#endif
39 struct page *page; /* The slab from which we are allocating */ 43 struct page *page; /* The slab from which we are allocating */
40 int node; /* The node of the page (or -1 for debug) */ 44 int node; /* The node of the page (or -1 for debug) */
41#ifdef CONFIG_SLUB_STATS 45#ifdef CONFIG_SLUB_STATS
@@ -70,6 +74,7 @@ struct kmem_cache {
70 struct kmem_cache_cpu __percpu *cpu_slab; 74 struct kmem_cache_cpu __percpu *cpu_slab;
 71 /* Used for retrieving partial slabs etc */ 75
72 unsigned long flags; 76 unsigned long flags;
77 unsigned long min_partial;
73 int size; /* The size of an object including meta data */ 78 int size; /* The size of an object including meta data */
74 int objsize; /* The size of an object without meta data */ 79 int objsize; /* The size of an object without meta data */
75 int offset; /* Free pointer offset. */ 80 int offset; /* Free pointer offset. */
@@ -83,7 +88,7 @@ struct kmem_cache {
83 void (*ctor)(void *); 88 void (*ctor)(void *);
84 int inuse; /* Offset to metadata */ 89 int inuse; /* Offset to metadata */
85 int align; /* Alignment */ 90 int align; /* Alignment */
86 unsigned long min_partial; 91 int reserved; /* Reserved bytes at the end of slabs */
87 const char *name; /* Name (only for display!) */ 92 const char *name; /* Name (only for display!) */
88 struct list_head list; /* List of slab caches */ 93 struct list_head list; /* List of slab caches */
89#ifdef CONFIG_SYSFS 94#ifdef CONFIG_SYSFS
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6dc95cac6b3d..74243c86ba39 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -10,6 +10,7 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/init.h>
13 14
14extern void cpu_idle(void); 15extern void cpu_idle(void);
15 16
@@ -114,6 +115,8 @@ int on_each_cpu(smp_call_func_t func, void *info, int wait);
114void smp_prepare_boot_cpu(void); 115void smp_prepare_boot_cpu(void);
115 116
116extern unsigned int setup_max_cpus; 117extern unsigned int setup_max_cpus;
118extern void __init setup_nr_cpu_ids(void);
119extern void __init smp_init(void);
117 120
118#else /* !SMP */ 121#else /* !SMP */
119 122
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4d559325d919..ed6ebe690f4a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -155,6 +155,15 @@ enum {
155#define SWAP_CLUSTER_MAX 32 155#define SWAP_CLUSTER_MAX 32
156#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX 156#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
157 157
158/*
159 * Ratio between the present memory in the zone and the "gap" that
160 * we're allowing kswapd to shrink in addition to the per-zone high
161 * wmark, even for zones that already have the high wmark satisfied,
 162 * in order to provide better per-zone lru behavior. It is acceptable
 163 * to spend no more than 1% of the zone's memory on this balancing "gap".
164 */
165#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
166
158#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ 167#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
159#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ 168#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
160#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ 169#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
@@ -215,6 +224,7 @@ extern void mark_page_accessed(struct page *);
215extern void lru_add_drain(void); 224extern void lru_add_drain(void);
216extern int lru_add_drain_all(void); 225extern int lru_add_drain_all(void);
217extern void rotate_reclaimable_page(struct page *page); 226extern void rotate_reclaimable_page(struct page *page);
227extern void deactivate_page(struct page *page);
218extern void swap_setup(void); 228extern void swap_setup(void);
219 229
220extern void add_page_to_unevictable_list(struct page *page); 230extern void add_page_to_unevictable_list(struct page *page);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 4e53d4641b38..9f469c700550 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -82,7 +82,7 @@ struct tty_buffer {
82 82
83 83
84struct tty_bufhead { 84struct tty_bufhead {
85 struct delayed_work work; 85 struct work_struct work;
86 spinlock_t lock; 86 spinlock_t lock;
87 struct tty_buffer *head; /* Queue head */ 87 struct tty_buffer *head; /* Queue head */
88 struct tty_buffer *tail; /* Active buffer */ 88 struct tty_buffer *tail; /* Active buffer */
diff --git a/include/linux/types.h b/include/linux/types.h
index c2a9eb44f2fa..176da8c1fbb1 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -150,6 +150,12 @@ typedef unsigned long blkcnt_t;
150#define pgoff_t unsigned long 150#define pgoff_t unsigned long
151#endif 151#endif
152 152
153#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
154typedef u64 dma_addr_t;
155#else
156typedef u32 dma_addr_t;
157#endif /* dma_addr_t */
158
153#endif /* __KERNEL__ */ 159#endif /* __KERNEL__ */
154 160
155/* 161/*
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 833e676d6d92..461c0119664f 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -220,12 +220,12 @@ static inline unsigned long node_page_state(int node,
220 zone_page_state(&zones[ZONE_MOVABLE], item); 220 zone_page_state(&zones[ZONE_MOVABLE], item);
221} 221}
222 222
223extern void zone_statistics(struct zone *, struct zone *); 223extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
224 224
225#else 225#else
226 226
227#define node_page_state(node, item) global_page_state(item) 227#define node_page_state(node, item) global_page_state(item)
228#define zone_statistics(_zl,_z) do { } while (0) 228#define zone_statistics(_zl, _z, gfp) do { } while (0)
229 229
230#endif /* CONFIG_NUMA */ 230#endif /* CONFIG_NUMA */
231 231
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 40c49cb3eb51..9c5a6b4de0a3 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -179,11 +179,16 @@ typedef z_stream *z_streamp;
179 179
180 /* basic functions */ 180 /* basic functions */
181 181
182extern int zlib_deflate_workspacesize (void); 182extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
183/* 183/*
184 Returns the number of bytes that needs to be allocated for a per- 184 Returns the number of bytes that needs to be allocated for a per-
185 stream workspace. A pointer to this number of bytes should be 185 stream workspace with the specified parameters. A pointer to this
186 returned in stream->workspace before calling zlib_deflateInit(). 186 number of bytes should be returned in stream->workspace before
187 you call zlib_deflateInit() or zlib_deflateInit2(). If you call
188 zlib_deflateInit(), specify windowBits = MAX_WBITS and memLevel =
189 MAX_MEM_LEVEL here. If you call zlib_deflateInit2(), the windowBits
190 and memLevel parameters passed to zlib_deflateInit2() must not
191 exceed those passed here.
187*/ 192*/
188 193
189/* 194/*
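
Callers that keep the defaults now pass the maximums explicitly. A minimal
sketch of the updated allocation, assuming a vmalloc'd workspace:

#include <linux/vmalloc.h>
#include <linux/zlib.h>

static int example_deflate_setup(z_stream *stream)
{
	/* sizes here must not be exceeded by a later zlib_deflateInit2() */
	stream->workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
							       MAX_MEM_LEVEL));
	if (!stream->workspace)
		return -ENOMEM;
	if (zlib_deflateInit(stream, Z_DEFAULT_COMPRESSION) != Z_OK) {
		vfree(stream->workspace);
		return -EINVAL;
	}
	return 0;
}
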
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 272f59336b73..30b49ed72f0d 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -801,8 +801,6 @@ struct netns_ipvs {
801 struct list_head rs_table[IP_VS_RTAB_SIZE]; 801 struct list_head rs_table[IP_VS_RTAB_SIZE];
802 /* ip_vs_app */ 802 /* ip_vs_app */
803 struct list_head app_list; 803 struct list_head app_list;
804 struct mutex app_mutex;
805 struct lock_class_key app_key; /* mutex debuging */
806 804
807 /* ip_vs_proto */ 805 /* ip_vs_proto */
808 #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ 806 #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 762e2abce889..27461d6dd46f 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -150,7 +150,7 @@ struct linux_xfrm_mib {
150#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ 150#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
151 do { \ 151 do { \
152 __typeof__(*mib[0]) *ptr = \ 152 __typeof__(*mib[0]) *ptr = \
153 __this_cpu_ptr((mib)[!in_softirq()]); \ 153 __this_cpu_ptr((mib)[0]); \
154 ptr->mibs[basefield##PKTS]++; \ 154 ptr->mibs[basefield##PKTS]++; \
155 ptr->mibs[basefield##OCTETS] += addend;\ 155 ptr->mibs[basefield##OCTETS] += addend;\
156 } while (0) 156 } while (0)
@@ -202,7 +202,7 @@ struct linux_xfrm_mib {
202#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ 202#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
203 do { \ 203 do { \
204 __typeof__(*mib[0]) *ptr; \ 204 __typeof__(*mib[0]) *ptr; \
205 ptr = __this_cpu_ptr((mib)[!in_softirq()]); \ 205 ptr = __this_cpu_ptr((mib)[0]); \
206 u64_stats_update_begin(&ptr->syncp); \ 206 u64_stats_update_begin(&ptr->syncp); \
207 ptr->mibs[basefield##PKTS]++; \ 207 ptr->mibs[basefield##PKTS]++; \
208 ptr->mibs[basefield##OCTETS] += addend; \ 208 ptr->mibs[basefield##OCTETS] += addend; \
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 42a8c32a10e2..cffa5dc66449 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1430,6 +1430,7 @@ extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1430extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); 1430extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1431extern int xfrm_init_replay(struct xfrm_state *x); 1431extern int xfrm_init_replay(struct xfrm_state *x);
1432extern int xfrm_state_mtu(struct xfrm_state *x, int mtu); 1432extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1433extern int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
1433extern int xfrm_init_state(struct xfrm_state *x); 1434extern int xfrm_init_state(struct xfrm_state *x);
1434extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); 1435extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1435extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, 1436extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
diff --git a/init/calibrate.c b/init/calibrate.c
index 24fe022c55f9..76ac9194cbc4 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -110,8 +110,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
110 110
111/* 111/*
112 * This is the number of bits of precision for the loops_per_jiffy. Each 112 * This is the number of bits of precision for the loops_per_jiffy. Each
113 * bit takes on average 1.5/HZ seconds. This (like the original) is a little 113 * time we refine our estimate after the first takes 1.5/HZ seconds, so try
114 * better than 1% 114 * to start with a good estimate.
115 * For the boot cpu we can skip the delay calibration and assign it a value 115 * For the boot cpu we can skip the delay calibration and assign it a value
116 * calculated based on the timer frequency. 116 * calculated based on the timer frequency.
117 * For the rest of the CPUs we cannot assume that the timer frequency is same as 117 * For the rest of the CPUs we cannot assume that the timer frequency is same as
@@ -119,10 +119,72 @@ static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
119 */ 119 */
120#define LPS_PREC 8 120#define LPS_PREC 8
121 121
122static unsigned long __cpuinit calibrate_delay_converge(void)
123{
124 /* First stage - slowly accelerate to find initial bounds */
125 unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
126 int trials = 0, band = 0, trial_in_band = 0;
127
128 lpj = (1<<12);
129
130 /* wait for "start of" clock tick */
131 ticks = jiffies;
132 while (ticks == jiffies)
133 ; /* nothing */
134 /* Go .. */
135 ticks = jiffies;
136 do {
137 if (++trial_in_band == (1<<band)) {
138 ++band;
139 trial_in_band = 0;
140 }
141 __delay(lpj * band);
142 trials += band;
143 } while (ticks == jiffies);
144 /*
145 * We overshot, so retreat to a clear underestimate. Then estimate
146 * the largest likely undershoot. This defines our chop bounds.
147 */
148 trials -= band;
149 loopadd_base = lpj * band;
150 lpj_base = lpj * trials;
151
152recalibrate:
153 lpj = lpj_base;
154 loopadd = loopadd_base;
155
156 /*
157 * Do a binary approximation to get lpj set to
158 * equal one clock (up to LPS_PREC bits)
159 */
160 chop_limit = lpj >> LPS_PREC;
161 while (loopadd > chop_limit) {
162 lpj += loopadd;
163 ticks = jiffies;
164 while (ticks == jiffies)
165 ; /* nothing */
166 ticks = jiffies;
167 __delay(lpj);
168 if (jiffies != ticks) /* longer than 1 tick */
169 lpj -= loopadd;
170 loopadd >>= 1;
171 }
172 /*
173 * If we incremented every single time possible, presume we've
174 * massively underestimated initially, and retry with a higher
175 * start, and larger range. (Only seen on x86_64, due to SMIs)
176 */
177 if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
178 lpj_base = lpj;
179 loopadd_base <<= 2;
180 goto recalibrate;
181 }
182
183 return lpj;
184}
185
122void __cpuinit calibrate_delay(void) 186void __cpuinit calibrate_delay(void)
123{ 187{
124 unsigned long ticks, loopbit;
125 int lps_precision = LPS_PREC;
126 static bool printed; 188 static bool printed;
127 189
128 if (preset_lpj) { 190 if (preset_lpj) {
@@ -139,39 +201,9 @@ void __cpuinit calibrate_delay(void)
139 pr_info("Calibrating delay using timer " 201 pr_info("Calibrating delay using timer "
140 "specific routine.. "); 202 "specific routine.. ");
141 } else { 203 } else {
142 loops_per_jiffy = (1<<12);
143
144 if (!printed) 204 if (!printed)
145 pr_info("Calibrating delay loop... "); 205 pr_info("Calibrating delay loop... ");
146 while ((loops_per_jiffy <<= 1) != 0) { 206 loops_per_jiffy = calibrate_delay_converge();
147 /* wait for "start of" clock tick */
148 ticks = jiffies;
149 while (ticks == jiffies)
150 /* nothing */;
151 /* Go .. */
152 ticks = jiffies;
153 __delay(loops_per_jiffy);
154 ticks = jiffies - ticks;
155 if (ticks)
156 break;
157 }
158
159 /*
160 * Do a binary approximation to get loops_per_jiffy set to
161 * equal one clock (up to lps_precision bits)
162 */
163 loops_per_jiffy >>= 1;
164 loopbit = loops_per_jiffy;
165 while (lps_precision-- && (loopbit >>= 1)) {
166 loops_per_jiffy |= loopbit;
167 ticks = jiffies;
168 while (ticks == jiffies)
169 /* nothing */;
170 ticks = jiffies;
171 __delay(loops_per_jiffy);
172 if (jiffies != ticks) /* longer than 1 tick */
173 loops_per_jiffy &= ~loopbit;
174 }
175 } 207 }
176 if (!printed) 208 if (!printed)
177 pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n", 209 pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
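
For scale: with LPS_PREC == 8 the binary chop runs until loopadd drops below
lpj >> 8, so the converged loops_per_jiffy sits within roughly 1/256 (about
0.4%) of one tick's worth of __delay() iterations; at lpj near 4,000,000 that
is a residual error on the order of 15,000 loops.
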
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 2b54bef33b55..3e0112157795 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -293,7 +293,8 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
293 293
294 sys_chdir((const char __user __force *)"/root"); 294 sys_chdir((const char __user __force *)"/root");
295 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev; 295 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
296 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n", 296 printk(KERN_INFO
297 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
297 current->fs->pwd.mnt->mnt_sb->s_type->name, 298 current->fs->pwd.mnt->mnt_sb->s_type->name,
298 current->fs->pwd.mnt->mnt_sb->s_flags & MS_RDONLY ? 299 current->fs->pwd.mnt->mnt_sb->s_flags & MS_RDONLY ?
299 " readonly" : "", MAJOR(ROOT_DEV), MINOR(ROOT_DEV)); 300 " readonly" : "", MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 6e1ee6987c78..fe9acb0ae480 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -64,7 +64,7 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
64 64
65 buf = kmalloc(size, GFP_KERNEL); 65 buf = kmalloc(size, GFP_KERNEL);
66 if (!buf) 66 if (!buf)
67 return -1; 67 return -ENOMEM;
68 68
69 minixsb = (struct minix_super_block *) buf; 69 minixsb = (struct minix_super_block *) buf;
70 ext2sb = (struct ext2_super_block *) buf; 70 ext2sb = (struct ext2_super_block *) buf;
diff --git a/init/main.c b/init/main.c
index 33c37c379e96..3627bb37225c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -129,63 +129,6 @@ static char *static_command_line;
129static char *execute_command; 129static char *execute_command;
130static char *ramdisk_execute_command; 130static char *ramdisk_execute_command;
131 131
132#ifdef CONFIG_SMP
133/* Setup configured maximum number of CPUs to activate */
134unsigned int setup_max_cpus = NR_CPUS;
135EXPORT_SYMBOL(setup_max_cpus);
136
137
138/*
139 * Setup routine for controlling SMP activation
140 *
141 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
142 * activation entirely (the MPS table probe still happens, though).
143 *
144 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
145 * greater than 0, limits the maximum number of CPUs activated in
146 * SMP mode to <NUM>.
147 */
148
149void __weak arch_disable_smp_support(void) { }
150
151static int __init nosmp(char *str)
152{
153 setup_max_cpus = 0;
154 arch_disable_smp_support();
155
156 return 0;
157}
158
159early_param("nosmp", nosmp);
160
161/* this is hard limit */
162static int __init nrcpus(char *str)
163{
164 int nr_cpus;
165
166 get_option(&str, &nr_cpus);
167 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
168 nr_cpu_ids = nr_cpus;
169
170 return 0;
171}
172
173early_param("nr_cpus", nrcpus);
174
175static int __init maxcpus(char *str)
176{
177 get_option(&str, &setup_max_cpus);
178 if (setup_max_cpus == 0)
179 arch_disable_smp_support();
180
181 return 0;
182}
183
184early_param("maxcpus", maxcpus);
185#else
186static const unsigned int setup_max_cpus = NR_CPUS;
187#endif
188
189/* 132/*
190 * If set, this is an indication to the drivers that reset the underlying 133 * If set, this is an indication to the drivers that reset the underlying
191 * device before going ahead with the initialization otherwise driver might 134 * device before going ahead with the initialization otherwise driver might
@@ -362,7 +305,7 @@ static int __init rdinit_setup(char *str)
362__setup("rdinit=", rdinit_setup); 305__setup("rdinit=", rdinit_setup);
363 306
364#ifndef CONFIG_SMP 307#ifndef CONFIG_SMP
365 308static const unsigned int setup_max_cpus = NR_CPUS;
366#ifdef CONFIG_X86_LOCAL_APIC 309#ifdef CONFIG_X86_LOCAL_APIC
367static void __init smp_init(void) 310static void __init smp_init(void)
368{ 311{
@@ -374,37 +317,6 @@ static void __init smp_init(void)
374 317
375static inline void setup_nr_cpu_ids(void) { } 318static inline void setup_nr_cpu_ids(void) { }
376static inline void smp_prepare_cpus(unsigned int maxcpus) { } 319static inline void smp_prepare_cpus(unsigned int maxcpus) { }
377
378#else
379
380/* Setup number of possible processor ids */
381int nr_cpu_ids __read_mostly = NR_CPUS;
382EXPORT_SYMBOL(nr_cpu_ids);
383
384/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
385static void __init setup_nr_cpu_ids(void)
386{
387 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
388}
389
390/* Called by boot processor to activate the rest. */
391static void __init smp_init(void)
392{
393 unsigned int cpu;
394
395 /* FIXME: This should be done in userspace --RR */
396 for_each_present_cpu(cpu) {
397 if (num_online_cpus() >= setup_max_cpus)
398 break;
399 if (!cpu_online(cpu))
400 cpu_up(cpu);
401 }
402
403 /* Any cleanup work */
404 printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
405 smp_cpus_done(setup_max_cpus);
406}
407
408#endif 320#endif
409 321
410/* 322/*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 95362d15128c..e31b220a743d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1813,10 +1813,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1813 1813
1814 /* Update the css_set linked lists if we're using them */ 1814 /* Update the css_set linked lists if we're using them */
1815 write_lock(&css_set_lock); 1815 write_lock(&css_set_lock);
1816 if (!list_empty(&tsk->cg_list)) { 1816 if (!list_empty(&tsk->cg_list))
1817 list_del(&tsk->cg_list); 1817 list_move(&tsk->cg_list, &newcg->tasks);
1818 list_add(&tsk->cg_list, &newcg->tasks);
1819 }
1820 write_unlock(&css_set_lock); 1818 write_unlock(&css_set_lock);
1821 1819
1822 for_each_subsys(root, ss) { 1820 for_each_subsys(root, ss) {
@@ -3655,12 +3653,12 @@ again:
3655 spin_lock(&release_list_lock); 3653 spin_lock(&release_list_lock);
3656 set_bit(CGRP_REMOVED, &cgrp->flags); 3654 set_bit(CGRP_REMOVED, &cgrp->flags);
3657 if (!list_empty(&cgrp->release_list)) 3655 if (!list_empty(&cgrp->release_list))
3658 list_del(&cgrp->release_list); 3656 list_del_init(&cgrp->release_list);
3659 spin_unlock(&release_list_lock); 3657 spin_unlock(&release_list_lock);
3660 3658
3661 cgroup_lock_hierarchy(cgrp->root); 3659 cgroup_lock_hierarchy(cgrp->root);
3662 /* delete this cgroup from parent->children */ 3660 /* delete this cgroup from parent->children */
3663 list_del(&cgrp->sibling); 3661 list_del_init(&cgrp->sibling);
3664 cgroup_unlock_hierarchy(cgrp->root); 3662 cgroup_unlock_hierarchy(cgrp->root);
3665 3663
3666 d = dget(cgrp->dentry); 3664 d = dget(cgrp->dentry);
@@ -3879,7 +3877,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
3879 subsys[ss->subsys_id] = NULL; 3877 subsys[ss->subsys_id] = NULL;
3880 3878
3881 /* remove subsystem from rootnode's list of subsystems */ 3879 /* remove subsystem from rootnode's list of subsystems */
3882 list_del(&ss->sibling); 3880 list_del_init(&ss->sibling);
3883 3881
3884 /* 3882 /*
3885 * disentangle the css from all css_sets attached to the dummytop. as 3883 * disentangle the css from all css_sets attached to the dummytop. as
@@ -4241,7 +4239,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
4241 if (!list_empty(&tsk->cg_list)) { 4239 if (!list_empty(&tsk->cg_list)) {
4242 write_lock(&css_set_lock); 4240 write_lock(&css_set_lock);
4243 if (!list_empty(&tsk->cg_list)) 4241 if (!list_empty(&tsk->cg_list))
4244 list_del(&tsk->cg_list); 4242 list_del_init(&tsk->cg_list);
4245 write_unlock(&css_set_lock); 4243 write_unlock(&css_set_lock);
4246 } 4244 }
4247 4245
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 156cc5556140..c95fc4df0faa 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -160,7 +160,6 @@ static void cpu_notify_nofail(unsigned long val, void *v)
160{ 160{
161 BUG_ON(cpu_notify(val, v)); 161 BUG_ON(cpu_notify(val, v));
162} 162}
163
164EXPORT_SYMBOL(register_cpu_notifier); 163EXPORT_SYMBOL(register_cpu_notifier);
165 164
166void __ref unregister_cpu_notifier(struct notifier_block *nb) 165void __ref unregister_cpu_notifier(struct notifier_block *nb)
@@ -205,7 +204,6 @@ static int __ref take_cpu_down(void *_param)
205 return err; 204 return err;
206 205
207 cpu_notify(CPU_DYING | param->mod, param->hcpu); 206 cpu_notify(CPU_DYING | param->mod, param->hcpu);
208
209 return 0; 207 return 0;
210} 208}
211 209
@@ -227,6 +225,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
227 return -EINVAL; 225 return -EINVAL;
228 226
229 cpu_hotplug_begin(); 227 cpu_hotplug_begin();
228
230 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); 229 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
231 if (err) { 230 if (err) {
232 nr_calls--; 231 nr_calls--;
@@ -304,7 +303,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
304 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); 303 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
305 if (ret) { 304 if (ret) {
306 nr_calls--; 305 nr_calls--;
307 printk("%s: attempt to bring up CPU %u failed\n", 306 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
308 __func__, cpu); 307 __func__, cpu);
309 goto out_notify; 308 goto out_notify;
310 } 309 }
@@ -450,14 +449,14 @@ void __ref enable_nonboot_cpus(void)
450 if (cpumask_empty(frozen_cpus)) 449 if (cpumask_empty(frozen_cpus))
451 goto out; 450 goto out;
452 451
453 printk("Enabling non-boot CPUs ...\n"); 452 printk(KERN_INFO "Enabling non-boot CPUs ...\n");
454 453
455 arch_enable_nonboot_cpus_begin(); 454 arch_enable_nonboot_cpus_begin();
456 455
457 for_each_cpu(cpu, frozen_cpus) { 456 for_each_cpu(cpu, frozen_cpus) {
458 error = _cpu_up(cpu, 1); 457 error = _cpu_up(cpu, 1);
459 if (!error) { 458 if (!error) {
460 printk("CPU%d is up\n", cpu); 459 printk(KERN_INFO "CPU%d is up\n", cpu);
461 continue; 460 continue;
462 } 461 }
463 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 462 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
@@ -509,7 +508,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
509 */ 508 */
510 509
511/* cpu_bit_bitmap[0] is empty - so we can back into it */ 510/* cpu_bit_bitmap[0] is empty - so we can back into it */
512#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x) 511#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
513#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 512#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
514#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 513#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
515#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 514#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
diff --git a/kernel/fork.c b/kernel/fork.c
index 05b92c457010..f2b494d7c557 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -40,6 +40,7 @@
40#include <linux/tracehook.h> 40#include <linux/tracehook.h>
41#include <linux/futex.h> 41#include <linux/futex.h>
42#include <linux/compat.h> 42#include <linux/compat.h>
43#include <linux/kthread.h>
43#include <linux/task_io_accounting_ops.h> 44#include <linux/task_io_accounting_ops.h>
44#include <linux/rcupdate.h> 45#include <linux/rcupdate.h>
45#include <linux/ptrace.h> 46#include <linux/ptrace.h>
@@ -109,20 +110,25 @@ int nr_processes(void)
109} 110}
110 111
111#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 112#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
112# define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL) 113# define alloc_task_struct_node(node) \
113# define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk)) 114 kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
115# define free_task_struct(tsk) \
116 kmem_cache_free(task_struct_cachep, (tsk))
114static struct kmem_cache *task_struct_cachep; 117static struct kmem_cache *task_struct_cachep;
115#endif 118#endif
116 119
117#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR 120#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
118static inline struct thread_info *alloc_thread_info(struct task_struct *tsk) 121static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
122 int node)
119{ 123{
120#ifdef CONFIG_DEBUG_STACK_USAGE 124#ifdef CONFIG_DEBUG_STACK_USAGE
121 gfp_t mask = GFP_KERNEL | __GFP_ZERO; 125 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
122#else 126#else
123 gfp_t mask = GFP_KERNEL; 127 gfp_t mask = GFP_KERNEL;
124#endif 128#endif
125 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); 129 struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
130
131 return page ? page_address(page) : NULL;
126} 132}
127 133
128static inline void free_thread_info(struct thread_info *ti) 134static inline void free_thread_info(struct thread_info *ti)
@@ -249,16 +255,16 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
249 struct task_struct *tsk; 255 struct task_struct *tsk;
250 struct thread_info *ti; 256 struct thread_info *ti;
251 unsigned long *stackend; 257 unsigned long *stackend;
252 258 int node = tsk_fork_get_node(orig);
253 int err; 259 int err;
254 260
255 prepare_to_copy(orig); 261 prepare_to_copy(orig);
256 262
257 tsk = alloc_task_struct(); 263 tsk = alloc_task_struct_node(node);
258 if (!tsk) 264 if (!tsk)
259 return NULL; 265 return NULL;
260 266
261 ti = alloc_thread_info(tsk); 267 ti = alloc_thread_info_node(tsk, node);
262 if (!ti) { 268 if (!ti) {
263 free_task_struct(tsk); 269 free_task_struct(tsk);
264 return NULL; 270 return NULL;
@@ -1513,38 +1519,24 @@ void __init proc_caches_init(void)
1513} 1519}
1514 1520
1515/* 1521/*
1516 * Check constraints on flags passed to the unshare system call and 1522 * Check constraints on flags passed to the unshare system call.
1517 * force unsharing of additional process context as appropriate.
1518 */ 1523 */
1519static void check_unshare_flags(unsigned long *flags_ptr) 1524static int check_unshare_flags(unsigned long unshare_flags)
1520{ 1525{
1526 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1527 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1528 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
1529 return -EINVAL;
1521 /* 1530 /*
1522 * If unsharing a thread from a thread group, must also 1531 * Not implemented, but pretend it works if there is nothing to
1523 * unshare vm. 1532 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1524 */ 1533 * needs to unshare vm.
1525 if (*flags_ptr & CLONE_THREAD)
1526 *flags_ptr |= CLONE_VM;
1527
1528 /*
1529 * If unsharing vm, must also unshare signal handlers.
1530 */
1531 if (*flags_ptr & CLONE_VM)
1532 *flags_ptr |= CLONE_SIGHAND;
1533
1534 /*
1535 * If unsharing namespace, must also unshare filesystem information.
1536 */ 1534 */
1537 if (*flags_ptr & CLONE_NEWNS) 1535 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1538 *flags_ptr |= CLONE_FS; 1536 /* FIXME: get_task_mm() increments ->mm_users */
1539} 1537 if (atomic_read(&current->mm->mm_users) > 1)
1540 1538 return -EINVAL;
1541/* 1539 }
1542 * Unsharing of tasks created with CLONE_THREAD is not supported yet
1543 */
1544static int unshare_thread(unsigned long unshare_flags)
1545{
1546 if (unshare_flags & CLONE_THREAD)
1547 return -EINVAL;
1548 1540
1549 return 0; 1541 return 0;
1550} 1542}
@@ -1571,34 +1563,6 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1571} 1563}
1572 1564
1573/* 1565/*
1574 * Unsharing of sighand is not supported yet
1575 */
1576static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
1577{
1578 struct sighand_struct *sigh = current->sighand;
1579
1580 if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
1581 return -EINVAL;
1582 else
1583 return 0;
1584}
1585
1586/*
1587 * Unshare vm if it is being shared
1588 */
1589static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
1590{
1591 struct mm_struct *mm = current->mm;
1592
1593 if ((unshare_flags & CLONE_VM) &&
1594 (mm && atomic_read(&mm->mm_users) > 1)) {
1595 return -EINVAL;
1596 }
1597
1598 return 0;
1599}
1600
1601/*
1602 * Unshare file descriptor table if it is being shared 1566 * Unshare file descriptor table if it is being shared
1603 */ 1567 */
1604static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) 1568static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
@@ -1626,45 +1590,37 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
1626 */ 1590 */
1627SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) 1591SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1628{ 1592{
1629 int err = 0;
1630 struct fs_struct *fs, *new_fs = NULL; 1593 struct fs_struct *fs, *new_fs = NULL;
1631 struct sighand_struct *new_sigh = NULL;
1632 struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
1633 struct files_struct *fd, *new_fd = NULL; 1594 struct files_struct *fd, *new_fd = NULL;
1634 struct nsproxy *new_nsproxy = NULL; 1595 struct nsproxy *new_nsproxy = NULL;
1635 int do_sysvsem = 0; 1596 int do_sysvsem = 0;
1597 int err;
1636 1598
1637 check_unshare_flags(&unshare_flags); 1599 err = check_unshare_flags(unshare_flags);
1638 1600 if (err)
1639 /* Return -EINVAL for all unsupported flags */
1640 err = -EINVAL;
1641 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1642 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1643 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
1644 goto bad_unshare_out; 1601 goto bad_unshare_out;
1645 1602
1646 /* 1603 /*
1604 * If unsharing namespace, must also unshare filesystem information.
1605 */
1606 if (unshare_flags & CLONE_NEWNS)
1607 unshare_flags |= CLONE_FS;
1608 /*
1647 * CLONE_NEWIPC must also detach from the undolist: after switching 1609 * CLONE_NEWIPC must also detach from the undolist: after switching
1648 * to a new ipc namespace, the semaphore arrays from the old 1610 * to a new ipc namespace, the semaphore arrays from the old
1649 * namespace are unreachable. 1611 * namespace are unreachable.
1650 */ 1612 */
1651 if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) 1613 if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1652 do_sysvsem = 1; 1614 do_sysvsem = 1;
1653 if ((err = unshare_thread(unshare_flags)))
1654 goto bad_unshare_out;
1655 if ((err = unshare_fs(unshare_flags, &new_fs))) 1615 if ((err = unshare_fs(unshare_flags, &new_fs)))
1656 goto bad_unshare_cleanup_thread; 1616 goto bad_unshare_out;
1657 if ((err = unshare_sighand(unshare_flags, &new_sigh)))
1658 goto bad_unshare_cleanup_fs;
1659 if ((err = unshare_vm(unshare_flags, &new_mm)))
1660 goto bad_unshare_cleanup_sigh;
1661 if ((err = unshare_fd(unshare_flags, &new_fd))) 1617 if ((err = unshare_fd(unshare_flags, &new_fd)))
1662 goto bad_unshare_cleanup_vm; 1618 goto bad_unshare_cleanup_fs;
1663 if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, 1619 if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
1664 new_fs))) 1620 new_fs)))
1665 goto bad_unshare_cleanup_fd; 1621 goto bad_unshare_cleanup_fd;
1666 1622
1667 if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) { 1623 if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
1668 if (do_sysvsem) { 1624 if (do_sysvsem) {
1669 /* 1625 /*
1670 * CLONE_SYSVSEM is equivalent to sys_exit(). 1626 * CLONE_SYSVSEM is equivalent to sys_exit().
@@ -1690,19 +1646,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1690 spin_unlock(&fs->lock); 1646 spin_unlock(&fs->lock);
1691 } 1647 }
1692 1648
1693 if (new_mm) {
1694 mm = current->mm;
1695 active_mm = current->active_mm;
1696 current->mm = new_mm;
1697 current->active_mm = new_mm;
1698 if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
1699 atomic_dec(&mm->oom_disable_count);
1700 atomic_inc(&new_mm->oom_disable_count);
1701 }
1702 activate_mm(active_mm, new_mm);
1703 new_mm = mm;
1704 }
1705
1706 if (new_fd) { 1649 if (new_fd) {
1707 fd = current->files; 1650 fd = current->files;
1708 current->files = new_fd; 1651 current->files = new_fd;
@@ -1719,20 +1662,10 @@ bad_unshare_cleanup_fd:
1719 if (new_fd) 1662 if (new_fd)
1720 put_files_struct(new_fd); 1663 put_files_struct(new_fd);
1721 1664
1722bad_unshare_cleanup_vm:
1723 if (new_mm)
1724 mmput(new_mm);
1725
1726bad_unshare_cleanup_sigh:
1727 if (new_sigh)
1728 if (atomic_dec_and_test(&new_sigh->count))
1729 kmem_cache_free(sighand_cachep, new_sigh);
1730
1731bad_unshare_cleanup_fs: 1665bad_unshare_cleanup_fs:
1732 if (new_fs) 1666 if (new_fs)
1733 free_fs_struct(new_fs); 1667 free_fs_struct(new_fs);
1734 1668
1735bad_unshare_cleanup_thread:
1736bad_unshare_out: 1669bad_unshare_out:
1737 return err; 1670 return err;
1738} 1671}
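
The fork.c hunk above folds the old unshare_thread()/unshare_sighand()/unshare_vm() stubs into check_unshare_flags(), which now takes the flag word by value, validates it, and rejects CLONE_THREAD/CLONE_SIGHAND/CLONE_VM up front whenever the mm is genuinely shared; the CLONE_NEWNS-implies-CLONE_FS rule moves into sys_unshare() itself. A minimal userspace sketch of the resulting unshare(2) semantics (illustrative only; the mount-namespace case needs CAP_SYS_ADMIN):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* CLONE_NEWNS implies CLONE_FS in-kernel, so one call detaches
         * both the mount namespace and the fs_struct. */
        if (unshare(CLONE_NEWNS) == -1)
            printf("unshare(CLONE_NEWNS): %s\n", strerror(errno));

        /* CLONE_VM succeeds as a no-op for a single-threaded process,
         * but fails with EINVAL once the mm is shared with a thread. */
        if (unshare(CLONE_VM) == -1)
            printf("unshare(CLONE_VM): %s\n", strerror(errno));

        return 0;
    }
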
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 6f6d091b5757..75dcca37d61a 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -477,13 +477,11 @@ static int s_show(struct seq_file *m, void *p)
477 */ 477 */
478 type = iter->exported ? toupper(iter->type) : 478 type = iter->exported ? toupper(iter->type) :
479 tolower(iter->type); 479 tolower(iter->type);
480 seq_printf(m, "%0*lx %c %s\t[%s]\n", 480 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
481 (int)(2 * sizeof(void *)), 481 type, iter->name, iter->module_name);
482 iter->value, type, iter->name, iter->module_name);
483 } else 482 } else
484 seq_printf(m, "%0*lx %c %s\n", 483 seq_printf(m, "%pK %c %s\n", (void *)iter->value,
485 (int)(2 * sizeof(void *)), 484 iter->type, iter->name);
486 iter->value, iter->type, iter->name);
487 return 0; 485 return 0;
488} 486}
489 487
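
kallsyms now emits addresses with the %pK extension, so /proc/kallsyms honors the kptr_restrict sysctl: readers without CAP_SYSLOG see an all-zero value of pointer width when kptr_restrict is 1, and every reader does at 2. A hedged sketch of opting any seq_file handler into the same masking (the handler itself is hypothetical):

    #include <linux/seq_file.h>

    /* Prints its own address; vsprintf masks it to zeros for readers
     * that kptr_restrict says may not see kernel pointers. */
    static int demo_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "handler at %pK\n", (void *)demo_show);
        return 0;
    }
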
diff --git a/kernel/kthread.c b/kernel/kthread.c
index c55afba990a3..684ab3f7dd72 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -27,6 +27,7 @@ struct kthread_create_info
27 /* Information passed to kthread() from kthreadd. */ 27 /* Information passed to kthread() from kthreadd. */
28 int (*threadfn)(void *data); 28 int (*threadfn)(void *data);
29 void *data; 29 void *data;
30 int node;
30 31
31 /* Result passed back to kthread_create() from kthreadd. */ 32 /* Result passed back to kthread_create() from kthreadd. */
32 struct task_struct *result; 33 struct task_struct *result;
@@ -98,10 +99,23 @@ static int kthread(void *_create)
98 do_exit(ret); 99 do_exit(ret);
99} 100}
100 101
102/* called from do_fork() to get node information for the task about to be created */
103int tsk_fork_get_node(struct task_struct *tsk)
104{
105#ifdef CONFIG_NUMA
106 if (tsk == kthreadd_task)
107 return tsk->pref_node_fork;
108#endif
109 return numa_node_id();
110}
111
101static void create_kthread(struct kthread_create_info *create) 112static void create_kthread(struct kthread_create_info *create)
102{ 113{
103 int pid; 114 int pid;
104 115
116#ifdef CONFIG_NUMA
117 current->pref_node_fork = create->node;
118#endif
105 /* We want our own signal handler (we take no signals by default). */ 119 /* We want our own signal handler (we take no signals by default). */
106 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); 120 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
107 if (pid < 0) { 121 if (pid < 0) {
@@ -111,15 +125,18 @@ static void create_kthread(struct kthread_create_info *create)
111} 125}
112 126
113/** 127/**
114 * kthread_create - create a kthread. 128 * kthread_create_on_node - create a kthread.
115 * @threadfn: the function to run until signal_pending(current). 129 * @threadfn: the function to run until signal_pending(current).
116 * @data: data ptr for @threadfn. 130 * @data: data ptr for @threadfn.
131 * @node: memory node number.
117 * @namefmt: printf-style name for the thread. 132 * @namefmt: printf-style name for the thread.
118 * 133 *
119 * Description: This helper function creates and names a kernel 134 * Description: This helper function creates and names a kernel
120 * thread. The thread will be stopped: use wake_up_process() to start 135 * thread. The thread will be stopped: use wake_up_process() to start
121 * it. See also kthread_run(). 136 * it. See also kthread_run().
122 * 137 *
138 * If thread is going to be bound on a particular cpu, give its node
139 * in @node, to get NUMA affinity for kthread stack, or else give -1.
123 * When woken, the thread will run @threadfn() with @data as its 140 * When woken, the thread will run @threadfn() with @data as its
124 * argument. @threadfn() can either call do_exit() directly if it is a 141 * argument. @threadfn() can either call do_exit() directly if it is a
125 * standalone thread for which no one will call kthread_stop(), or 142 * standalone thread for which no one will call kthread_stop(), or
@@ -129,15 +146,17 @@ static void create_kthread(struct kthread_create_info *create)
129 * 146 *
130 * Returns a task_struct or ERR_PTR(-ENOMEM). 147 * Returns a task_struct or ERR_PTR(-ENOMEM).
131 */ 148 */
132struct task_struct *kthread_create(int (*threadfn)(void *data), 149struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
133 void *data, 150 void *data,
134 const char namefmt[], 151 int node,
135 ...) 152 const char namefmt[],
153 ...)
136{ 154{
137 struct kthread_create_info create; 155 struct kthread_create_info create;
138 156
139 create.threadfn = threadfn; 157 create.threadfn = threadfn;
140 create.data = data; 158 create.data = data;
159 create.node = node;
141 init_completion(&create.done); 160 init_completion(&create.done);
142 161
143 spin_lock(&kthread_create_lock); 162 spin_lock(&kthread_create_lock);
@@ -164,7 +183,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
164 } 183 }
165 return create.result; 184 return create.result;
166} 185}
167EXPORT_SYMBOL(kthread_create); 186EXPORT_SYMBOL(kthread_create_on_node);
168 187
169/** 188/**
170 * kthread_bind - bind a just-created kthread to a cpu. 189 * kthread_bind - bind a just-created kthread to a cpu.
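
kthread_create() gains a NUMA-aware variant here: kthread_create_on_node() forwards a node hint to kthreadd through pref_node_fork/tsk_fork_get_node(), so the new thread's stack is allocated on that node; -1 means no preference. The softirq, stop_machine and workqueue hunks below convert their per-CPU threads to it. A sketch of a typical caller (the thread function is passed in to keep the example self-contained):

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/topology.h>
    #include <linux/err.h>

    static struct task_struct *start_worker(int (*fn)(void *), int cpu)
    {
        struct task_struct *p;

        /* Ask for a stack on the CPU's home node, then pin the
         * thread to that CPU before letting it run. */
        p = kthread_create_on_node(fn, NULL, cpu_to_node(cpu),
                                   "mywork/%d", cpu);
        if (!IS_ERR(p)) {
            kthread_bind(p, cpu);
            wake_up_process(p);
        }
        return p;
    }
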
diff --git a/kernel/module.c b/kernel/module.c
index efa290ea94bf..1f9f7bc56ca1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1168,7 +1168,7 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
1168{ 1168{
1169 struct module_sect_attr *sattr = 1169 struct module_sect_attr *sattr =
1170 container_of(mattr, struct module_sect_attr, mattr); 1170 container_of(mattr, struct module_sect_attr, mattr);
1171 return sprintf(buf, "0x%lx\n", sattr->address); 1171 return sprintf(buf, "0x%pK\n", (void *)sattr->address);
1172} 1172}
1173 1173
1174static void free_sect_attrs(struct module_sect_attrs *sect_attrs) 1174static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
@@ -3224,7 +3224,7 @@ static int m_show(struct seq_file *m, void *p)
3224 mod->state == MODULE_STATE_COMING ? "Loading": 3224 mod->state == MODULE_STATE_COMING ? "Loading":
3225 "Live"); 3225 "Live");
3226 /* Used by oprofile and other similar tools. */ 3226 /* Used by oprofile and other similar tools. */
3227 seq_printf(m, " 0x%p", mod->module_core); 3227 seq_printf(m, " 0x%pK", mod->module_core);
3228 3228
3229 /* Taints info */ 3229 /* Taints info */
3230 if (mod->taints) 3230 if (mod->taints)
diff --git a/kernel/panic.c b/kernel/panic.c
index 991bb87a1704..69231670eb95 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -433,3 +433,13 @@ EXPORT_SYMBOL(__stack_chk_fail);
433 433
434core_param(panic, panic_timeout, int, 0644); 434core_param(panic, panic_timeout, int, 0644);
435core_param(pause_on_oops, pause_on_oops, int, 0644); 435core_param(pause_on_oops, pause_on_oops, int, 0644);
436
437static int __init oops_setup(char *s)
438{
439 if (!s)
440 return -EINVAL;
441 if (!strcmp(s, "panic"))
442 panic_on_oops = 1;
443 return 0;
444}
445early_param("oops", oops_setup);
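
The new early parameter gives panic_on_oops a boot-time switch, equivalent to setting the kernel.panic_on_oops sysctl before userspace comes up. A hypothetical bootloader entry combining it with a reboot timeout (paths and root device are made up):

    linux /boot/vmlinuz root=/dev/sda1 oops=panic panic=30
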
diff --git a/kernel/printk.c b/kernel/printk.c
index 33284adb2189..da8ca817eae3 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -53,7 +53,7 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
53#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) 53#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
54 54
55/* printk's without a loglevel use this.. */ 55/* printk's without a loglevel use this.. */
56#define DEFAULT_MESSAGE_LOGLEVEL 4 /* KERN_WARNING */ 56#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
57 57
58/* We show everything that is MORE important than this.. */ 58/* We show everything that is MORE important than this.. */
59#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ 59#define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
@@ -113,6 +113,11 @@ static unsigned con_start; /* Index into log_buf: next char to be sent to consol
113static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */ 113static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */
114 114
115/* 115/*
116 * If exclusive_console is non-NULL then only this console is to be printed to.
117 */
118static struct console *exclusive_console;
119
120/*
116 * Array of consoles built from command line options (console=) 121 * Array of consoles built from command line options (console=)
117 */ 122 */
118struct console_cmdline 123struct console_cmdline
@@ -476,6 +481,8 @@ static void __call_console_drivers(unsigned start, unsigned end)
476 struct console *con; 481 struct console *con;
477 482
478 for_each_console(con) { 483 for_each_console(con) {
484 if (exclusive_console && con != exclusive_console)
485 continue;
479 if ((con->flags & CON_ENABLED) && con->write && 486 if ((con->flags & CON_ENABLED) && con->write &&
480 (cpu_online(smp_processor_id()) || 487 (cpu_online(smp_processor_id()) ||
481 (con->flags & CON_ANYTIME))) 488 (con->flags & CON_ANYTIME)))
@@ -1230,6 +1237,11 @@ void console_unlock(void)
1230 local_irq_restore(flags); 1237 local_irq_restore(flags);
1231 } 1238 }
1232 console_locked = 0; 1239 console_locked = 0;
1240
1241 /* Release the exclusive_console once it is used */
1242 if (unlikely(exclusive_console))
1243 exclusive_console = NULL;
1244
1233 up(&console_sem); 1245 up(&console_sem);
1234 spin_unlock_irqrestore(&logbuf_lock, flags); 1246 spin_unlock_irqrestore(&logbuf_lock, flags);
1235 if (wake_klogd) 1247 if (wake_klogd)
@@ -1316,6 +1328,18 @@ void console_start(struct console *console)
1316} 1328}
1317EXPORT_SYMBOL(console_start); 1329EXPORT_SYMBOL(console_start);
1318 1330
1331static int __read_mostly keep_bootcon;
1332
1333static int __init keep_bootcon_setup(char *str)
1334{
1335 keep_bootcon = 1;
1336 printk(KERN_INFO "debug: skip boot console de-registration.\n");
1337
1338 return 0;
1339}
1340
1341early_param("keep_bootcon", keep_bootcon_setup);
1342
1319/* 1343/*
1320 * The console driver calls this routine during kernel initialization 1344 * The console driver calls this routine during kernel initialization
1321 * to register the console printing procedure with printk() and to 1345 * to register the console printing procedure with printk() and to
@@ -1452,6 +1476,12 @@ void register_console(struct console *newcon)
1452 spin_lock_irqsave(&logbuf_lock, flags); 1476 spin_lock_irqsave(&logbuf_lock, flags);
1453 con_start = log_start; 1477 con_start = log_start;
1454 spin_unlock_irqrestore(&logbuf_lock, flags); 1478 spin_unlock_irqrestore(&logbuf_lock, flags);
1479 /*
1480 * We're about to replay the log buffer. Only do this to the
1481 * just-registered console to avoid excessive message spam to
1482 * the already-registered consoles.
1483 */
1484 exclusive_console = newcon;
1455 } 1485 }
1456 console_unlock(); 1486 console_unlock();
1457 console_sysfs_notify(); 1487 console_sysfs_notify();
@@ -1463,7 +1493,9 @@ void register_console(struct console *newcon)
1463 * users know there might be something in the kernel's log buffer that 1493 * users know there might be something in the kernel's log buffer that
1464 * went to the bootconsole (that they do not see on the real console) 1494 * went to the bootconsole (that they do not see on the real console)
1465 */ 1495 */
1466 if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) { 1496 if (bcon &&
1497 ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
1498 !keep_bootcon) {
1467 /* we need to iterate through twice, to make sure we print 1499 /* we need to iterate through twice, to make sure we print
1468 * everything out, before we unregister the console(s) 1500 * everything out, before we unregister the console(s)
1469 */ 1501 */
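
Two printk changes land together here: a console registering with CON_PRINTBUFFER gets the log-buffer replay directed only at itself via exclusive_console, instead of re-spamming every console already registered, and the keep_bootcon early parameter skips the usual boot-console unregistration so early output can be compared against the real console. An illustrative command line (device names assumed):

    linux /boot/vmlinuz console=ttyS0,115200 earlyprintk=serial keep_bootcon
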
diff --git a/kernel/smp.c b/kernel/smp.c
index 7cbd0f293df4..73a195193558 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -604,6 +604,87 @@ void ipi_call_unlock_irq(void)
604} 604}
605#endif /* USE_GENERIC_SMP_HELPERS */ 605#endif /* USE_GENERIC_SMP_HELPERS */
606 606
607/* Setup configured maximum number of CPUs to activate */
608unsigned int setup_max_cpus = NR_CPUS;
609EXPORT_SYMBOL(setup_max_cpus);
610
611
612/*
613 * Setup routine for controlling SMP activation
614 *
615 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
616 * activation entirely (the MPS table probe still happens, though).
617 *
618 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
619 * greater than 0, limits the maximum number of CPUs activated in
620 * SMP mode to <NUM>.
621 */
622
623void __weak arch_disable_smp_support(void) { }
624
625static int __init nosmp(char *str)
626{
627 setup_max_cpus = 0;
628 arch_disable_smp_support();
629
630 return 0;
631}
632
633early_param("nosmp", nosmp);
634
635/* this is hard limit */
636static int __init nrcpus(char *str)
637{
638 int nr_cpus;
639
640 get_option(&str, &nr_cpus);
641 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
642 nr_cpu_ids = nr_cpus;
643
644 return 0;
645}
646
647early_param("nr_cpus", nrcpus);
648
649static int __init maxcpus(char *str)
650{
651 get_option(&str, &setup_max_cpus);
652 if (setup_max_cpus == 0)
653 arch_disable_smp_support();
654
655 return 0;
656}
657
658early_param("maxcpus", maxcpus);
659
660/* Setup number of possible processor ids */
661int nr_cpu_ids __read_mostly = NR_CPUS;
662EXPORT_SYMBOL(nr_cpu_ids);
663
664/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
665void __init setup_nr_cpu_ids(void)
666{
667 nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
668}
669
670/* Called by boot processor to activate the rest. */
671void __init smp_init(void)
672{
673 unsigned int cpu;
674
675 /* FIXME: This should be done in userspace --RR */
676 for_each_present_cpu(cpu) {
677 if (num_online_cpus() >= setup_max_cpus)
678 break;
679 if (!cpu_online(cpu))
680 cpu_up(cpu);
681 }
682
683 /* Any cleanup work */
684 printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
685 smp_cpus_done(setup_max_cpus);
686}
687
607/* 688/*
608 * Call a function on all processors. May be used during early boot while 689 * Call a function on all processors. May be used during early boot while
609 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead 690 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
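
The nosmp/nr_cpus/maxcpus handling and smp_init() move out of init/main.c into kernel/smp.c. The distinction the comments only hint at: nr_cpus= is a hard cap on nr_cpu_ids, and with it on per-CPU allocations, while maxcpus= merely limits how many CPUs smp_init() onlines at boot; the rest remain hot-pluggable. Hypothetical examples:

    linux /boot/vmlinuz maxcpus=2   # online two CPUs at boot, hotplug the rest later
    linux /boot/vmlinuz nr_cpus=4   # hard-cap possible CPUs (shrinks per-CPU memory)
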
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 56e5dec837f0..735d87095172 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -845,7 +845,10 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
845 switch (action) { 845 switch (action) {
846 case CPU_UP_PREPARE: 846 case CPU_UP_PREPARE:
847 case CPU_UP_PREPARE_FROZEN: 847 case CPU_UP_PREPARE_FROZEN:
848 p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); 848 p = kthread_create_on_node(run_ksoftirqd,
849 hcpu,
850 cpu_to_node(hotcpu),
851 "ksoftirqd/%d", hotcpu);
849 if (IS_ERR(p)) { 852 if (IS_ERR(p)) {
850 printk("ksoftirqd for %i failed\n", hotcpu); 853 printk("ksoftirqd for %i failed\n", hotcpu);
851 return notifier_from_errno(PTR_ERR(p)); 854 return notifier_from_errno(PTR_ERR(p));
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 2df820b03beb..e3516b29076c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -301,8 +301,10 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
301 case CPU_UP_PREPARE: 301 case CPU_UP_PREPARE:
302 BUG_ON(stopper->thread || stopper->enabled || 302 BUG_ON(stopper->thread || stopper->enabled ||
303 !list_empty(&stopper->works)); 303 !list_empty(&stopper->works));
304 p = kthread_create(cpu_stopper_thread, stopper, "migration/%d", 304 p = kthread_create_on_node(cpu_stopper_thread,
305 cpu); 305 stopper,
306 cpu_to_node(cpu),
307 "migration/%d", cpu);
306 if (IS_ERR(p)) 308 if (IS_ERR(p))
307 return notifier_from_errno(PTR_ERR(p)); 309 return notifier_from_errno(PTR_ERR(p));
308 get_task_struct(p); 310 get_task_struct(p);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 18bb15776c57..140dce750450 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -48,12 +48,15 @@ static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
48 * Should we panic when a soft-lockup or hard-lockup occurs: 48 * Should we panic when a soft-lockup or hard-lockup occurs:
49 */ 49 */
50#ifdef CONFIG_HARDLOCKUP_DETECTOR 50#ifdef CONFIG_HARDLOCKUP_DETECTOR
51static int hardlockup_panic; 51static int hardlockup_panic =
52 CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
52 53
53static int __init hardlockup_panic_setup(char *str) 54static int __init hardlockup_panic_setup(char *str)
54{ 55{
55 if (!strncmp(str, "panic", 5)) 56 if (!strncmp(str, "panic", 5))
56 hardlockup_panic = 1; 57 hardlockup_panic = 1;
58 else if (!strncmp(str, "nopanic", 7))
59 hardlockup_panic = 0;
57 else if (!strncmp(str, "0", 1)) 60 else if (!strncmp(str, "0", 1))
58 watchdog_enabled = 0; 61 watchdog_enabled = 0;
59 return 1; 62 return 1;
@@ -415,19 +418,22 @@ static int watchdog_prepare_cpu(int cpu)
415static int watchdog_enable(int cpu) 418static int watchdog_enable(int cpu)
416{ 419{
417 struct task_struct *p = per_cpu(softlockup_watchdog, cpu); 420 struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
418 int err; 421 int err = 0;
419 422
420 /* enable the perf event */ 423 /* enable the perf event */
421 err = watchdog_nmi_enable(cpu); 424 err = watchdog_nmi_enable(cpu);
422 if (err) 425
423 return err; 426 /* Regardless of err above, fall through and start softlockup */
424 427
425 /* create the watchdog thread */ 428 /* create the watchdog thread */
426 if (!p) { 429 if (!p) {
427 p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); 430 p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
428 if (IS_ERR(p)) { 431 if (IS_ERR(p)) {
429 printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); 432 printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
430 return PTR_ERR(p); 433 if (!err)
434 /* if hardlockup hasn't already set this */
435 err = PTR_ERR(p);
436 goto out;
431 } 437 }
432 kthread_bind(p, cpu); 438 kthread_bind(p, cpu);
433 per_cpu(watchdog_touch_ts, cpu) = 0; 439 per_cpu(watchdog_touch_ts, cpu) = 0;
@@ -435,7 +441,8 @@ static int watchdog_enable(int cpu)
435 wake_up_process(p); 441 wake_up_process(p);
436 } 442 }
437 443
438 return 0; 444out:
445 return err;
439} 446}
440 447
441static void watchdog_disable(int cpu) 448static void watchdog_disable(int cpu)
@@ -547,7 +554,13 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
547 break; 554 break;
548#endif /* CONFIG_HOTPLUG_CPU */ 555#endif /* CONFIG_HOTPLUG_CPU */
549 } 556 }
550 return notifier_from_errno(err); 557
558 /*
559 * hardlockup and softlockup are not important enough
560 * to block cpu bring up. Just always succeed and
561 * rely on printk output to flag problems.
562 */
563 return NOTIFY_OK;
551} 564}
552 565
553static struct notifier_block __cpuinitdata cpu_nfb = { 566static struct notifier_block __cpuinitdata cpu_nfb = {
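
The watchdog changes make panic-on-hard-lockup a Kconfig default (BOOTPARAM_HARDLOCKUP_PANIC_VALUE) that the existing nmi_watchdog= parameter can now override in both directions, and they stop lockup-detector failures from blocking CPU bring-up: watchdog_enable() still reports errors, but the hotplug notifier always returns NOTIFY_OK and leaves the printk output to flag problems. Hypothetical boot-time overrides:

    linux /boot/vmlinuz nmi_watchdog=panic     # panic even if the Kconfig default is off
    linux /boot/vmlinuz nmi_watchdog=nopanic   # suppress a Kconfig-enabled panic
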
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5ca7ce9ce754..04ef830690ec 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1366,8 +1366,10 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1366 worker->id = id; 1366 worker->id = id;
1367 1367
1368 if (!on_unbound_cpu) 1368 if (!on_unbound_cpu)
1369 worker->task = kthread_create(worker_thread, worker, 1369 worker->task = kthread_create_on_node(worker_thread,
1370 "kworker/%u:%d", gcwq->cpu, id); 1370 worker,
1371 cpu_to_node(gcwq->cpu),
1372 "kworker/%u:%d", gcwq->cpu, id);
1371 else 1373 else
1372 worker->task = kthread_create(worker_thread, worker, 1374 worker->task = kthread_create(worker_thread, worker,
1373 "kworker/u:%d", id); 1375 "kworker/u:%d", id);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 191c5c4c89fc..df9234c5f9d1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,6 +9,17 @@ config PRINTK_TIME
9 operations. This is useful for identifying long delays 9 operations. This is useful for identifying long delays
10 in kernel startup. 10 in kernel startup.
11 11
12config DEFAULT_MESSAGE_LOGLEVEL
13 int "Default message log level (1-7)"
14 range 1 7
15 default "4"
16 help
17 Default log level for printk statements with no specified priority.
18
19 This has been hard-coded to KERN_WARNING since at least 2.6.10, but folks
20 that are auditing their logs closely may want to set it to a lower
21 priority.
22
12config ENABLE_WARN_DEPRECATED 23config ENABLE_WARN_DEPRECATED
13 bool "Enable __deprecated logic" 24 bool "Enable __deprecated logic"
14 default y 25 default y
@@ -171,6 +182,23 @@ config HARDLOCKUP_DETECTOR
171 def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \ 182 def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
172 !ARCH_HAS_NMI_WATCHDOG 183 !ARCH_HAS_NMI_WATCHDOG
173 184
185config BOOTPARAM_HARDLOCKUP_PANIC
186 bool "Panic (Reboot) On Hard Lockups"
187 depends on LOCKUP_DETECTOR
188 help
189 Say Y here to enable the kernel to panic on "hard lockups",
190 which are bugs that cause the kernel to loop in kernel
191 mode with interrupts disabled for more than 60 seconds.
192
193 Say N if unsure.
194
195config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
196 int
197 depends on LOCKUP_DETECTOR
198 range 0 1
199 default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
200 default 1 if BOOTPARAM_HARDLOCKUP_PANIC
201
174config BOOTPARAM_SOFTLOCKUP_PANIC 202config BOOTPARAM_SOFTLOCKUP_PANIC
175 bool "Panic (Reboot) On Soft Lockups" 203 bool "Panic (Reboot) On Soft Lockups"
176 depends on LOCKUP_DETECTOR 204 depends on LOCKUP_DETECTOR
@@ -1222,3 +1250,6 @@ source "samples/Kconfig"
1222source "lib/Kconfig.kgdb" 1250source "lib/Kconfig.kgdb"
1223 1251
1224source "lib/Kconfig.kmemcheck" 1252source "lib/Kconfig.kmemcheck"
1253
1254config TEST_KSTRTOX
1255 tristate "Test kstrto*() family of functions at runtime"
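
Both new Kconfig knobs from this hunk in one hypothetical .config fragment: untagged printk()s demoted from the historical KERN_WARNING to KERN_NOTICE, and the kstrto*() self-test built as a module:

    CONFIG_DEFAULT_MESSAGE_LOGLEVEL=5
    CONFIG_TEST_KSTRTOX=m
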
diff --git a/lib/Makefile b/lib/Makefile
index ef7ed71a6ffd..8c9de027ebb1 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,6 +22,8 @@ lib-y += kobject.o kref.o klist.o
22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o 24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o
25obj-y += kstrtox.o
26obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
25 27
26ifeq ($(CONFIG_DEBUG_KOBJECT),y) 28ifeq ($(CONFIG_DEBUG_KOBJECT),y)
27CFLAGS_kobject.o += -DDEBUG 29CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
new file mode 100644
index 000000000000..05672e819f8c
--- /dev/null
+++ b/lib/kstrtox.c
@@ -0,0 +1,227 @@
1/*
2 * Convert integer string representation to an integer.
3 * If an integer doesn't fit into the specified type, -E is returned.
4 *
5 * Integer starts with optional sign.
6 * kstrtou*() functions do not accept sign "-".
7 *
8 * Radix 0 means autodetection: leading "0x" implies radix 16,
9 * leading "0" implies radix 8, otherwise radix is 10.
10 * Autodetection hints work after optional sign, but not before.
11 *
12 * If -E is returned, result is not touched.
13 */
14#include <linux/ctype.h>
15#include <linux/errno.h>
16#include <linux/kernel.h>
17#include <linux/math64.h>
18#include <linux/module.h>
19#include <linux/types.h>
20
21static inline char _tolower(const char c)
22{
23 return c | 0x20;
24}
25
26static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
27{
28 unsigned long long acc;
29 int ok;
30
31 if (base == 0) {
32 if (s[0] == '0') {
33 if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
34 base = 16;
35 else
36 base = 8;
37 } else
38 base = 10;
39 }
40 if (base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
41 s += 2;
42
43 acc = 0;
44 ok = 0;
45 while (*s) {
46 unsigned int val;
47
48 if ('0' <= *s && *s <= '9')
49 val = *s - '0';
50 else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
51 val = _tolower(*s) - 'a' + 10;
52 else if (*s == '\n') {
53 if (*(s + 1) == '\0')
54 break;
55 else
56 return -EINVAL;
57 } else
58 return -EINVAL;
59
60 if (val >= base)
61 return -EINVAL;
62 if (acc > div_u64(ULLONG_MAX - val, base))
63 return -ERANGE;
64 acc = acc * base + val;
65 ok = 1;
66
67 s++;
68 }
69 if (!ok)
70 return -EINVAL;
71 *res = acc;
72 return 0;
73}
74
75int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
76{
77 if (s[0] == '+')
78 s++;
79 return _kstrtoull(s, base, res);
80}
81EXPORT_SYMBOL(kstrtoull);
82
83int kstrtoll(const char *s, unsigned int base, long long *res)
84{
85 unsigned long long tmp;
86 int rv;
87
88 if (s[0] == '-') {
89 rv = _kstrtoull(s + 1, base, &tmp);
90 if (rv < 0)
91 return rv;
92 if ((long long)(-tmp) >= 0)
93 return -ERANGE;
94 *res = -tmp;
95 } else {
96 rv = kstrtoull(s, base, &tmp);
97 if (rv < 0)
98 return rv;
99 if ((long long)tmp < 0)
100 return -ERANGE;
101 *res = tmp;
102 }
103 return 0;
104}
105EXPORT_SYMBOL(kstrtoll);
106
107/* Internal, do not use. */
108int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
109{
110 unsigned long long tmp;
111 int rv;
112
113 rv = kstrtoull(s, base, &tmp);
114 if (rv < 0)
115 return rv;
116 if (tmp != (unsigned long long)(unsigned long)tmp)
117 return -ERANGE;
118 *res = tmp;
119 return 0;
120}
121EXPORT_SYMBOL(_kstrtoul);
122
123/* Internal, do not use. */
124int _kstrtol(const char *s, unsigned int base, long *res)
125{
126 long long tmp;
127 int rv;
128
129 rv = kstrtoll(s, base, &tmp);
130 if (rv < 0)
131 return rv;
132 if (tmp != (long long)(long)tmp)
133 return -ERANGE;
134 *res = tmp;
135 return 0;
136}
137EXPORT_SYMBOL(_kstrtol);
138
139int kstrtouint(const char *s, unsigned int base, unsigned int *res)
140{
141 unsigned long long tmp;
142 int rv;
143
144 rv = kstrtoull(s, base, &tmp);
145 if (rv < 0)
146 return rv;
147 if (tmp != (unsigned long long)(unsigned int)tmp)
148 return -ERANGE;
149 *res = tmp;
150 return 0;
151}
152EXPORT_SYMBOL(kstrtouint);
153
154int kstrtoint(const char *s, unsigned int base, int *res)
155{
156 long long tmp;
157 int rv;
158
159 rv = kstrtoll(s, base, &tmp);
160 if (rv < 0)
161 return rv;
162 if (tmp != (long long)(int)tmp)
163 return -ERANGE;
164 *res = tmp;
165 return 0;
166}
167EXPORT_SYMBOL(kstrtoint);
168
169int kstrtou16(const char *s, unsigned int base, u16 *res)
170{
171 unsigned long long tmp;
172 int rv;
173
174 rv = kstrtoull(s, base, &tmp);
175 if (rv < 0)
176 return rv;
177 if (tmp != (unsigned long long)(u16)tmp)
178 return -ERANGE;
179 *res = tmp;
180 return 0;
181}
182EXPORT_SYMBOL(kstrtou16);
183
184int kstrtos16(const char *s, unsigned int base, s16 *res)
185{
186 long long tmp;
187 int rv;
188
189 rv = kstrtoll(s, base, &tmp);
190 if (rv < 0)
191 return rv;
192 if (tmp != (long long)(s16)tmp)
193 return -ERANGE;
194 *res = tmp;
195 return 0;
196}
197EXPORT_SYMBOL(kstrtos16);
198
199int kstrtou8(const char *s, unsigned int base, u8 *res)
200{
201 unsigned long long tmp;
202 int rv;
203
204 rv = kstrtoull(s, base, &tmp);
205 if (rv < 0)
206 return rv;
207 if (tmp != (unsigned long long)(u8)tmp)
208 return -ERANGE;
209 *res = tmp;
210 return 0;
211}
212EXPORT_SYMBOL(kstrtou8);
213
214int kstrtos8(const char *s, unsigned int base, s8 *res)
215{
216 long long tmp;
217 int rv;
218
219 rv = kstrtoll(s, base, &tmp);
220 if (rv < 0)
221 return rv;
222 if (tmp != (long long)(s8)tmp)
223 return -ERANGE;
224 *res = tmp;
225 return 0;
226}
227EXPORT_SYMBOL(kstrtos8);
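
The file-head comment spells out the contract: base 0 autodetects 0x/0 prefixes, the kstrtou*() variants reject a leading '-', overflow yields -ERANGE, and exactly one trailing newline is tolerated because echo(1) appends one. A sketch of the intended use in a sysfs store handler; the attribute and threshold variable are hypothetical, and the declarations are assumed to come from the companion kernel.h change not shown in this diff:

    #include <linux/device.h>
    #include <linux/kernel.h>

    static unsigned int threshold;

    static ssize_t threshold_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
    {
        unsigned int val;
        int err;

        /* "0x10\n" -> 16; "16abc" -> -EINVAL; "5000000000" -> -ERANGE */
        err = kstrtouint(buf, 0, &val);
        if (err)
            return err;

        threshold = val;
        return count;
    }
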
diff --git a/lib/show_mem.c b/lib/show_mem.c
index fdc77c82f922..d8d602b58c31 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -9,14 +9,14 @@
9#include <linux/nmi.h> 9#include <linux/nmi.h>
10#include <linux/quicklist.h> 10#include <linux/quicklist.h>
11 11
12void show_mem(void) 12void __show_mem(unsigned int filter)
13{ 13{
14 pg_data_t *pgdat; 14 pg_data_t *pgdat;
15 unsigned long total = 0, reserved = 0, shared = 0, 15 unsigned long total = 0, reserved = 0, shared = 0,
16 nonshared = 0, highmem = 0; 16 nonshared = 0, highmem = 0;
17 17
18 printk("Mem-Info:\n"); 18 printk("Mem-Info:\n");
19 show_free_areas(); 19 __show_free_areas(filter);
20 20
21 for_each_online_pgdat(pgdat) { 21 for_each_online_pgdat(pgdat) {
22 unsigned long i, flags; 22 unsigned long i, flags;
@@ -61,3 +61,8 @@ void show_mem(void)
61 quicklist_total_size()); 61 quicklist_total_size());
62#endif 62#endif
63} 63}
64
65void show_mem(void)
66{
67 __show_mem(0);
68}
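
show_mem() becomes a wrapper around __show_mem(filter), letting callers such as the OOM path pass a filter through to __show_free_areas() and skip output that is irrelevant to the failing context. A two-line sketch, assuming the SHOW_MEM_FILTER_NODES flag added elsewhere in this series:

    __show_mem(SHOW_MEM_FILTER_NODES);  /* skip nodes outside the current context */
    show_mem();                         /* unchanged unfiltered entry point */
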
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
new file mode 100644
index 000000000000..325c2f9ecebd
--- /dev/null
+++ b/lib/test-kstrtox.c
@@ -0,0 +1,739 @@
1#include <linux/init.h>
2#include <linux/kernel.h>
3#include <linux/module.h>
4
5#define for_each_test(i, test) \
6 for (i = 0; i < sizeof(test) / sizeof(test[0]); i++)
7
8struct test_fail {
9 const char *str;
10 unsigned int base;
11};
12
13#define DEFINE_TEST_FAIL(test) \
14 const struct test_fail test[] __initdata
15
16#define DECLARE_TEST_OK(type, test_type) \
17 test_type { \
18 const char *str; \
19 unsigned int base; \
20 type expected_res; \
21 }
22
23#define DEFINE_TEST_OK(type, test) \
24 const type test[] __initdata
25
26#define TEST_FAIL(fn, type, fmt, test) \
27{ \
28 unsigned int i; \
29 \
30 for_each_test(i, test) { \
31 const struct test_fail *t = &test[i]; \
32 type tmp; \
33 int rv; \
34 \
35 tmp = 0; \
36 rv = fn(t->str, t->base, &tmp); \
37 if (rv >= 0) { \
38 WARN(1, "str '%s', base %u, expected -E, got %d/" fmt "\n", \
39 t->str, t->base, rv, tmp); \
40 continue; \
41 } \
42 } \
43}
44
45#define TEST_OK(fn, type, fmt, test) \
46{ \
47 unsigned int i; \
48 \
49 for_each_test(i, test) { \
50 const typeof(test[0]) *t = &test[i]; \
51 type res; \
52 int rv; \
53 \
54 rv = fn(t->str, t->base, &res); \
55 if (rv != 0) { \
56 WARN(1, "str '%s', base %u, expected 0/" fmt ", got %d\n", \
57 t->str, t->base, t->expected_res, rv); \
58 continue; \
59 } \
60 if (res != t->expected_res) { \
61 WARN(1, "str '%s', base %u, expected " fmt ", got " fmt "\n", \
62 t->str, t->base, t->expected_res, res); \
63 continue; \
64 } \
65 } \
66}
67
68static void __init test_kstrtoull_ok(void)
69{
70 DECLARE_TEST_OK(unsigned long long, struct test_ull);
71 static DEFINE_TEST_OK(struct test_ull, test_ull_ok) = {
72 {"0", 10, 0ULL},
73 {"1", 10, 1ULL},
74 {"127", 10, 127ULL},
75 {"128", 10, 128ULL},
76 {"129", 10, 129ULL},
77 {"255", 10, 255ULL},
78 {"256", 10, 256ULL},
79 {"257", 10, 257ULL},
80 {"32767", 10, 32767ULL},
81 {"32768", 10, 32768ULL},
82 {"32769", 10, 32769ULL},
83 {"65535", 10, 65535ULL},
84 {"65536", 10, 65536ULL},
85 {"65537", 10, 65537ULL},
86 {"2147483647", 10, 2147483647ULL},
87 {"2147483648", 10, 2147483648ULL},
88 {"2147483649", 10, 2147483649ULL},
89 {"4294967295", 10, 4294967295ULL},
90 {"4294967296", 10, 4294967296ULL},
91 {"4294967297", 10, 4294967297ULL},
92 {"9223372036854775807", 10, 9223372036854775807ULL},
93 {"9223372036854775808", 10, 9223372036854775808ULL},
94 {"9223372036854775809", 10, 9223372036854775809ULL},
95 {"18446744073709551614", 10, 18446744073709551614ULL},
96 {"18446744073709551615", 10, 18446744073709551615ULL},
97
98 {"00", 8, 00ULL},
99 {"01", 8, 01ULL},
100 {"0177", 8, 0177ULL},
101 {"0200", 8, 0200ULL},
102 {"0201", 8, 0201ULL},
103 {"0377", 8, 0377ULL},
104 {"0400", 8, 0400ULL},
105 {"0401", 8, 0401ULL},
106 {"077777", 8, 077777ULL},
107 {"0100000", 8, 0100000ULL},
108 {"0100001", 8, 0100001ULL},
109 {"0177777", 8, 0177777ULL},
110 {"0200000", 8, 0200000ULL},
111 {"0200001", 8, 0200001ULL},
112 {"017777777777", 8, 017777777777ULL},
113 {"020000000000", 8, 020000000000ULL},
114 {"020000000001", 8, 020000000001ULL},
115 {"037777777777", 8, 037777777777ULL},
116 {"040000000000", 8, 040000000000ULL},
117 {"040000000001", 8, 040000000001ULL},
118 {"0777777777777777777777", 8, 0777777777777777777777ULL},
119 {"01000000000000000000000", 8, 01000000000000000000000ULL},
120 {"01000000000000000000001", 8, 01000000000000000000001ULL},
121 {"01777777777777777777776", 8, 01777777777777777777776ULL},
122 {"01777777777777777777777", 8, 01777777777777777777777ULL},
123
124 {"0x0", 16, 0x0ULL},
125 {"0x1", 16, 0x1ULL},
126 {"0x7f", 16, 0x7fULL},
127 {"0x80", 16, 0x80ULL},
128 {"0x81", 16, 0x81ULL},
129 {"0xff", 16, 0xffULL},
130 {"0x100", 16, 0x100ULL},
131 {"0x101", 16, 0x101ULL},
132 {"0x7fff", 16, 0x7fffULL},
133 {"0x8000", 16, 0x8000ULL},
134 {"0x8001", 16, 0x8001ULL},
135 {"0xffff", 16, 0xffffULL},
136 {"0x10000", 16, 0x10000ULL},
137 {"0x10001", 16, 0x10001ULL},
138 {"0x7fffffff", 16, 0x7fffffffULL},
139 {"0x80000000", 16, 0x80000000ULL},
140 {"0x80000001", 16, 0x80000001ULL},
141 {"0xffffffff", 16, 0xffffffffULL},
142 {"0x100000000", 16, 0x100000000ULL},
143 {"0x100000001", 16, 0x100000001ULL},
144 {"0x7fffffffffffffff", 16, 0x7fffffffffffffffULL},
145 {"0x8000000000000000", 16, 0x8000000000000000ULL},
146 {"0x8000000000000001", 16, 0x8000000000000001ULL},
147 {"0xfffffffffffffffe", 16, 0xfffffffffffffffeULL},
148 {"0xffffffffffffffff", 16, 0xffffffffffffffffULL},
149
150 {"0\n", 0, 0ULL},
151 };
152 TEST_OK(kstrtoull, unsigned long long, "%llu", test_ull_ok);
153}
154
155static void __init test_kstrtoull_fail(void)
156{
157 static DEFINE_TEST_FAIL(test_ull_fail) = {
158 {"", 0},
159 {"", 8},
160 {"", 10},
161 {"", 16},
162 {"\n", 0},
163 {"\n", 8},
164 {"\n", 10},
165 {"\n", 16},
166 {"\n0", 0},
167 {"\n0", 8},
168 {"\n0", 10},
169 {"\n0", 16},
170 {"+", 0},
171 {"+", 8},
172 {"+", 10},
173 {"+", 16},
174 {"-", 0},
175 {"-", 8},
176 {"-", 10},
177 {"-", 16},
178 {"0x", 0},
179 {"0x", 16},
180 {"0X", 0},
181 {"0X", 16},
182 {"0 ", 0},
183 {"1+", 0},
184 {"1-", 0},
185 {" 2", 0},
186 /* base autodetection */
187 {"0x0z", 0},
188 {"0z", 0},
189 {"a", 0},
190 /* digit >= base */
191 {"2", 2},
192 {"8", 8},
193 {"a", 10},
194 {"A", 10},
195 {"g", 16},
196 {"G", 16},
197 /* overflow */
198 {"10000000000000000000000000000000000000000000000000000000000000000", 2},
199 {"2000000000000000000000", 8},
200 {"18446744073709551616", 10},
201 {"10000000000000000", 16},
202 /* negative */
203 {"-0", 0},
204 {"-0", 8},
205 {"-0", 10},
206 {"-0", 16},
207 {"-1", 0},
208 {"-1", 8},
209 {"-1", 10},
210 {"-1", 16},
211 /* sign is first character if any */
212 {"-+1", 0},
213 {"-+1", 8},
214 {"-+1", 10},
215 {"-+1", 16},
216 /* nothing after \n */
217 {"0\n0", 0},
218 {"0\n0", 8},
219 {"0\n0", 10},
220 {"0\n0", 16},
221 {"0\n+", 0},
222 {"0\n+", 8},
223 {"0\n+", 10},
224 {"0\n+", 16},
225 {"0\n-", 0},
226 {"0\n-", 8},
227 {"0\n-", 10},
228 {"0\n-", 16},
229 {"0\n ", 0},
230 {"0\n ", 8},
231 {"0\n ", 10},
232 {"0\n ", 16},
233 };
234 TEST_FAIL(kstrtoull, unsigned long long, "%llu", test_ull_fail);
235}
236
237static void __init test_kstrtoll_ok(void)
238{
239 DECLARE_TEST_OK(long long, struct test_ll);
240 static DEFINE_TEST_OK(struct test_ll, test_ll_ok) = {
241 {"0", 10, 0LL},
242 {"1", 10, 1LL},
243 {"127", 10, 127LL},
244 {"128", 10, 128LL},
245 {"129", 10, 129LL},
246 {"255", 10, 255LL},
247 {"256", 10, 256LL},
248 {"257", 10, 257LL},
249 {"32767", 10, 32767LL},
250 {"32768", 10, 32768LL},
251 {"32769", 10, 32769LL},
252 {"65535", 10, 65535LL},
253 {"65536", 10, 65536LL},
254 {"65537", 10, 65537LL},
255 {"2147483647", 10, 2147483647LL},
256 {"2147483648", 10, 2147483648LL},
257 {"2147483649", 10, 2147483649LL},
258 {"4294967295", 10, 4294967295LL},
259 {"4294967296", 10, 4294967296LL},
260 {"4294967297", 10, 4294967297LL},
261 {"9223372036854775807", 10, 9223372036854775807LL},
262
263 {"-1", 10, -1LL},
264 {"-2", 10, -2LL},
265 {"-9223372036854775808", 10, LLONG_MIN},
266 };
267 TEST_OK(kstrtoll, long long, "%lld", test_ll_ok);
268}
269
270static void __init test_kstrtoll_fail(void)
271{
272 static DEFINE_TEST_FAIL(test_ll_fail) = {
273 {"9223372036854775808", 10},
274 {"9223372036854775809", 10},
275 {"18446744073709551614", 10},
276 {"18446744073709551615", 10},
277 {"-9223372036854775809", 10},
278 {"-18446744073709551614", 10},
279 {"-18446744073709551615", 10},
280 /* negative zero isn't an integer in Linux */
281 {"-0", 0},
282 {"-0", 8},
283 {"-0", 10},
284 {"-0", 16},
285 /* sign is first character if any */
286 {"-+1", 0},
287 {"-+1", 8},
288 {"-+1", 10},
289 {"-+1", 16},
290 };
291 TEST_FAIL(kstrtoll, long long, "%lld", test_ll_fail);
292}
293
294static void __init test_kstrtou64_ok(void)
295{
296 DECLARE_TEST_OK(u64, struct test_u64);
297 static DEFINE_TEST_OK(struct test_u64, test_u64_ok) = {
298 {"0", 10, 0},
299 {"1", 10, 1},
300 {"126", 10, 126},
301 {"127", 10, 127},
302 {"128", 10, 128},
303 {"129", 10, 129},
304 {"254", 10, 254},
305 {"255", 10, 255},
306 {"256", 10, 256},
307 {"257", 10, 257},
308 {"32766", 10, 32766},
309 {"32767", 10, 32767},
310 {"32768", 10, 32768},
311 {"32769", 10, 32769},
312 {"65534", 10, 65534},
313 {"65535", 10, 65535},
314 {"65536", 10, 65536},
315 {"65537", 10, 65537},
316 {"2147483646", 10, 2147483646},
317 {"2147483647", 10, 2147483647},
318 {"2147483648", 10, 2147483648},
319 {"2147483649", 10, 2147483649},
320 {"4294967294", 10, 4294967294},
321 {"4294967295", 10, 4294967295},
322 {"4294967296", 10, 4294967296},
323 {"4294967297", 10, 4294967297},
324 {"9223372036854775806", 10, 9223372036854775806ULL},
325 {"9223372036854775807", 10, 9223372036854775807ULL},
326 {"9223372036854775808", 10, 9223372036854775808ULL},
327 {"9223372036854775809", 10, 9223372036854775809ULL},
328 {"18446744073709551614", 10, 18446744073709551614ULL},
329 {"18446744073709551615", 10, 18446744073709551615ULL},
330 };
331 TEST_OK(kstrtou64, u64, "%llu", test_u64_ok);
332}
333
334static void __init test_kstrtou64_fail(void)
335{
336 static DEFINE_TEST_FAIL(test_u64_fail) = {
337 {"-2", 10},
338 {"-1", 10},
339 {"18446744073709551616", 10},
340 {"18446744073709551617", 10},
341 };
342 TEST_FAIL(kstrtou64, u64, "%llu", test_u64_fail);
343}
344
345static void __init test_kstrtos64_ok(void)
346{
347 DECLARE_TEST_OK(s64, struct test_s64);
348 static DEFINE_TEST_OK(struct test_s64, test_s64_ok) = {
349 {"-128", 10, -128},
350 {"-127", 10, -127},
351 {"-1", 10, -1},
352 {"0", 10, 0},
353 {"1", 10, 1},
354 {"126", 10, 126},
355 {"127", 10, 127},
356 {"128", 10, 128},
357 {"129", 10, 129},
358 {"254", 10, 254},
359 {"255", 10, 255},
360 {"256", 10, 256},
361 {"257", 10, 257},
362 {"32766", 10, 32766},
363 {"32767", 10, 32767},
364 {"32768", 10, 32768},
365 {"32769", 10, 32769},
366 {"65534", 10, 65534},
367 {"65535", 10, 65535},
368 {"65536", 10, 65536},
369 {"65537", 10, 65537},
370 {"2147483646", 10, 2147483646},
371 {"2147483647", 10, 2147483647},
372 {"2147483648", 10, 2147483648},
373 {"2147483649", 10, 2147483649},
374 {"4294967294", 10, 4294967294},
375 {"4294967295", 10, 4294967295},
376 {"4294967296", 10, 4294967296},
377 {"4294967297", 10, 4294967297},
378 {"9223372036854775806", 10, 9223372036854775806LL},
379 {"9223372036854775807", 10, 9223372036854775807LL},
380 };
381 TEST_OK(kstrtos64, s64, "%lld", test_s64_ok);
382}
383
384static void __init test_kstrtos64_fail(void)
385{
386 static DEFINE_TEST_FAIL(test_s64_fail) = {
387 {"9223372036854775808", 10},
388 {"9223372036854775809", 10},
389 {"18446744073709551614", 10},
390 {"18446744073709551615", 10},
391 {"18446744073709551616", 10},
392 {"18446744073709551617", 10},
393 };
394 TEST_FAIL(kstrtos64, s64, "%lld", test_s64_fail);
395}
396
397static void __init test_kstrtou32_ok(void)
398{
399 DECLARE_TEST_OK(u32, struct test_u32);
400 static DEFINE_TEST_OK(struct test_u32, test_u32_ok) = {
401 {"0", 10, 0},
402 {"1", 10, 1},
403 {"126", 10, 126},
404 {"127", 10, 127},
405 {"128", 10, 128},
406 {"129", 10, 129},
407 {"254", 10, 254},
408 {"255", 10, 255},
409 {"256", 10, 256},
410 {"257", 10, 257},
411 {"32766", 10, 32766},
412 {"32767", 10, 32767},
413 {"32768", 10, 32768},
414 {"32769", 10, 32769},
415 {"65534", 10, 65534},
416 {"65535", 10, 65535},
417 {"65536", 10, 65536},
418 {"65537", 10, 65537},
419 {"2147483646", 10, 2147483646},
420 {"2147483647", 10, 2147483647},
421 {"2147483648", 10, 2147483648},
422 {"2147483649", 10, 2147483649},
423 {"4294967294", 10, 4294967294},
424 {"4294967295", 10, 4294967295},
425 };
426 TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
427}
428
429static void __init test_kstrtou32_fail(void)
430{
431 static DEFINE_TEST_FAIL(test_u32_fail) = {
432 {"-2", 10},
433 {"-1", 10},
434 {"4294967296", 10},
435 {"4294967297", 10},
436 {"9223372036854775806", 10},
437 {"9223372036854775807", 10},
438 {"9223372036854775808", 10},
439 {"9223372036854775809", 10},
440 {"18446744073709551614", 10},
441 {"18446744073709551615", 10},
442 {"18446744073709551616", 10},
443 {"18446744073709551617", 10},
444 };
445 TEST_FAIL(kstrtou32, u32, "%u", test_u32_fail);
446}
447
448static void __init test_kstrtos32_ok(void)
449{
450 DECLARE_TEST_OK(s32, struct test_s32);
451 static DEFINE_TEST_OK(struct test_s32, test_s32_ok) = {
452 {"-128", 10, -128},
453 {"-127", 10, -127},
454 {"-1", 10, -1},
455 {"0", 10, 0},
456 {"1", 10, 1},
457 {"126", 10, 126},
458 {"127", 10, 127},
459 {"128", 10, 128},
460 {"129", 10, 129},
461 {"254", 10, 254},
462 {"255", 10, 255},
463 {"256", 10, 256},
464 {"257", 10, 257},
465 {"32766", 10, 32766},
466 {"32767", 10, 32767},
467 {"32768", 10, 32768},
468 {"32769", 10, 32769},
469 {"65534", 10, 65534},
470 {"65535", 10, 65535},
471 {"65536", 10, 65536},
472 {"65537", 10, 65537},
473 {"2147483646", 10, 2147483646},
474 {"2147483647", 10, 2147483647},
475 };
476 TEST_OK(kstrtos32, s32, "%d", test_s32_ok);
477}
478
479static void __init test_kstrtos32_fail(void)
480{
481 static DEFINE_TEST_FAIL(test_s32_fail) = {
482 {"2147483648", 10},
483 {"2147483649", 10},
484 {"4294967294", 10},
485 {"4294967295", 10},
486 {"4294967296", 10},
487 {"4294967297", 10},
488 {"9223372036854775806", 10},
489 {"9223372036854775807", 10},
490 {"9223372036854775808", 10},
491 {"9223372036854775809", 10},
492 {"18446744073709551614", 10},
493 {"18446744073709551615", 10},
494 {"18446744073709551616", 10},
495 {"18446744073709551617", 10},
496 };
497 TEST_FAIL(kstrtos32, s32, "%d", test_s32_fail);
498}
499
500static void __init test_kstrtou16_ok(void)
501{
502 DECLARE_TEST_OK(u16, struct test_u16);
503 static DEFINE_TEST_OK(struct test_u16, test_u16_ok) = {
504 {"0", 10, 0},
505 {"1", 10, 1},
506 {"126", 10, 126},
507 {"127", 10, 127},
508 {"128", 10, 128},
509 {"129", 10, 129},
510 {"254", 10, 254},
511 {"255", 10, 255},
512 {"256", 10, 256},
513 {"257", 10, 257},
514 {"32766", 10, 32766},
515 {"32767", 10, 32767},
516 {"32768", 10, 32768},
517 {"32769", 10, 32769},
518 {"65534", 10, 65534},
519 {"65535", 10, 65535},
520 };
521 TEST_OK(kstrtou16, u16, "%hu", test_u16_ok);
522}
523
524static void __init test_kstrtou16_fail(void)
525{
526 static DEFINE_TEST_FAIL(test_u16_fail) = {
527 {"-2", 10},
528 {"-1", 10},
529 {"65536", 10},
530 {"65537", 10},
531 {"2147483646", 10},
532 {"2147483647", 10},
533 {"2147483648", 10},
534 {"2147483649", 10},
535 {"4294967294", 10},
536 {"4294967295", 10},
537 {"4294967296", 10},
538 {"4294967297", 10},
539 {"9223372036854775806", 10},
540 {"9223372036854775807", 10},
541 {"9223372036854775808", 10},
542 {"9223372036854775809", 10},
543 {"18446744073709551614", 10},
544 {"18446744073709551615", 10},
545 {"18446744073709551616", 10},
546 {"18446744073709551617", 10},
547 };
548 TEST_FAIL(kstrtou16, u16, "%hu", test_u16_fail);
549}
550
551static void __init test_kstrtos16_ok(void)
552{
553 DECLARE_TEST_OK(s16, struct test_s16);
554 static DEFINE_TEST_OK(struct test_s16, test_s16_ok) = {
555 {"-130", 10, -130},
556 {"-129", 10, -129},
557 {"-128", 10, -128},
558 {"-127", 10, -127},
559 {"-1", 10, -1},
560 {"0", 10, 0},
561 {"1", 10, 1},
562 {"126", 10, 126},
563 {"127", 10, 127},
564 {"128", 10, 128},
565 {"129", 10, 129},
566 {"254", 10, 254},
567 {"255", 10, 255},
568 {"256", 10, 256},
569 {"257", 10, 257},
570 {"32766", 10, 32766},
571 {"32767", 10, 32767},
572 };
573 TEST_OK(kstrtos16, s16, "%hd", test_s16_ok);
574}
575
576static void __init test_kstrtos16_fail(void)
577{
578 static DEFINE_TEST_FAIL(test_s16_fail) = {
579 {"32768", 10},
580 {"32769", 10},
581 {"65534", 10},
582 {"65535", 10},
583 {"65536", 10},
584 {"65537", 10},
585 {"2147483646", 10},
586 {"2147483647", 10},
587 {"2147483648", 10},
588 {"2147483649", 10},
589 {"4294967294", 10},
590 {"4294967295", 10},
591 {"4294967296", 10},
592 {"4294967297", 10},
593 {"9223372036854775806", 10},
594 {"9223372036854775807", 10},
595 {"9223372036854775808", 10},
596 {"9223372036854775809", 10},
597 {"18446744073709551614", 10},
598 {"18446744073709551615", 10},
599 {"18446744073709551616", 10},
600 {"18446744073709551617", 10},
601 };
602 TEST_FAIL(kstrtos16, s16, "%hd", test_s16_fail);
603}
604
605static void __init test_kstrtou8_ok(void)
606{
607 DECLARE_TEST_OK(u8, struct test_u8);
608 static DEFINE_TEST_OK(struct test_u8, test_u8_ok) = {
609 {"0", 10, 0},
610 {"1", 10, 1},
611 {"126", 10, 126},
612 {"127", 10, 127},
613 {"128", 10, 128},
614 {"129", 10, 129},
615 {"254", 10, 254},
616 {"255", 10, 255},
617 };
618 TEST_OK(kstrtou8, u8, "%hhu", test_u8_ok);
619}
620
621static void __init test_kstrtou8_fail(void)
622{
623 static DEFINE_TEST_FAIL(test_u8_fail) = {
624 {"-2", 10},
625 {"-1", 10},
626 {"256", 10},
627 {"257", 10},
628 {"32766", 10},
629 {"32767", 10},
630 {"32768", 10},
631 {"32769", 10},
632 {"65534", 10},
633 {"65535", 10},
634 {"65536", 10},
635 {"65537", 10},
636 {"2147483646", 10},
637 {"2147483647", 10},
638 {"2147483648", 10},
639 {"2147483649", 10},
640 {"4294967294", 10},
641 {"4294967295", 10},
642 {"4294967296", 10},
643 {"4294967297", 10},
644 {"9223372036854775806", 10},
645 {"9223372036854775807", 10},
646 {"9223372036854775808", 10},
647 {"9223372036854775809", 10},
648 {"18446744073709551614", 10},
649 {"18446744073709551615", 10},
650 {"18446744073709551616", 10},
651 {"18446744073709551617", 10},
652 };
653 TEST_FAIL(kstrtou8, u8, "%hhu", test_u8_fail);
654}
655
656static void __init test_kstrtos8_ok(void)
657{
658 DECLARE_TEST_OK(s8, struct test_s8);
659 static DEFINE_TEST_OK(struct test_s8, test_s8_ok) = {
660 {"-128", 10, -128},
661 {"-127", 10, -127},
662 {"-1", 10, -1},
663 {"0", 10, 0},
664 {"1", 10, 1},
665 {"126", 10, 126},
666 {"127", 10, 127},
667 };
668 TEST_OK(kstrtos8, s8, "%hhd", test_s8_ok);
669}
670
671static void __init test_kstrtos8_fail(void)
672{
673 static DEFINE_TEST_FAIL(test_s8_fail) = {
674 {"-130", 10},
675 {"-129", 10},
676 {"128", 10},
677 {"129", 10},
678 {"254", 10},
679 {"255", 10},
680 {"256", 10},
681 {"257", 10},
682 {"32766", 10},
683 {"32767", 10},
684 {"32768", 10},
685 {"32769", 10},
686 {"65534", 10},
687 {"65535", 10},
688 {"65536", 10},
689 {"65537", 10},
690 {"2147483646", 10},
691 {"2147483647", 10},
692 {"2147483648", 10},
693 {"2147483649", 10},
694 {"4294967294", 10},
695 {"4294967295", 10},
696 {"4294967296", 10},
697 {"4294967297", 10},
698 {"9223372036854775806", 10},
699 {"9223372036854775807", 10},
700 {"9223372036854775808", 10},
701 {"9223372036854775809", 10},
702 {"18446744073709551614", 10},
703 {"18446744073709551615", 10},
704 {"18446744073709551616", 10},
705 {"18446744073709551617", 10},
706 };
707 TEST_FAIL(kstrtos8, s8, "%hhd", test_s8_fail);
708}
709
710static int __init test_kstrtox_init(void)
711{
712 test_kstrtoull_ok();
713 test_kstrtoull_fail();
714 test_kstrtoll_ok();
715 test_kstrtoll_fail();
716
717 test_kstrtou64_ok();
718 test_kstrtou64_fail();
719 test_kstrtos64_ok();
720 test_kstrtos64_fail();
721
722 test_kstrtou32_ok();
723 test_kstrtou32_fail();
724 test_kstrtos32_ok();
725 test_kstrtos32_fail();
726
727 test_kstrtou16_ok();
728 test_kstrtou16_fail();
729 test_kstrtos16_ok();
730 test_kstrtos16_fail();
731
732 test_kstrtou8_ok();
733 test_kstrtou8_fail();
734 test_kstrtos8_ok();
735 test_kstrtos8_fail();
736 return -EINVAL;
737}
738module_init(test_kstrtox_init);
739MODULE_LICENSE("Dual BSD/GPL");
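
test_kstrtox_init() deliberately returns -EINVAL so the module never stays resident; failures surface as WARN() backtraces instead. A hypothetical test run therefore looks like:

    insmod test-kstrtox.ko || true   # "failure" here is by design
    dmesg | grep "str '"             # WARN lines identify any failing cases
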
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d3023df8477f..02bcdd5feac4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -120,147 +120,6 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
120} 120}
121EXPORT_SYMBOL(simple_strtoll); 121EXPORT_SYMBOL(simple_strtoll);
122 122
123/**
124 * strict_strtoul - convert a string to an unsigned long strictly
125 * @cp: The string to be converted
126 * @base: The number base to use
127 * @res: The converted result value
128 *
129 * strict_strtoul converts a string to an unsigned long only if the
130 * string is really an unsigned long string, any string containing
131 * any invalid char at the tail will be rejected and -EINVAL is returned,
132 * only a newline char at the tail is acceptable because people generally
133 * change a module parameter in the following way:
134 *
135 * echo 1024 > /sys/module/e1000/parameters/copybreak
136 *
137 * echo will append a newline to the tail.
138 *
139 * It returns 0 if conversion is successful and *res is set to the converted
140 * value, otherwise it returns -EINVAL and *res is set to 0.
141 *
142 * simple_strtoul just ignores the successive invalid characters and
143 * return the converted value of prefix part of the string.
144 */
145int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
146{
147 char *tail;
148 unsigned long val;
149
150 *res = 0;
151 if (!*cp)
152 return -EINVAL;
153
154 val = simple_strtoul(cp, &tail, base);
155 if (tail == cp)
156 return -EINVAL;
157
158 if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
159 *res = val;
160 return 0;
161 }
162
163 return -EINVAL;
164}
165EXPORT_SYMBOL(strict_strtoul);
166
167/**
168 * strict_strtol - convert a string to a long strictly
169 * @cp: The string to be converted
170 * @base: The number base to use
171 * @res: The converted result value
172 *
173 * strict_strtol is similar to strict_strtoul, but it allows the first
174 * character of the string to be '-'.
175 *
176 * It returns 0 if conversion is successful and *res is set to the converted
177 * value, otherwise it returns -EINVAL and *res is set to 0.
178 */
179int strict_strtol(const char *cp, unsigned int base, long *res)
180{
181 int ret;
182 if (*cp == '-') {
183 ret = strict_strtoul(cp + 1, base, (unsigned long *)res);
184 if (!ret)
185 *res = -(*res);
186 } else {
187 ret = strict_strtoul(cp, base, (unsigned long *)res);
188 }
189
190 return ret;
191}
192EXPORT_SYMBOL(strict_strtol);
193
194/**
195 * strict_strtoull - convert a string to an unsigned long long strictly
196 * @cp: The string to be converted
197 * @base: The number base to use
198 * @res: The converted result value
199 *
200 * strict_strtoull converts a string to an unsigned long long only if the
201 * string is really an unsigned long long string, any string containing
202 * any invalid char at the tail will be rejected and -EINVAL is returned,
203 * only a newline char at the tail is acceptable because people generally
204 * change a module parameter in the following way:
205 *
206 * echo 1024 > /sys/module/e1000/parameters/copybreak
207 *
208 * echo will append a newline to the tail of the string.
209 *
210 * It returns 0 if conversion is successful and *res is set to the converted
211 * value, otherwise it returns -EINVAL and *res is set to 0.
212 *
213 * simple_strtoull just ignores the successive invalid characters and
214 * return the converted value of prefix part of the string.
215 */
216int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
217{
218 char *tail;
219 unsigned long long val;
220
221 *res = 0;
222 if (!*cp)
223 return -EINVAL;
224
225 val = simple_strtoull(cp, &tail, base);
226 if (tail == cp)
227 return -EINVAL;
228 if ((tail[0] == '\0') || (tail[0] == '\n' && tail[1] == '\0')) {
229 *res = val;
230 return 0;
231 }
232
233 return -EINVAL;
234}
235EXPORT_SYMBOL(strict_strtoull);
236
237/**
238 * strict_strtoll - convert a string to a long long strictly
239 * @cp: The string to be converted
240 * @base: The number base to use
241 * @res: The converted result value
242 *
243 * strict_strtoll is similar to strict_strtoull, but it allows the first
244 * character of the string to be '-'.
245 *
246 * It returns 0 if conversion is successful and *res is set to the converted
247 * value, otherwise it returns -EINVAL and *res is set to 0.
248 */
249int strict_strtoll(const char *cp, unsigned int base, long long *res)
250{
251 int ret;
252 if (*cp == '-') {
253 ret = strict_strtoull(cp + 1, base, (unsigned long long *)res);
254 if (!ret)
255 *res = -(*res);
256 } else {
257 ret = strict_strtoull(cp, base, (unsigned long long *)res);
258 }
259
260 return ret;
261}
262EXPORT_SYMBOL(strict_strtoll);
263
264static noinline_for_stack 123static noinline_for_stack
265int skip_atoi(const char **s) 124int skip_atoi(const char **s)
266{ 125{
@@ -991,7 +850,7 @@ static noinline_for_stack
991char *pointer(const char *fmt, char *buf, char *end, void *ptr, 850char *pointer(const char *fmt, char *buf, char *end, void *ptr,
992 struct printf_spec spec) 851 struct printf_spec spec)
993{ 852{
994 if (!ptr) { 853 if (!ptr && *fmt != 'K') {
995 /* 854 /*
996 * Print (null) with the same width as a pointer so it makes 855 * Print (null) with the same width as a pointer so it makes
997 * tabular output look nice. 856 * tabular output look nice.
@@ -1047,16 +906,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1047 if (spec.field_width == -1) 906 if (spec.field_width == -1)
1048 spec.field_width = 2 * sizeof(void *); 907 spec.field_width = 2 * sizeof(void *);
1049 return string(buf, end, "pK-error", spec); 908 return string(buf, end, "pK-error", spec);
1050 } else if ((kptr_restrict == 0) ||
1051 (kptr_restrict == 1 &&
1052 has_capability_noaudit(current, CAP_SYSLOG)))
1053 break;
1054
1055 if (spec.field_width == -1) {
1056 spec.field_width = 2 * sizeof(void *);
1057 spec.flags |= ZEROPAD;
1058 } 909 }
1059 return number(buf, end, 0, spec); 910 if (!((kptr_restrict == 0) ||
911 (kptr_restrict == 1 &&
912 has_capability_noaudit(current, CAP_SYSLOG))))
913 ptr = NULL;
914 break;
1060 } 915 }
1061 spec.flags |= SMALL; 916 spec.flags |= SMALL;
1062 if (spec.field_width == -1) { 917 if (spec.field_width == -1) {
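
Two independent vsprintf.c cleanups: the strict_strto*() bodies leave this file (the series reimplements them on top of kstrto*() via a header change not shown here, so existing callers are expected to keep working), and %pK is restructured so a disallowed pointer is simply forced to NULL and falls through to the zero-padded number path. One visible consequence, sketched for a 32-bit build with a made-up address and pointer variable:

    printk(KERN_INFO "addr=%pK\n", some_ptr);
    /* CAP_SYSLOG reader, kptr_restrict=1: "addr=c0105316"
     * unprivileged reader:                "addr=00000000"
     * some_ptr == NULL:                   "addr=00000000", not "(null)",
     * since the NULL special case now exempts 'K' */
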
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index 46a31e5f49c3..d63381e8e333 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -176,6 +176,7 @@ int zlib_deflateInit2(
176 deflate_state *s; 176 deflate_state *s;
177 int noheader = 0; 177 int noheader = 0;
178 deflate_workspace *mem; 178 deflate_workspace *mem;
179 char *next;
179 180
180 ush *overlay; 181 ush *overlay;
181 /* We overlay pending_buf and d_buf+l_buf. This works since the average 182 /* We overlay pending_buf and d_buf+l_buf. This works since the average
@@ -199,6 +200,21 @@ int zlib_deflateInit2(
199 strategy < 0 || strategy > Z_HUFFMAN_ONLY) { 200 strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
200 return Z_STREAM_ERROR; 201 return Z_STREAM_ERROR;
201 } 202 }
203
204 /*
205 * Direct the workspace's pointers to the chunks that were allocated
206 * along with the deflate_workspace struct.
207 */
208 next = (char *) mem;
209 next += sizeof(*mem);
210 mem->window_memory = (Byte *) next;
211 next += zlib_deflate_window_memsize(windowBits);
212 mem->prev_memory = (Pos *) next;
213 next += zlib_deflate_prev_memsize(windowBits);
214 mem->head_memory = (Pos *) next;
215 next += zlib_deflate_head_memsize(memLevel);
216 mem->overlay_memory = next;
217
202 s = (deflate_state *) &(mem->deflate_memory); 218 s = (deflate_state *) &(mem->deflate_memory);
203 strm->state = (struct internal_state *)s; 219 strm->state = (struct internal_state *)s;
204 s->strm = strm; 220 s->strm = strm;
@@ -1247,7 +1263,18 @@ static block_state deflate_slow(
1247 return flush == Z_FINISH ? finish_done : block_done; 1263 return flush == Z_FINISH ? finish_done : block_done;
1248} 1264}
1249 1265
1250int zlib_deflate_workspacesize(void) 1266int zlib_deflate_workspacesize(int windowBits, int memLevel)
1251{ 1267{
1252 return sizeof(deflate_workspace); 1268 if (windowBits < 0) /* undocumented feature: suppress zlib header */
1269 windowBits = -windowBits;
1270
1271 /* Since the return value is typically passed to vmalloc() unchecked... */
1272 BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 ||
1273 windowBits > 15);
1274
1275 return sizeof(deflate_workspace)
1276 + zlib_deflate_window_memsize(windowBits)
1277 + zlib_deflate_prev_memsize(windowBits)
1278 + zlib_deflate_head_memsize(memLevel)
1279 + zlib_deflate_overlay_memsize(memLevel);
1253} 1280}
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
index 6b15a909ca3f..b640b6402e99 100644
--- a/lib/zlib_deflate/defutil.h
+++ b/lib/zlib_deflate/defutil.h
@@ -241,12 +241,21 @@ typedef struct deflate_state {
241typedef struct deflate_workspace { 241typedef struct deflate_workspace {
242 /* State memory for the deflator */ 242 /* State memory for the deflator */
243 deflate_state deflate_memory; 243 deflate_state deflate_memory;
244 Byte window_memory[2 * (1 << MAX_WBITS)]; 244 Byte *window_memory;
245 Pos prev_memory[1 << MAX_WBITS]; 245 Pos *prev_memory;
246 Pos head_memory[1 << (MAX_MEM_LEVEL + 7)]; 246 Pos *head_memory;
247 char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)]; 247 char *overlay_memory;
248} deflate_workspace; 248} deflate_workspace;
249 249
250#define zlib_deflate_window_memsize(windowBits) \
251 (2 * (1 << (windowBits)) * sizeof(Byte))
252#define zlib_deflate_prev_memsize(windowBits) \
253 ((1 << (windowBits)) * sizeof(Pos))
254#define zlib_deflate_head_memsize(memLevel) \
255 ((1 << ((memLevel)+7)) * sizeof(Pos))
256#define zlib_deflate_overlay_memsize(memLevel) \
257 ((1 << ((memLevel)+6)) * (sizeof(ush)+2))
258
250/* Output a byte on the stream. 259/* Output a byte on the stream.
251 * IN assertion: there is enough room in pending_buf. 260 * IN assertion: there is enough room in pending_buf.
252 */ 261 */
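
The memsize macros added here make the workspace layout explicit: the new two-argument zlib_deflate_workspacesize() sums the struct plus four parameter-sized chunks, and zlib_deflateInit2() carves the chunks back out with a bump pointer. A self-contained userspace sketch of the same scheme (the Byte/Pos/ush widths are assumptions matching zlib's typedefs; all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned char Byte;
typedef unsigned short Pos;
typedef unsigned short ush;

#define window_memsize(windowBits) (2 * (1 << (windowBits)) * sizeof(Byte))
#define prev_memsize(windowBits)   ((1 << (windowBits)) * sizeof(Pos))
#define head_memsize(memLevel)     ((1 << ((memLevel) + 7)) * sizeof(Pos))
#define overlay_memsize(memLevel)  ((1 << ((memLevel) + 6)) * (sizeof(ush) + 2))

struct workspace {
	/* deflate_state would live here */
	Byte *window;
	Pos *prev;
	Pos *head;
	char *overlay;
};

int main(void)
{
	int windowBits = 15, memLevel = 8;
	size_t size = sizeof(struct workspace)
		+ window_memsize(windowBits)
		+ prev_memsize(windowBits)
		+ head_memsize(memLevel)
		+ overlay_memsize(memLevel);
	struct workspace *ws = malloc(size);
	char *next;

	if (!ws)
		return 1;

	/* Carve the variable-sized chunks out of the one allocation. */
	next = (char *)ws + sizeof(*ws);
	ws->window = (Byte *)next;
	next += window_memsize(windowBits);
	ws->prev = (Pos *)next;
	next += prev_memsize(windowBits);
	ws->head = (Pos *)next;
	next += head_memsize(memLevel);
	ws->overlay = next;

	printf("windowBits=%d memLevel=%d -> %zu bytes\n",
	       windowBits, memLevel, size);
	free(ws);
	return 0;
}

The point of the change is visible in the arithmetic: the old fixed arrays always paid for MAX_WBITS and MAX_MEM_LEVEL, whereas sizing from the actual windowBits/memLevel lets callers with small windows allocate far less.
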
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index af7cfb43d2f0..8b1a477162dc 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -1,27 +1,24 @@
1config DEBUG_PAGEALLOC 1config DEBUG_PAGEALLOC
2 bool "Debug page memory allocations" 2 bool "Debug page memory allocations"
3 depends on DEBUG_KERNEL && ARCH_SUPPORTS_DEBUG_PAGEALLOC 3 depends on DEBUG_KERNEL
4 depends on !HIBERNATION || !PPC && !SPARC 4 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
5 depends on !KMEMCHECK 5 depends on !KMEMCHECK
6 select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
6 ---help--- 7 ---help---
7 Unmap pages from the kernel linear mapping after free_pages(). 8 Unmap pages from the kernel linear mapping after free_pages().
8 This results in a large slowdown, but helps to find certain types 9 This results in a large slowdown, but helps to find certain types
9 of memory corruption. 10 of memory corruption.
10 11
12 For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
13 fill the pages with poison patterns after free_pages() and verify
14 the patterns before alloc_pages(). Additionally,
15 this option cannot be enabled in combination with hibernation as
16 that would result in incorrect warnings of memory corruption after
17 a resume because free pages are not saved to the suspend image.
18
11config WANT_PAGE_DEBUG_FLAGS 19config WANT_PAGE_DEBUG_FLAGS
12 bool 20 bool
13 21
14config PAGE_POISONING 22config PAGE_POISONING
15 bool "Debug page memory allocations" 23 bool
16 depends on DEBUG_KERNEL && !ARCH_SUPPORTS_DEBUG_PAGEALLOC
17 depends on !HIBERNATION
18 select DEBUG_PAGEALLOC
19 select WANT_PAGE_DEBUG_FLAGS 24 select WANT_PAGE_DEBUG_FLAGS
20 ---help---
21 Fill the pages with poison patterns after free_pages() and verify
22 the patterns before alloc_pages(). This results in a large slowdown,
23 but helps to find certain types of memory corruption.
24
25 This option cannot be enabled in combination with hibernation as
26 that would result in incorrect warnings of memory corruption after
27 a resume because free pages are not saved to the suspend image.
diff --git a/mm/compaction.c b/mm/compaction.c
index 8be430b812de..021a2960ef9e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -42,8 +42,6 @@ struct compact_control {
42 unsigned int order; /* order a direct compactor needs */ 42 unsigned int order; /* order a direct compactor needs */
43 int migratetype; /* MOVABLE, RECLAIMABLE etc */ 43 int migratetype; /* MOVABLE, RECLAIMABLE etc */
44 struct zone *zone; 44 struct zone *zone;
45
46 int compact_mode;
47}; 45};
48 46
49static unsigned long release_freepages(struct list_head *freelist) 47static unsigned long release_freepages(struct list_head *freelist)
@@ -155,7 +153,6 @@ static void isolate_freepages(struct zone *zone,
155 * pages on cc->migratepages. We stop searching if the migrate 153 * pages on cc->migratepages. We stop searching if the migrate
156 * and free page scanners meet or enough free pages are isolated. 154 * and free page scanners meet or enough free pages are isolated.
157 */ 155 */
158 spin_lock_irqsave(&zone->lock, flags);
159 for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; 156 for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
160 pfn -= pageblock_nr_pages) { 157 pfn -= pageblock_nr_pages) {
161 unsigned long isolated; 158 unsigned long isolated;
@@ -178,9 +175,19 @@ static void isolate_freepages(struct zone *zone,
178 if (!suitable_migration_target(page)) 175 if (!suitable_migration_target(page))
179 continue; 176 continue;
180 177
181 /* Found a block suitable for isolating free pages from */ 178 /*
182 isolated = isolate_freepages_block(zone, pfn, freelist); 179 * Found a block suitable for isolating free pages from. Now
 183 nr_freepages += isolated; 180 * that the zone lock is taken with IRQs disabled, double
 181 * check the block is still suitable and isolate the
 182 * pages. This minimises the time IRQs are disabled.
183 */
184 isolated = 0;
185 spin_lock_irqsave(&zone->lock, flags);
186 if (suitable_migration_target(page)) {
187 isolated = isolate_freepages_block(zone, pfn, freelist);
188 nr_freepages += isolated;
189 }
190 spin_unlock_irqrestore(&zone->lock, flags);
184 191
185 /* 192 /*
186 * Record the highest PFN we isolated pages from. When next 193 * Record the highest PFN we isolated pages from. When next
@@ -190,7 +197,6 @@ static void isolate_freepages(struct zone *zone,
190 if (isolated) 197 if (isolated)
191 high_pfn = max(high_pfn, pfn); 198 high_pfn = max(high_pfn, pfn);
192 } 199 }
193 spin_unlock_irqrestore(&zone->lock, flags);
194 200
195 /* split_free_page does not map the pages */ 201 /* split_free_page does not map the pages */
196 list_for_each_entry(page, freelist, lru) { 202 list_for_each_entry(page, freelist, lru) {
@@ -271,9 +277,27 @@ static unsigned long isolate_migratepages(struct zone *zone,
271 } 277 }
272 278
273 /* Time to isolate some pages for migration */ 279 /* Time to isolate some pages for migration */
280 cond_resched();
274 spin_lock_irq(&zone->lru_lock); 281 spin_lock_irq(&zone->lru_lock);
275 for (; low_pfn < end_pfn; low_pfn++) { 282 for (; low_pfn < end_pfn; low_pfn++) {
276 struct page *page; 283 struct page *page;
284 bool locked = true;
285
286 /* give a chance to irqs before checking need_resched() */
287 if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
288 spin_unlock_irq(&zone->lru_lock);
289 locked = false;
290 }
291 if (need_resched() || spin_is_contended(&zone->lru_lock)) {
292 if (locked)
293 spin_unlock_irq(&zone->lru_lock);
294 cond_resched();
295 spin_lock_irq(&zone->lru_lock);
296 if (fatal_signal_pending(current))
297 break;
298 } else if (!locked)
299 spin_lock_irq(&zone->lru_lock);
300
277 if (!pfn_valid_within(low_pfn)) 301 if (!pfn_valid_within(low_pfn))
278 continue; 302 continue;
279 nr_scanned++; 303 nr_scanned++;
@@ -397,10 +421,7 @@ static int compact_finished(struct zone *zone,
397 return COMPACT_COMPLETE; 421 return COMPACT_COMPLETE;
398 422
399 /* Compaction run is not finished if the watermark is not met */ 423 /* Compaction run is not finished if the watermark is not met */
400 if (cc->compact_mode != COMPACT_MODE_KSWAPD) 424 watermark = low_wmark_pages(zone);
401 watermark = low_wmark_pages(zone);
402 else
403 watermark = high_wmark_pages(zone);
404 watermark += (1 << cc->order); 425 watermark += (1 << cc->order);
405 426
406 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) 427 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
@@ -413,15 +434,6 @@ static int compact_finished(struct zone *zone,
413 if (cc->order == -1) 434 if (cc->order == -1)
414 return COMPACT_CONTINUE; 435 return COMPACT_CONTINUE;
415 436
416 /*
417 * Generating only one page of the right order is not enough
418 * for kswapd, we must continue until we're above the high
419 * watermark as a pool for high order GFP_ATOMIC allocations
420 * too.
421 */
422 if (cc->compact_mode == COMPACT_MODE_KSWAPD)
423 return COMPACT_CONTINUE;
424
425 /* Direct compactor: Is a suitable page free? */ 437 /* Direct compactor: Is a suitable page free? */
426 for (order = cc->order; order < MAX_ORDER; order++) { 438 for (order = cc->order; order < MAX_ORDER; order++) {
427 /* Job done if page is free of the right migratetype */ 439 /* Job done if page is free of the right migratetype */
@@ -508,12 +520,13 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
508 520
509 while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { 521 while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
510 unsigned long nr_migrate, nr_remaining; 522 unsigned long nr_migrate, nr_remaining;
523 int err;
511 524
512 if (!isolate_migratepages(zone, cc)) 525 if (!isolate_migratepages(zone, cc))
513 continue; 526 continue;
514 527
515 nr_migrate = cc->nr_migratepages; 528 nr_migrate = cc->nr_migratepages;
516 migrate_pages(&cc->migratepages, compaction_alloc, 529 err = migrate_pages(&cc->migratepages, compaction_alloc,
517 (unsigned long)cc, false, 530 (unsigned long)cc, false,
518 cc->sync); 531 cc->sync);
519 update_nr_listpages(cc); 532 update_nr_listpages(cc);
@@ -527,7 +540,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
527 nr_remaining); 540 nr_remaining);
528 541
529 /* Release LRU pages not migrated */ 542 /* Release LRU pages not migrated */
530 if (!list_empty(&cc->migratepages)) { 543 if (err) {
531 putback_lru_pages(&cc->migratepages); 544 putback_lru_pages(&cc->migratepages);
532 cc->nr_migratepages = 0; 545 cc->nr_migratepages = 0;
533 } 546 }
@@ -543,8 +556,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
543 556
544unsigned long compact_zone_order(struct zone *zone, 557unsigned long compact_zone_order(struct zone *zone,
545 int order, gfp_t gfp_mask, 558 int order, gfp_t gfp_mask,
546 bool sync, 559 bool sync)
547 int compact_mode)
548{ 560{
549 struct compact_control cc = { 561 struct compact_control cc = {
550 .nr_freepages = 0, 562 .nr_freepages = 0,
@@ -553,7 +565,6 @@ unsigned long compact_zone_order(struct zone *zone,
553 .migratetype = allocflags_to_migratetype(gfp_mask), 565 .migratetype = allocflags_to_migratetype(gfp_mask),
554 .zone = zone, 566 .zone = zone,
555 .sync = sync, 567 .sync = sync,
556 .compact_mode = compact_mode,
557 }; 568 };
558 INIT_LIST_HEAD(&cc.freepages); 569 INIT_LIST_HEAD(&cc.freepages);
559 INIT_LIST_HEAD(&cc.migratepages); 570 INIT_LIST_HEAD(&cc.migratepages);
@@ -599,8 +610,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
599 nodemask) { 610 nodemask) {
600 int status; 611 int status;
601 612
602 status = compact_zone_order(zone, order, gfp_mask, sync, 613 status = compact_zone_order(zone, order, gfp_mask, sync);
603 COMPACT_MODE_DIRECT_RECLAIM);
604 rc = max(status, rc); 614 rc = max(status, rc);
605 615
606 /* If a normal allocation would succeed, stop compacting */ 616 /* If a normal allocation would succeed, stop compacting */
@@ -631,7 +641,6 @@ static int compact_node(int nid)
631 .nr_freepages = 0, 641 .nr_freepages = 0,
632 .nr_migratepages = 0, 642 .nr_migratepages = 0,
633 .order = -1, 643 .order = -1,
634 .compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
635 }; 644 };
636 645
637 zone = &pgdat->node_zones[zoneid]; 646 zone = &pgdat->node_zones[zoneid];
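
Both compaction hunks apply the same discipline: do not hold an IRQ-disabling zone lock across a long scan; take it just around the work, and back off periodically so IRQs and the scheduler get a chance to run. A userspace analogue with a pthread mutex standing in for the spinlock and sched_yield() for cond_resched() (illustrative names, assumed batch size):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define BATCH 32 /* SWAP_CLUSTER_MAX plays this role above */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int scan_items(int nr_items)
{
	int done = 0;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < nr_items; i++) {
		/* Periodically drop the lock so waiters can get in. */
		if (i && !(i % BATCH)) {
			pthread_mutex_unlock(&lock);
			sched_yield(); /* stands in for cond_resched() */
			pthread_mutex_lock(&lock);
		}
		done++; /* stand-in for isolating one page */
	}
	pthread_mutex_unlock(&lock);
	return done;
}

int main(void)
{
	printf("scanned %d items\n", scan_items(1000));
	return 0;
}
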
diff --git a/mm/filemap.c b/mm/filemap.c
index 83a45d35468b..f807afda86f2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -108,11 +108,11 @@
108 */ 108 */
109 109
110/* 110/*
111 * Remove a page from the page cache and free it. Caller has to make 111 * Delete a page from the page cache and free it. Caller has to make
112 * sure the page is locked and that nobody else uses it - or that usage 112 * sure the page is locked and that nobody else uses it - or that usage
113 * is safe. The caller must hold the mapping's tree_lock. 113 * is safe. The caller must hold the mapping's tree_lock.
114 */ 114 */
115void __remove_from_page_cache(struct page *page) 115void __delete_from_page_cache(struct page *page)
116{ 116{
117 struct address_space *mapping = page->mapping; 117 struct address_space *mapping = page->mapping;
118 118
@@ -137,7 +137,15 @@ void __remove_from_page_cache(struct page *page)
137 } 137 }
138} 138}
139 139
140void remove_from_page_cache(struct page *page) 140/**
141 * delete_from_page_cache - delete page from page cache
142 * @page: the page which the kernel is trying to remove from page cache
143 *
144 * This must be called only on pages that have been verified to be in the page
145 * cache and locked. It will never put the page into the free list, the caller
146 * has a reference on the page.
147 */
148void delete_from_page_cache(struct page *page)
141{ 149{
142 struct address_space *mapping = page->mapping; 150 struct address_space *mapping = page->mapping;
143 void (*freepage)(struct page *); 151 void (*freepage)(struct page *);
@@ -146,14 +154,15 @@ void remove_from_page_cache(struct page *page)
146 154
147 freepage = mapping->a_ops->freepage; 155 freepage = mapping->a_ops->freepage;
148 spin_lock_irq(&mapping->tree_lock); 156 spin_lock_irq(&mapping->tree_lock);
149 __remove_from_page_cache(page); 157 __delete_from_page_cache(page);
150 spin_unlock_irq(&mapping->tree_lock); 158 spin_unlock_irq(&mapping->tree_lock);
151 mem_cgroup_uncharge_cache_page(page); 159 mem_cgroup_uncharge_cache_page(page);
152 160
153 if (freepage) 161 if (freepage)
154 freepage(page); 162 freepage(page);
163 page_cache_release(page);
155} 164}
156EXPORT_SYMBOL(remove_from_page_cache); 165EXPORT_SYMBOL(delete_from_page_cache);
157 166
158static int sync_page(void *word) 167static int sync_page(void *word)
159{ 168{
@@ -387,6 +396,76 @@ int filemap_write_and_wait_range(struct address_space *mapping,
387EXPORT_SYMBOL(filemap_write_and_wait_range); 396EXPORT_SYMBOL(filemap_write_and_wait_range);
388 397
389/** 398/**
399 * replace_page_cache_page - replace a pagecache page with a new one
400 * @old: page to be replaced
401 * @new: page to replace with
402 * @gfp_mask: allocation mode
403 *
404 * This function replaces a page in the pagecache with a new one. On
405 * success it acquires the pagecache reference for the new page and
406 * drops it for the old page. Both the old and new pages must be
407 * locked. This function does not add the new page to the LRU, the
408 * caller must do that.
409 *
410 * The remove + add is atomic. The only way this function can fail is
411 * memory allocation failure.
412 */
413int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
414{
415 int error;
416 struct mem_cgroup *memcg = NULL;
417
418 VM_BUG_ON(!PageLocked(old));
419 VM_BUG_ON(!PageLocked(new));
420 VM_BUG_ON(new->mapping);
421
422 /*
423 * This is not page migration, but prepare_migration and
 424 * end_migration do enough work for charge replacement.
425 *
426 * In the longer term we probably want a specialized function
427 * for moving the charge from old to new in a more efficient
428 * manner.
429 */
430 error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
431 if (error)
432 return error;
433
434 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
435 if (!error) {
436 struct address_space *mapping = old->mapping;
437 void (*freepage)(struct page *);
438
439 pgoff_t offset = old->index;
440 freepage = mapping->a_ops->freepage;
441
442 page_cache_get(new);
443 new->mapping = mapping;
444 new->index = offset;
445
446 spin_lock_irq(&mapping->tree_lock);
447 __delete_from_page_cache(old);
448 error = radix_tree_insert(&mapping->page_tree, offset, new);
449 BUG_ON(error);
450 mapping->nrpages++;
451 __inc_zone_page_state(new, NR_FILE_PAGES);
452 if (PageSwapBacked(new))
453 __inc_zone_page_state(new, NR_SHMEM);
454 spin_unlock_irq(&mapping->tree_lock);
455 radix_tree_preload_end();
456 if (freepage)
457 freepage(old);
458 page_cache_release(old);
459 mem_cgroup_end_migration(memcg, old, new, true);
460 } else {
461 mem_cgroup_end_migration(memcg, old, new, false);
462 }
463
464 return error;
465}
466EXPORT_SYMBOL_GPL(replace_page_cache_page);
467
468/**
390 * add_to_page_cache_locked - add a locked page to the pagecache 469 * add_to_page_cache_locked - add a locked page to the pagecache
391 * @page: page to add 470 * @page: page to add
392 * @mapping: the page's address_space 471 * @mapping: the page's address_space
@@ -621,8 +700,10 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
621 __lock_page(page); 700 __lock_page(page);
622 return 1; 701 return 1;
623 } else { 702 } else {
624 up_read(&mm->mmap_sem); 703 if (!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
625 wait_on_page_locked(page); 704 up_read(&mm->mmap_sem);
705 wait_on_page_locked(page);
706 }
626 return 0; 707 return 0;
627 } 708 }
628} 709}
@@ -782,9 +863,13 @@ repeat:
782 page = radix_tree_deref_slot((void **)pages[i]); 863 page = radix_tree_deref_slot((void **)pages[i]);
783 if (unlikely(!page)) 864 if (unlikely(!page))
784 continue; 865 continue;
866
867 /*
868 * This can only trigger when the entry at index 0 moves out
869 * of or back to the root: none yet gotten, safe to restart.
870 */
785 if (radix_tree_deref_retry(page)) { 871 if (radix_tree_deref_retry(page)) {
786 if (ret) 872 WARN_ON(start | i);
787 start = pages[ret-1]->index;
788 goto restart; 873 goto restart;
789 } 874 }
790 875
@@ -800,6 +885,13 @@ repeat:
800 pages[ret] = page; 885 pages[ret] = page;
801 ret++; 886 ret++;
802 } 887 }
888
889 /*
890 * If all entries were removed before we could secure them,
891 * try again, because callers stop trying once 0 is returned.
892 */
893 if (unlikely(!ret && nr_found))
894 goto restart;
803 rcu_read_unlock(); 895 rcu_read_unlock();
804 return ret; 896 return ret;
805} 897}
@@ -834,6 +926,11 @@ repeat:
834 page = radix_tree_deref_slot((void **)pages[i]); 926 page = radix_tree_deref_slot((void **)pages[i]);
835 if (unlikely(!page)) 927 if (unlikely(!page))
836 continue; 928 continue;
929
930 /*
931 * This can only trigger when the entry at index 0 moves out
932 * of or back to the root: none yet gotten, safe to restart.
933 */
837 if (radix_tree_deref_retry(page)) 934 if (radix_tree_deref_retry(page))
838 goto restart; 935 goto restart;
839 936
@@ -894,6 +991,11 @@ repeat:
894 page = radix_tree_deref_slot((void **)pages[i]); 991 page = radix_tree_deref_slot((void **)pages[i]);
895 if (unlikely(!page)) 992 if (unlikely(!page))
896 continue; 993 continue;
994
995 /*
996 * This can only trigger when the entry at index 0 moves out
997 * of or back to the root: none yet gotten, safe to restart.
998 */
897 if (radix_tree_deref_retry(page)) 999 if (radix_tree_deref_retry(page))
898 goto restart; 1000 goto restart;
899 1001
@@ -909,6 +1011,13 @@ repeat:
909 pages[ret] = page; 1011 pages[ret] = page;
910 ret++; 1012 ret++;
911 } 1013 }
1014
1015 /*
1016 * If all entries were removed before we could secure them,
1017 * try again, because callers stop trying once 0 is returned.
1018 */
1019 if (unlikely(!ret && nr_found))
1020 goto restart;
912 rcu_read_unlock(); 1021 rcu_read_unlock();
913 1022
914 if (ret) 1023 if (ret)
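
The retry logic added to the gang lookups encodes two rules: a deref failure at slot 0 means the walk restarts from scratch, and a pass that sighted entries but secured none must loop rather than return 0, because callers treat 0 as end-of-data. A lock-free userspace sketch of the second rule (the atomic slot array and the revalidation step are stand-ins for the radix tree and page refcounting):

#include <stdatomic.h>
#include <stdio.h>

#define SLOTS 8

static _Atomic int slots[SLOTS]; /* 0 == empty, nonzero == payload */

static int gang_lookup(int *out, int max)
{
	int ret, nr_found;

restart:
	ret = 0;
	nr_found = 0;
	for (int i = 0; i < SLOTS && ret < max; i++) {
		int v = atomic_load(&slots[i]);

		if (!v)
			continue;
		nr_found++;
		/* "Secure" the entry: revalidate that it is still there. */
		if (atomic_load(&slots[i]) != v)
			continue;
		out[ret++] = v;
	}
	/* Sighted entries but secured none: retry, since 0 means "stop". */
	if (!ret && nr_found)
		goto restart;
	return ret;
}

int main(void)
{
	int out[SLOTS];

	atomic_store(&slots[2], 42);
	printf("found %d\n", gang_lookup(out, SLOTS));
	return 0;
}
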
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 113e35c47502..0a619e0e2e0b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -643,23 +643,24 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
643 return ret; 643 return ret;
644} 644}
645 645
646static inline gfp_t alloc_hugepage_gfpmask(int defrag) 646static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
647{ 647{
648 return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT); 648 return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
649} 649}
650 650
651static inline struct page *alloc_hugepage_vma(int defrag, 651static inline struct page *alloc_hugepage_vma(int defrag,
652 struct vm_area_struct *vma, 652 struct vm_area_struct *vma,
653 unsigned long haddr, int nd) 653 unsigned long haddr, int nd,
654 gfp_t extra_gfp)
654{ 655{
655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), 656 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
656 HPAGE_PMD_ORDER, vma, haddr, nd); 657 HPAGE_PMD_ORDER, vma, haddr, nd);
657} 658}
658 659
659#ifndef CONFIG_NUMA 660#ifndef CONFIG_NUMA
660static inline struct page *alloc_hugepage(int defrag) 661static inline struct page *alloc_hugepage(int defrag)
661{ 662{
662 return alloc_pages(alloc_hugepage_gfpmask(defrag), 663 return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
663 HPAGE_PMD_ORDER); 664 HPAGE_PMD_ORDER);
664} 665}
665#endif 666#endif
@@ -678,7 +679,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
678 if (unlikely(khugepaged_enter(vma))) 679 if (unlikely(khugepaged_enter(vma)))
679 return VM_FAULT_OOM; 680 return VM_FAULT_OOM;
680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 681 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
681 vma, haddr, numa_node_id()); 682 vma, haddr, numa_node_id(), 0);
682 if (unlikely(!page)) 683 if (unlikely(!page))
683 goto out; 684 goto out;
684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { 685 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,7 +800,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
799 } 800 }
800 801
801 for (i = 0; i < HPAGE_PMD_NR; i++) { 802 for (i = 0; i < HPAGE_PMD_NR; i++) {
802 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, 803 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
804 __GFP_OTHER_NODE,
803 vma, address, page_to_nid(page)); 805 vma, address, page_to_nid(page));
804 if (unlikely(!pages[i] || 806 if (unlikely(!pages[i] ||
805 mem_cgroup_newpage_charge(pages[i], mm, 807 mem_cgroup_newpage_charge(pages[i], mm,
@@ -902,7 +904,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
902 if (transparent_hugepage_enabled(vma) && 904 if (transparent_hugepage_enabled(vma) &&
903 !transparent_hugepage_debug_cow()) 905 !transparent_hugepage_debug_cow())
904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 906 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
905 vma, haddr, numa_node_id()); 907 vma, haddr, numa_node_id(), 0);
906 else 908 else
907 new_page = NULL; 909 new_page = NULL;
908 910
@@ -1779,7 +1781,7 @@ static void collapse_huge_page(struct mm_struct *mm,
1779 * scalability. 1781 * scalability.
1780 */ 1782 */
1781 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, 1783 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1782 node); 1784 node, __GFP_OTHER_NODE);
1783 if (unlikely(!new_page)) { 1785 if (unlikely(!new_page)) {
1784 up_read(&mm->mmap_sem); 1786 up_read(&mm->mmap_sem);
1785 *hpage = ERR_PTR(-ENOMEM); 1787 *hpage = ERR_PTR(-ENOMEM);
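
The extra_gfp plumbing in huge_memory.c is plain bitmask composition: strip __GFP_WAIT when defrag is off, then OR in caller extras such as __GFP_OTHER_NODE. A standalone sketch with made-up flag values (the real values live in include/linux/gfp.h):

#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT		0x01u	/* illustrative values only */
#define __GFP_OTHER_NODE	0x02u
#define GFP_TRANSHUGE		(0x10u | __GFP_WAIT)

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

int main(void)
{
	printf("defrag=1:   %#x\n", alloc_hugepage_gfpmask(1, 0));
	printf("defrag=0:   %#x\n", alloc_hugepage_gfpmask(0, 0));
	printf("khugepaged: %#x\n",
	       alloc_hugepage_gfpmask(1, __GFP_OTHER_NODE));
	return 0;
}
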
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb0b7c128015..06de5aa4d644 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1872,8 +1872,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1872 unsigned long tmp; 1872 unsigned long tmp;
1873 int ret; 1873 int ret;
1874 1874
1875 if (!write) 1875 tmp = h->max_huge_pages;
1876 tmp = h->max_huge_pages;
1877 1876
1878 if (write && h->order >= MAX_ORDER) 1877 if (write && h->order >= MAX_ORDER)
1879 return -EINVAL; 1878 return -EINVAL;
@@ -1938,8 +1937,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1938 unsigned long tmp; 1937 unsigned long tmp;
1939 int ret; 1938 int ret;
1940 1939
1941 if (!write) 1940 tmp = h->nr_overcommit_huge_pages;
1942 tmp = h->nr_overcommit_huge_pages;
1943 1941
1944 if (write && h->order >= MAX_ORDER) 1942 if (write && h->order >= MAX_ORDER)
1945 return -EINVAL; 1943 return -EINVAL;
diff --git a/mm/ksm.c b/mm/ksm.c
index c2b2a94f9d67..1bbe785aa559 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -301,20 +301,6 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
301 return rmap_item->address & STABLE_FLAG; 301 return rmap_item->address & STABLE_FLAG;
302} 302}
303 303
304static void hold_anon_vma(struct rmap_item *rmap_item,
305 struct anon_vma *anon_vma)
306{
307 rmap_item->anon_vma = anon_vma;
308 get_anon_vma(anon_vma);
309}
310
311static void ksm_drop_anon_vma(struct rmap_item *rmap_item)
312{
313 struct anon_vma *anon_vma = rmap_item->anon_vma;
314
315 drop_anon_vma(anon_vma);
316}
317
318/* 304/*
319 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's 305 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
320 * page tables after it has passed through ksm_exit() - which, if necessary, 306 * page tables after it has passed through ksm_exit() - which, if necessary,
@@ -397,7 +383,7 @@ static void break_cow(struct rmap_item *rmap_item)
397 * It is not an accident that whenever we want to break COW 383 * It is not an accident that whenever we want to break COW
398 * to undo, we also need to drop a reference to the anon_vma. 384 * to undo, we also need to drop a reference to the anon_vma.
399 */ 385 */
400 ksm_drop_anon_vma(rmap_item); 386 put_anon_vma(rmap_item->anon_vma);
401 387
402 down_read(&mm->mmap_sem); 388 down_read(&mm->mmap_sem);
403 if (ksm_test_exit(mm)) 389 if (ksm_test_exit(mm))
@@ -466,7 +452,7 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
466 ksm_pages_sharing--; 452 ksm_pages_sharing--;
467 else 453 else
468 ksm_pages_shared--; 454 ksm_pages_shared--;
469 ksm_drop_anon_vma(rmap_item); 455 put_anon_vma(rmap_item->anon_vma);
470 rmap_item->address &= PAGE_MASK; 456 rmap_item->address &= PAGE_MASK;
471 cond_resched(); 457 cond_resched();
472 } 458 }
@@ -554,7 +540,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
554 else 540 else
555 ksm_pages_shared--; 541 ksm_pages_shared--;
556 542
557 ksm_drop_anon_vma(rmap_item); 543 put_anon_vma(rmap_item->anon_vma);
558 rmap_item->address &= PAGE_MASK; 544 rmap_item->address &= PAGE_MASK;
559 545
560 } else if (rmap_item->address & UNSTABLE_FLAG) { 546 } else if (rmap_item->address & UNSTABLE_FLAG) {
@@ -949,7 +935,8 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
949 goto out; 935 goto out;
950 936
951 /* Must get reference to anon_vma while still holding mmap_sem */ 937 /* Must get reference to anon_vma while still holding mmap_sem */
952 hold_anon_vma(rmap_item, vma->anon_vma); 938 rmap_item->anon_vma = vma->anon_vma;
939 get_anon_vma(vma->anon_vma);
953out: 940out:
954 up_read(&mm->mmap_sem); 941 up_read(&mm->mmap_sem);
955 return err; 942 return err;
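
The wrappers deleted from ksm.c were one-line indirections; what remains is the ordinary get-on-store / put-on-teardown refcount idiom. A minimal userspace sketch of that idiom (illustrative types, not the kernel's anon_vma):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	_Atomic int refcount;
};

static void get_obj(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

static void put_obj(struct obj *o)
{
	/* Dropping the last reference frees the object. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *holder;

	if (!o)
		return 1;
	atomic_store(&o->refcount, 1);

	holder = o;		/* store the pointer... */
	get_obj(holder);	/* ...and take the reference with it */

	put_obj(holder);	/* holder torn down */
	put_obj(o);		/* original reference dropped; freed here */
	printf("done\n");
	return 0;
}
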
diff --git a/mm/memblock.c b/mm/memblock.c
index 4618fda975a0..a0562d1a6ad4 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -58,28 +58,6 @@ static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, p
58 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 58 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
59} 59}
60 60
61static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
62 phys_addr_t base2, phys_addr_t size2)
63{
64 if (base2 == base1 + size1)
65 return 1;
66 else if (base1 == base2 + size2)
67 return -1;
68
69 return 0;
70}
71
72static long __init_memblock memblock_regions_adjacent(struct memblock_type *type,
73 unsigned long r1, unsigned long r2)
74{
75 phys_addr_t base1 = type->regions[r1].base;
76 phys_addr_t size1 = type->regions[r1].size;
77 phys_addr_t base2 = type->regions[r2].base;
78 phys_addr_t size2 = type->regions[r2].size;
79
80 return memblock_addrs_adjacent(base1, size1, base2, size2);
81}
82
83long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) 61long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
84{ 62{
85 unsigned long i; 63 unsigned long i;
@@ -206,14 +184,13 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
206 type->regions[i].size = type->regions[i + 1].size; 184 type->regions[i].size = type->regions[i + 1].size;
207 } 185 }
208 type->cnt--; 186 type->cnt--;
209}
210 187
211/* Assumption: base addr of region 1 < base addr of region 2 */ 188 /* Special case for empty arrays */
212static void __init_memblock memblock_coalesce_regions(struct memblock_type *type, 189 if (type->cnt == 0) {
213 unsigned long r1, unsigned long r2) 190 type->cnt = 1;
214{ 191 type->regions[0].base = 0;
215 type->regions[r1].size += type->regions[r2].size; 192 type->regions[0].size = 0;
216 memblock_remove_region(type, r2); 193 }
217} 194}
218 195
219/* Defined below but needed now */ 196/* Defined below but needed now */
@@ -276,7 +253,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
276 return 0; 253 return 0;
277 254
278 /* Add the new reserved region now. Should not fail ! */ 255 /* Add the new reserved region now. Should not fail ! */
279 BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0); 256 BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size));
280 257
281 /* If the array wasn't our static init one, then free it. We only do 258 /* If the array wasn't our static init one, then free it. We only do
282 * that before SLAB is available as later on, we don't know whether 259 * that before SLAB is available as later on, we don't know whether
@@ -296,58 +273,99 @@ extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1
296 return 1; 273 return 1;
297} 274}
298 275
299static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) 276static long __init_memblock memblock_add_region(struct memblock_type *type,
277 phys_addr_t base, phys_addr_t size)
300{ 278{
301 unsigned long coalesced = 0; 279 phys_addr_t end = base + size;
302 long adjacent, i; 280 int i, slot = -1;
303
304 if ((type->cnt == 1) && (type->regions[0].size == 0)) {
305 type->regions[0].base = base;
306 type->regions[0].size = size;
307 return 0;
308 }
309 281
310 /* First try and coalesce this MEMBLOCK with another. */ 282 /* First try and coalesce this MEMBLOCK with others */
311 for (i = 0; i < type->cnt; i++) { 283 for (i = 0; i < type->cnt; i++) {
312 phys_addr_t rgnbase = type->regions[i].base; 284 struct memblock_region *rgn = &type->regions[i];
313 phys_addr_t rgnsize = type->regions[i].size; 285 phys_addr_t rend = rgn->base + rgn->size;
286
287 /* Exit if there's no possible hits */
288 if (rgn->base > end || rgn->size == 0)
289 break;
314 290
315 if ((rgnbase == base) && (rgnsize == size)) 291 /* Check if we are fully enclosed within an existing
316 /* Already have this region, so we're done */ 292 * block
293 */
294 if (rgn->base <= base && rend >= end)
317 return 0; 295 return 0;
318 296
319 adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); 297 /* Check if we overlap or are adjacent with the bottom
320 /* Check if arch allows coalescing */ 298 * of a block.
321 if (adjacent != 0 && type == &memblock.memory && 299 */
322 !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize)) 300 if (base < rgn->base && end >= rgn->base) {
323 break; 301 /* If we can't coalesce, create a new block */
324 if (adjacent > 0) { 302 if (!memblock_memory_can_coalesce(base, size,
325 type->regions[i].base -= size; 303 rgn->base,
326 type->regions[i].size += size; 304 rgn->size)) {
327 coalesced++; 305 /* Overlap & can't coalesce are mutually
 328 break; 306 * exclusive; if you do that, be prepared
329 } else if (adjacent < 0) { 307 * for trouble
330 type->regions[i].size += size; 308 */
331 coalesced++; 309 WARN_ON(end != rgn->base);
332 break; 310 goto new_block;
311 }
312 /* We extend the bottom of the block down to our
313 * base
314 */
315 rgn->base = base;
316 rgn->size = rend - base;
317
318 /* Return if we have nothing else to allocate
319 * (fully coalesced)
320 */
321 if (rend >= end)
322 return 0;
323
324 /* We continue processing from the end of the
325 * coalesced block.
326 */
327 base = rend;
328 size = end - base;
329 }
330
331 /* Now check if we overlap or are adjacent with the
332 * top of a block
333 */
334 if (base <= rend && end >= rend) {
335 /* If we can't coalesce, create a new block */
336 if (!memblock_memory_can_coalesce(rgn->base,
337 rgn->size,
338 base, size)) {
339 /* Overlap & can't coalesce are mutually
 340 * exclusive; if you do that, be prepared
341 * for trouble
342 */
343 WARN_ON(rend != base);
344 goto new_block;
345 }
346 /* We adjust our base down to enclose the
347 * original block and destroy it. It will be
348 * part of our new allocation. Since we've
349 * freed an entry, we know we won't fail
350 * to allocate one later, so we won't risk
351 * losing the original block allocation.
352 */
353 size += (base - rgn->base);
354 base = rgn->base;
355 memblock_remove_region(type, i--);
333 } 356 }
334 } 357 }
335 358
 336 /* If we plugged a hole, we may want to also coalesce with the 359 /* If the array is empty (special case), replace the fake
337 * next region 360 * filler region and return
338 */ 361 */
339 if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) && 362 if ((type->cnt == 1) && (type->regions[0].size == 0)) {
340 ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base, 363 type->regions[0].base = base;
341 type->regions[i].size, 364 type->regions[0].size = size;
342 type->regions[i+1].base, 365 return 0;
343 type->regions[i+1].size)))) {
344 memblock_coalesce_regions(type, i, i+1);
345 coalesced++;
346 } 366 }
347 367
348 if (coalesced) 368 new_block:
349 return coalesced;
350
351 /* If we are out of space, we fail. It's too late to resize the array 369 /* If we are out of space, we fail. It's too late to resize the array
352 * but then this shouldn't have happened in the first place. 370 * but then this shouldn't have happened in the first place.
353 */ 371 */
@@ -362,13 +380,14 @@ static long __init_memblock memblock_add_region(struct memblock_type *type, phys
362 } else { 380 } else {
363 type->regions[i+1].base = base; 381 type->regions[i+1].base = base;
364 type->regions[i+1].size = size; 382 type->regions[i+1].size = size;
383 slot = i + 1;
365 break; 384 break;
366 } 385 }
367 } 386 }
368
369 if (base < type->regions[0].base) { 387 if (base < type->regions[0].base) {
370 type->regions[0].base = base; 388 type->regions[0].base = base;
371 type->regions[0].size = size; 389 type->regions[0].size = size;
390 slot = 0;
372 } 391 }
373 type->cnt++; 392 type->cnt++;
374 393
@@ -376,7 +395,8 @@ static long __init_memblock memblock_add_region(struct memblock_type *type, phys
376 * our allocation and return an error 395 * our allocation and return an error
377 */ 396 */
378 if (type->cnt == type->max && memblock_double_array(type)) { 397 if (type->cnt == type->max && memblock_double_array(type)) {
379 type->cnt--; 398 BUG_ON(slot < 0);
399 memblock_remove_region(type, slot);
380 return -1; 400 return -1;
381 } 401 }
382 402
@@ -389,52 +409,55 @@ long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
389 409
390} 410}
391 411
392static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size) 412static long __init_memblock __memblock_remove(struct memblock_type *type,
413 phys_addr_t base, phys_addr_t size)
393{ 414{
394 phys_addr_t rgnbegin, rgnend;
395 phys_addr_t end = base + size; 415 phys_addr_t end = base + size;
396 int i; 416 int i;
397 417
 398 rgnbegin = rgnend = 0; /* suppress gcc warnings */ 418 /* Walk through the array for collisions */
399 419 for (i = 0; i < type->cnt; i++) {
400 /* Find the region where (base, size) belongs to */ 420 struct memblock_region *rgn = &type->regions[i];
401 for (i=0; i < type->cnt; i++) { 421 phys_addr_t rend = rgn->base + rgn->size;
402 rgnbegin = type->regions[i].base;
403 rgnend = rgnbegin + type->regions[i].size;
404 422
405 if ((rgnbegin <= base) && (end <= rgnend)) 423 /* Nothing more to do, exit */
424 if (rgn->base > end || rgn->size == 0)
406 break; 425 break;
407 }
408 426
409 /* Didn't find the region */ 427 /* If we fully enclose the block, drop it */
410 if (i == type->cnt) 428 if (base <= rgn->base && end >= rend) {
411 return -1; 429 memblock_remove_region(type, i--);
430 continue;
431 }
412 432
413 /* Check to see if we are removing entire region */ 433 /* If we are fully enclosed within a block
414 if ((rgnbegin == base) && (rgnend == end)) { 434 * then we need to split it and we are done
415 memblock_remove_region(type, i); 435 */
416 return 0; 436 if (base > rgn->base && end < rend) {
417 } 437 rgn->size = base - rgn->base;
438 if (!memblock_add_region(type, end, rend - end))
439 return 0;
440 /* Failure to split is bad, we at least
441 * restore the block before erroring
442 */
443 rgn->size = rend - rgn->base;
444 WARN_ON(1);
445 return -1;
446 }
418 447
419 /* Check to see if region is matching at the front */ 448 /* Check if we need to trim the bottom of a block */
420 if (rgnbegin == base) { 449 if (rgn->base < end && rend > end) {
421 type->regions[i].base = end; 450 rgn->size -= end - rgn->base;
422 type->regions[i].size -= size; 451 rgn->base = end;
423 return 0; 452 break;
424 } 453 }
425 454
426 /* Check to see if the region is matching at the end */ 455 /* And check if we need to trim the top of a block */
427 if (rgnend == end) { 456 if (base < rend)
428 type->regions[i].size -= size; 457 rgn->size -= rend - base;
429 return 0;
430 }
431 458
432 /* 459 }
433 * We need to split the entry - adjust the current one to the 460 return 0;
 434 * beginning of the hole and add the region after the hole.
435 */
436 type->regions[i].size = base - type->regions[i].base;
437 return memblock_add_region(type, end, rgnend - end);
438} 461}
439 462
440long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) 463long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
@@ -467,7 +490,7 @@ phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, ph
467 490
468 found = memblock_find_base(size, align, 0, max_addr); 491 found = memblock_find_base(size, align, 0, max_addr);
469 if (found != MEMBLOCK_ERROR && 492 if (found != MEMBLOCK_ERROR &&
470 memblock_add_region(&memblock.reserved, found, size) >= 0) 493 !memblock_add_region(&memblock.reserved, found, size))
471 return found; 494 return found;
472 495
473 return 0; 496 return 0;
@@ -548,7 +571,7 @@ static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
548 if (this_nid == nid) { 571 if (this_nid == nid) {
549 phys_addr_t ret = memblock_find_region(start, this_end, size, align); 572 phys_addr_t ret = memblock_find_region(start, this_end, size, align);
550 if (ret != MEMBLOCK_ERROR && 573 if (ret != MEMBLOCK_ERROR &&
551 memblock_add_region(&memblock.reserved, ret, size) >= 0) 574 !memblock_add_region(&memblock.reserved, ret, size))
552 return ret; 575 return ret;
553 } 576 }
554 start = this_end; 577 start = this_end;
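
The rewritten memblock_add_region() is an interval merge over a sorted array: extend a neighbour downward when the new range overlaps its bottom, swallow neighbours it overlaps at the top, and insert whatever is left at its sorted slot. A simplified userspace sketch of that merge (no can_coalesce hook, no array resizing, illustrative names):

#include <stdio.h>

typedef unsigned long phys_addr_t;

struct region { phys_addr_t base, size; };

static struct region regions[16];
static int cnt;

static void add_region(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	for (i = 0; i < cnt; i++) {
		struct region *rgn = &regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		if (rgn->base > end)
			break;			/* no further hits possible */
		if (rgn->base <= base && rend >= end)
			return;			/* fully enclosed, done */
		if (base < rgn->base && end >= rgn->base) {
			/* Overlap/adjacency at the bottom: extend down. */
			rgn->base = base;
			rgn->size = rend - base;
			if (rend >= end)
				return;
			base = rend;		/* continue with the rest */
			size = end - base;
		}
		if (base <= rend && end >= rend) {
			/* Overlap/adjacency at the top: swallow rgn. */
			size += base - rgn->base;
			base = rgn->base;
			for (int j = i; j < cnt - 1; j++)
				regions[j] = regions[j + 1];
			cnt--;
			i--;
		}
	}
	/* Insert the (possibly grown) range at its sorted slot. */
	for (int j = cnt; j > i; j--)
		regions[j] = regions[j - 1];
	regions[i].base = base;
	regions[i].size = size;
	cnt++;
}

int main(void)
{
	add_region(0x1000, 0x1000);
	add_region(0x3000, 0x1000);
	add_region(0x2000, 0x1000);	/* bridges the two above */
	for (int i = 0; i < cnt; i++)
		printf("[%#lx-%#lx)\n", regions[i].base,
		       regions[i].base + regions[i].size);
	return 0;			/* prints a single [0x1000-0x4000) */
}
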
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index da53a252b259..e1ee6ad9c971 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -829,6 +829,32 @@ void mem_cgroup_del_lru(struct page *page)
829 mem_cgroup_del_lru_list(page, page_lru(page)); 829 mem_cgroup_del_lru_list(page, page_lru(page));
830} 830}
831 831
832/*
833 * Writeback is about to end against a page which has been marked for immediate
834 * reclaim. If it still appears to be reclaimable, move it to the tail of the
835 * inactive list.
836 */
837void mem_cgroup_rotate_reclaimable_page(struct page *page)
838{
839 struct mem_cgroup_per_zone *mz;
840 struct page_cgroup *pc;
841 enum lru_list lru = page_lru(page);
842
843 if (mem_cgroup_disabled())
844 return;
845
846 pc = lookup_page_cgroup(page);
847 /* unused or root page is not rotated. */
848 if (!PageCgroupUsed(pc))
849 return;
850 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
851 smp_rmb();
852 if (mem_cgroup_is_root(pc->mem_cgroup))
853 return;
854 mz = page_cgroup_zoneinfo(pc);
855 list_move_tail(&pc->lru, &mz->lists[lru]);
856}
857
832void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru) 858void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
833{ 859{
834 struct mem_cgroup_per_zone *mz; 860 struct mem_cgroup_per_zone *mz;
@@ -2883,7 +2909,7 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2883 * page belongs to. 2909 * page belongs to.
2884 */ 2910 */
2885int mem_cgroup_prepare_migration(struct page *page, 2911int mem_cgroup_prepare_migration(struct page *page,
2886 struct page *newpage, struct mem_cgroup **ptr) 2912 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
2887{ 2913{
2888 struct page_cgroup *pc; 2914 struct page_cgroup *pc;
2889 struct mem_cgroup *mem = NULL; 2915 struct mem_cgroup *mem = NULL;
@@ -2940,7 +2966,7 @@ int mem_cgroup_prepare_migration(struct page *page,
2940 return 0; 2966 return 0;
2941 2967
2942 *ptr = mem; 2968 *ptr = mem;
2943 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE); 2969 ret = __mem_cgroup_try_charge(NULL, gfp_mask, ptr, false, PAGE_SIZE);
2944 css_put(&mem->css);/* drop extra refcnt */ 2970 css_put(&mem->css);/* drop extra refcnt */
2945 if (ret || *ptr == NULL) { 2971 if (ret || *ptr == NULL) {
2946 if (PageAnon(page)) { 2972 if (PageAnon(page)) {
@@ -4737,7 +4763,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4737 pte_t *pte; 4763 pte_t *pte;
4738 spinlock_t *ptl; 4764 spinlock_t *ptl;
4739 4765
4740 VM_BUG_ON(pmd_trans_huge(*pmd)); 4766 split_huge_page_pmd(walk->mm, pmd);
4767
4741 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4768 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4742 for (; addr != end; pte++, addr += PAGE_SIZE) 4769 for (; addr != end; pte++, addr += PAGE_SIZE)
4743 if (is_target_pte_for_mc(vma, addr, *pte, NULL)) 4770 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4899,8 +4926,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4899 pte_t *pte; 4926 pte_t *pte;
4900 spinlock_t *ptl; 4927 spinlock_t *ptl;
4901 4928
4929 split_huge_page_pmd(walk->mm, pmd);
4902retry: 4930retry:
4903 VM_BUG_ON(pmd_trans_huge(*pmd));
4904 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 4931 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4905 for (; addr != end; addr += PAGE_SIZE) { 4932 for (; addr != end; addr += PAGE_SIZE) {
4906 pte_t ptent = *(pte++); 4933 pte_t ptent = *(pte++);
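
mem_cgroup_rotate_reclaimable_page() above is the memcg half of "move a just-written reclaimable page to the tail of the inactive list"; the core operation is an unlink-and-append on an intrusive list. A userspace sketch mirroring the list_move_tail() semantics (illustrative types):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* Unlink the entry and re-queue it at the tail of the list. */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

struct page_node { struct list_head lru; int id; };

int main(void)
{
	struct list_head inactive;
	struct page_node a = { .id = 1 }, b = { .id = 2 };

	list_init(&inactive);
	list_add_tail(&a.lru, &inactive);
	list_add_tail(&b.lru, &inactive);

	/* Writeback just ended on page a; rotate it behind page b. */
	list_move_tail(&a.lru, &inactive);

	for (struct list_head *p = inactive.next; p != &inactive; p = p->next)
		printf("%d ", ((struct page_node *)p)->id);	/* 2 1 */
	printf("\n");
	return 0;
}
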
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 99ccb4472623..e0af336530c6 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1130,7 +1130,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
1130 1130
1131 /* 1131 /*
1132 * Now take care of user space mappings. 1132 * Now take care of user space mappings.
1133 * Abort on fail: __remove_from_page_cache() assumes unmapped page. 1133 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
1134 */ 1134 */
1135 if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) { 1135 if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
1136 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); 1136 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
diff --git a/mm/memory.c b/mm/memory.c
index e48945ab362b..615be5127ce1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1569,6 +1569,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1569 fault_flags |= FAULT_FLAG_WRITE; 1569 fault_flags |= FAULT_FLAG_WRITE;
1570 if (nonblocking) 1570 if (nonblocking)
1571 fault_flags |= FAULT_FLAG_ALLOW_RETRY; 1571 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
1572 if (foll_flags & FOLL_NOWAIT)
1573 fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
1572 1574
1573 ret = handle_mm_fault(mm, vma, start, 1575 ret = handle_mm_fault(mm, vma, start,
1574 fault_flags); 1576 fault_flags);
@@ -1595,7 +1597,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1595 tsk->min_flt++; 1597 tsk->min_flt++;
1596 1598
1597 if (ret & VM_FAULT_RETRY) { 1599 if (ret & VM_FAULT_RETRY) {
1598 *nonblocking = 0; 1600 if (nonblocking)
1601 *nonblocking = 0;
1599 return i; 1602 return i;
1600 } 1603 }
1601 1604
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 78062ab641ff..959a8b8c7350 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1979,8 +1979,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1979 case MPOL_INTERLEAVE: 1979 case MPOL_INTERLEAVE:
1980 return nodes_equal(a->v.nodes, b->v.nodes); 1980 return nodes_equal(a->v.nodes, b->v.nodes);
1981 case MPOL_PREFERRED: 1981 case MPOL_PREFERRED:
1982 return a->v.preferred_node == b->v.preferred_node && 1982 return a->v.preferred_node == b->v.preferred_node;
1983 a->flags == b->flags;
1984 default: 1983 default:
1985 BUG(); 1984 BUG();
1986 return 0; 1985 return 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index 352de555626c..89e5c3fe8bbc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -564,7 +564,7 @@ static int fallback_migrate_page(struct address_space *mapping,
564 * == 0 - success 564 * == 0 - success
565 */ 565 */
566static int move_to_new_page(struct page *newpage, struct page *page, 566static int move_to_new_page(struct page *newpage, struct page *page,
567 int remap_swapcache) 567 int remap_swapcache, bool sync)
568{ 568{
569 struct address_space *mapping; 569 struct address_space *mapping;
570 int rc; 570 int rc;
@@ -586,18 +586,28 @@ static int move_to_new_page(struct page *newpage, struct page *page,
586 mapping = page_mapping(page); 586 mapping = page_mapping(page);
587 if (!mapping) 587 if (!mapping)
588 rc = migrate_page(mapping, newpage, page); 588 rc = migrate_page(mapping, newpage, page);
589 else if (mapping->a_ops->migratepage) 589 else {
590 /* 590 /*
591 * Most pages have a mapping and most filesystems 591 * Do not writeback pages if !sync and migratepage is
592 * should provide a migration function. Anonymous 592 * not pointing to migrate_page() which is nonblocking
593 * pages are part of swap space which also has its 593 * (swapcache/tmpfs uses migratepage = migrate_page).
594 * own migration function. This is the most common
595 * path for page migration.
596 */ 594 */
597 rc = mapping->a_ops->migratepage(mapping, 595 if (PageDirty(page) && !sync &&
598 newpage, page); 596 mapping->a_ops->migratepage != migrate_page)
599 else 597 rc = -EBUSY;
600 rc = fallback_migrate_page(mapping, newpage, page); 598 else if (mapping->a_ops->migratepage)
599 /*
600 * Most pages have a mapping and most filesystems
601 * should provide a migration function. Anonymous
602 * pages are part of swap space which also has its
603 * own migration function. This is the most common
604 * path for page migration.
605 */
606 rc = mapping->a_ops->migratepage(mapping,
607 newpage, page);
608 else
609 rc = fallback_migrate_page(mapping, newpage, page);
610 }
601 611
602 if (rc) { 612 if (rc) {
603 newpage->mapping = NULL; 613 newpage->mapping = NULL;
@@ -641,7 +651,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
641 rc = -EAGAIN; 651 rc = -EAGAIN;
642 652
643 if (!trylock_page(page)) { 653 if (!trylock_page(page)) {
644 if (!force) 654 if (!force || !sync)
645 goto move_newpage; 655 goto move_newpage;
646 656
647 /* 657 /*
@@ -678,7 +688,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
678 } 688 }
679 689
680 /* charge against new page */ 690 /* charge against new page */
681 charge = mem_cgroup_prepare_migration(page, newpage, &mem); 691 charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
682 if (charge == -ENOMEM) { 692 if (charge == -ENOMEM) {
683 rc = -ENOMEM; 693 rc = -ENOMEM;
684 goto unlock; 694 goto unlock;
@@ -686,7 +696,15 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
686 BUG_ON(charge); 696 BUG_ON(charge);
687 697
688 if (PageWriteback(page)) { 698 if (PageWriteback(page)) {
689 if (!force || !sync) 699 /*
700 * For !sync, there is no point retrying as the retry loop
701 * is expected to be too short for PageWriteback to be cleared
702 */
703 if (!sync) {
704 rc = -EBUSY;
705 goto uncharge;
706 }
707 if (!force)
690 goto uncharge; 708 goto uncharge;
691 wait_on_page_writeback(page); 709 wait_on_page_writeback(page);
692 } 710 }
@@ -757,14 +775,14 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
757 775
758skip_unmap: 776skip_unmap:
759 if (!page_mapped(page)) 777 if (!page_mapped(page))
760 rc = move_to_new_page(newpage, page, remap_swapcache); 778 rc = move_to_new_page(newpage, page, remap_swapcache, sync);
761 779
762 if (rc && remap_swapcache) 780 if (rc && remap_swapcache)
763 remove_migration_ptes(page, page); 781 remove_migration_ptes(page, page);
764 782
765 /* Drop an anon_vma reference if we took one */ 783 /* Drop an anon_vma reference if we took one */
766 if (anon_vma) 784 if (anon_vma)
767 drop_anon_vma(anon_vma); 785 put_anon_vma(anon_vma);
768 786
769uncharge: 787uncharge:
770 if (!charge) 788 if (!charge)
@@ -850,13 +868,13 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
850 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 868 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
851 869
852 if (!page_mapped(hpage)) 870 if (!page_mapped(hpage))
853 rc = move_to_new_page(new_hpage, hpage, 1); 871 rc = move_to_new_page(new_hpage, hpage, 1, sync);
854 872
855 if (rc) 873 if (rc)
856 remove_migration_ptes(hpage, hpage); 874 remove_migration_ptes(hpage, hpage);
857 875
858 if (anon_vma) 876 if (anon_vma)
859 drop_anon_vma(anon_vma); 877 put_anon_vma(anon_vma);
860out: 878out:
861 unlock_page(hpage); 879 unlock_page(hpage);
862 880
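
The !sync bail-outs threaded through migrate.c reduce to a small decision table: in asynchronous mode, skip pages under writeback and skip dirty pages whose migratepage callback could block. A sketch of just that decision (stand-in types; the real checks sit in unmap_and_move() and move_to_new_page()):

#include <stdio.h>

#define EBUSY 16

struct page_state {
	int dirty;
	int writeback;
	int blocking_migratepage;	/* a_ops->migratepage != migrate_page */
};

/* Should an asynchronous (!sync) migration attempt bail out? */
static int async_migrate_check(const struct page_state *p)
{
	if (p->writeback)
		return -EBUSY;	/* retry loop too short to wait it out */
	if (p->dirty && p->blocking_migratepage)
		return -EBUSY;	/* would have to write the page out */
	return 0;		/* safe to proceed without blocking */
}

int main(void)
{
	struct page_state clean = { 0, 0, 0 };
	struct page_state wb    = { 0, 1, 0 };
	struct page_state dirty = { 1, 0, 1 };

	printf("clean=%d writeback=%d dirty-fs=%d\n",
	       async_migrate_check(&clean),
	       async_migrate_check(&wb),
	       async_migrate_check(&dirty));
	return 0;
}
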
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 7dcca55ede7c..3100bc57036b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -31,6 +31,7 @@
31#include <linux/memcontrol.h> 31#include <linux/memcontrol.h>
32#include <linux/mempolicy.h> 32#include <linux/mempolicy.h>
33#include <linux/security.h> 33#include <linux/security.h>
34#include <linux/ptrace.h>
34 35
35int sysctl_panic_on_oom; 36int sysctl_panic_on_oom;
36int sysctl_oom_kill_allocating_task; 37int sysctl_oom_kill_allocating_task;
@@ -292,13 +293,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
292 unsigned long totalpages, struct mem_cgroup *mem, 293 unsigned long totalpages, struct mem_cgroup *mem,
293 const nodemask_t *nodemask) 294 const nodemask_t *nodemask)
294{ 295{
295 struct task_struct *p; 296 struct task_struct *g, *p;
296 struct task_struct *chosen = NULL; 297 struct task_struct *chosen = NULL;
297 *ppoints = 0; 298 *ppoints = 0;
298 299
299 for_each_process(p) { 300 do_each_thread(g, p) {
300 unsigned int points; 301 unsigned int points;
301 302
303 if (!p->mm)
304 continue;
302 if (oom_unkillable_task(p, mem, nodemask)) 305 if (oom_unkillable_task(p, mem, nodemask))
303 continue; 306 continue;
304 307
@@ -314,22 +317,29 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
314 if (test_tsk_thread_flag(p, TIF_MEMDIE)) 317 if (test_tsk_thread_flag(p, TIF_MEMDIE))
315 return ERR_PTR(-1UL); 318 return ERR_PTR(-1UL);
316 319
317 /* 320 if (p->flags & PF_EXITING) {
318 * This is in the process of releasing memory so wait for it 321 /*
319 * to finish before killing some other task by mistake. 322 * If p is the current task and is in the process of
320 * 323 * releasing memory, we allow the "kill" to set
321 * However, if p is the current task, we allow the 'kill' to 324 * TIF_MEMDIE, which will allow it to gain access to
322 * go ahead if it is exiting: this will simply set TIF_MEMDIE, 325 * memory reserves. Otherwise, it may stall forever.
323 * which will allow it to gain access to memory reserves in 326 *
324 * the process of exiting and releasing its resources. 327 * The loop isn't broken here, however, in case other
325 * Otherwise we could get an easy OOM deadlock. 328 * threads are found to have already been oom killed.
326 */ 329 */
327 if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) { 330 if (p == current) {
328 if (p != current) 331 chosen = p;
329 return ERR_PTR(-1UL); 332 *ppoints = 1000;
330 333 } else {
331 chosen = p; 334 /*
332 *ppoints = 1000; 335 * If this task is not being ptraced on exit,
336 * then wait for it to finish before killing
337 * some other task unnecessarily.
338 */
339 if (!(task_ptrace(p->group_leader) &
340 PT_TRACE_EXIT))
341 return ERR_PTR(-1UL);
342 }
333 } 343 }
334 344
335 points = oom_badness(p, mem, nodemask, totalpages); 345 points = oom_badness(p, mem, nodemask, totalpages);
@@ -337,7 +347,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
337 chosen = p; 347 chosen = p;
338 *ppoints = points; 348 *ppoints = points;
339 } 349 }
340 } 350 } while_each_thread(g, p);
341 351
342 return chosen; 352 return chosen;
343} 353}
@@ -396,7 +406,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
396 task_unlock(current); 406 task_unlock(current);
397 dump_stack(); 407 dump_stack();
398 mem_cgroup_print_oom_info(mem, p); 408 mem_cgroup_print_oom_info(mem, p);
399 show_mem(); 409 __show_mem(SHOW_MEM_FILTER_NODES);
400 if (sysctl_oom_dump_tasks) 410 if (sysctl_oom_dump_tasks)
401 dump_tasks(mem, nodemask); 411 dump_tasks(mem, nodemask);
402} 412}
@@ -491,6 +501,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
491 list_for_each_entry(child, &t->children, sibling) { 501 list_for_each_entry(child, &t->children, sibling) {
492 unsigned int child_points; 502 unsigned int child_points;
493 503
504 if (child->mm == p->mm)
505 continue;
494 /* 506 /*
495 * oom_badness() returns 0 if the thread is unkillable 507 * oom_badness() returns 0 if the thread is unkillable
496 */ 508 */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cb01f6ec5d0..632b46479c94 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -927,7 +927,7 @@ retry:
927 break; 927 break;
928 } 928 }
929 929
930 done_index = page->index + 1; 930 done_index = page->index;
931 931
932 lock_page(page); 932 lock_page(page);
933 933
@@ -977,6 +977,7 @@ continue_unlock:
977 * not be suitable for data integrity 977 * not be suitable for data integrity
978 * writeout). 978 * writeout).
979 */ 979 */
980 done_index = page->index + 1;
980 done = 1; 981 done = 1;
981 break; 982 break;
982 } 983 }
@@ -1211,6 +1212,17 @@ int set_page_dirty(struct page *page)
1211 1212
1212 if (likely(mapping)) { 1213 if (likely(mapping)) {
1213 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; 1214 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
1215 /*
1216 * readahead/lru_deactivate_page could leave
1217 * PG_readahead/PG_reclaim set due to a race with end_page_writeback.
1218 * For readahead, if the page is written, the flag would be
1219 * reset, so there is no problem.
1220 * For lru_deactivate_page, if the page is redirtied, the flag
1221 * will be reset, so there is no problem; but if the page is later
1222 * used by readahead it will confuse readahead and make it restart
1223 * the size ramp-up process. That is only a trivial problem, though.
1224 */
1225 ClearPageReclaim(page);
1214#ifdef CONFIG_BLOCK 1226#ifdef CONFIG_BLOCK
1215 if (!spd) 1227 if (!spd)
1216 spd = __set_page_dirty_buffers; 1228 spd = __set_page_dirty_buffers;
@@ -1266,7 +1278,6 @@ int clear_page_dirty_for_io(struct page *page)
1266 1278
1267 BUG_ON(!PageLocked(page)); 1279 BUG_ON(!PageLocked(page));
1268 1280
1269 ClearPageReclaim(page);
1270 if (mapping && mapping_cap_account_dirty(mapping)) { 1281 if (mapping && mapping_cap_account_dirty(mapping)) {
1271 /* 1282 /*
1272 * Yes, Virginia, this is indeed insane. 1283 * Yes, Virginia, this is indeed insane.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7945247b1e53..3a58221f4c22 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -614,6 +614,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
614 list = &pcp->lists[migratetype]; 614 list = &pcp->lists[migratetype];
615 } while (list_empty(list)); 615 } while (list_empty(list));
616 616
617 /* This is the only non-empty list. Free them all. */
618 if (batch_free == MIGRATE_PCPTYPES)
619 batch_free = to_free;
620
617 do { 621 do {
618 page = list_entry(list->prev, struct page, lru); 622 page = list_entry(list->prev, struct page, lru);
619 /* must delete as __free_one_page list manipulates */ 623 /* must delete as __free_one_page list manipulates */
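
The batch_free shortcut avoids spinning through the skip loop once only one pcp list is left. A compilable model of the round-robin drain (MIGRATE_PCPTYPES fixed at 3, lists reduced to counters, and batch_free reset each round for simplicity):

#include <stdio.h>

#define MIGRATE_PCPTYPES 3

/* counts[] stands in for the per-migratetype pcp lists. */
static void drain(int counts[MIGRATE_PCPTYPES], int to_free)
{
	int migratetype = 0;

	while (to_free) {
		int batch_free = 0;

		/* pick the next non-empty list, growing batch_free as we skip */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
		} while (counts[migratetype] == 0);

		/* only one list left: drain it in one go, as in the patch */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			counts[migratetype]--;
		} while (--to_free && --batch_free && counts[migratetype]);
	}
}

int main(void)
{
	int counts[MIGRATE_PCPTYPES] = { 0, 0, 8 };

	drain(counts, 6);
	printf("%d %d %d\n", counts[0], counts[1], counts[2]);	/* 0 0 2 */
	return 0;
}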
@@ -863,9 +867,8 @@ static int move_freepages(struct zone *zone,
863 } 867 }
864 868
865 order = page_order(page); 869 order = page_order(page);
866 list_del(&page->lru); 870 list_move(&page->lru,
867 list_add(&page->lru, 871 &zone->free_area[order].free_list[migratetype]);
868 &zone->free_area[order].free_list[migratetype]);
869 page += 1 << order; 872 page += 1 << order;
870 pages_moved += 1 << order; 873 pages_moved += 1 << order;
871 } 874 }
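
For reference, list_move() folds the deleted pair into one call; paraphrased from include/linux/list.h of roughly this era (from memory, so verify against the header):

static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}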
@@ -1333,7 +1336,7 @@ again:
1333 } 1336 }
1334 1337
1335 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1338 __count_zone_vm_events(PGALLOC, zone, 1 << order);
1336 zone_statistics(preferred_zone, zone); 1339 zone_statistics(preferred_zone, zone, gfp_flags);
1337 local_irq_restore(flags); 1340 local_irq_restore(flags);
1338 1341
1339 VM_BUG_ON(bad_range(zone, page)); 1342 VM_BUG_ON(bad_range(zone, page));
@@ -1714,6 +1717,20 @@ try_next_zone:
1714 return page; 1717 return page;
1715} 1718}
1716 1719
1720/*
1721 * Large machines with many possible nodes should not always dump per-node
1722 * meminfo in irq context.
1723 */
1724static inline bool should_suppress_show_mem(void)
1725{
1726 bool ret = false;
1727
1728#if NODES_SHIFT > 8
1729 ret = in_interrupt();
1730#endif
1731 return ret;
1732}
1733
1717static inline int 1734static inline int
1718should_alloc_retry(gfp_t gfp_mask, unsigned int order, 1735should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1719 unsigned long pages_reclaimed) 1736 unsigned long pages_reclaimed)
@@ -2085,7 +2102,7 @@ rebalance:
2085 sync_migration); 2102 sync_migration);
2086 if (page) 2103 if (page)
2087 goto got_pg; 2104 goto got_pg;
2088 sync_migration = true; 2105 sync_migration = !(gfp_mask & __GFP_NO_KSWAPD);
2089 2106
2090 /* Try direct reclaim and then allocating */ 2107 /* Try direct reclaim and then allocating */
2091 page = __alloc_pages_direct_reclaim(gfp_mask, order, 2108 page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2157,11 +2174,25 @@ rebalance:
2157 2174
2158nopage: 2175nopage:
2159 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 2176 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
2160 printk(KERN_WARNING "%s: page allocation failure." 2177 unsigned int filter = SHOW_MEM_FILTER_NODES;
2161 " order:%d, mode:0x%x\n", 2178
2179 /*
2180 * This documents exceptions given to allocations in certain
2181 * contexts that are allowed to allocate outside current's set
2182 * of allowed nodes.
2183 */
2184 if (!(gfp_mask & __GFP_NOMEMALLOC))
2185 if (test_thread_flag(TIF_MEMDIE) ||
2186 (current->flags & (PF_MEMALLOC | PF_EXITING)))
2187 filter &= ~SHOW_MEM_FILTER_NODES;
2188 if (in_interrupt() || !wait)
2189 filter &= ~SHOW_MEM_FILTER_NODES;
2190
2191 pr_warning("%s: page allocation failure. order:%d, mode:0x%x\n",
2162 current->comm, order, gfp_mask); 2192 current->comm, order, gfp_mask);
2163 dump_stack(); 2193 dump_stack();
2164 show_mem(); 2194 if (!should_suppress_show_mem())
2195 __show_mem(filter);
2165 } 2196 }
2166 return page; 2197 return page;
2167got_pg: 2198got_pg:
@@ -2411,19 +2442,42 @@ void si_meminfo_node(struct sysinfo *val, int nid)
2411} 2442}
2412#endif 2443#endif
2413 2444
2445/*
2446 * Determine whether the zone's node should be displayed or not, depending on
2447 * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas().
2448 */
2449static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone)
2450{
2451 bool ret = false;
2452
2453 if (!(flags & SHOW_MEM_FILTER_NODES))
2454 goto out;
2455
2456 get_mems_allowed();
2457 ret = !node_isset(zone->zone_pgdat->node_id,
2458 cpuset_current_mems_allowed);
2459 put_mems_allowed();
2460out:
2461 return ret;
2462}
2463
2414#define K(x) ((x) << (PAGE_SHIFT-10)) 2464#define K(x) ((x) << (PAGE_SHIFT-10))
2415 2465
2416/* 2466/*
2417 * Show free area list (used inside shift_scroll-lock stuff) 2467 * Show free area list (used inside shift_scroll-lock stuff)
2418 * We also calculate the percentage fragmentation. We do this by counting the 2468 * We also calculate the percentage fragmentation. We do this by counting the
2419 * memory on each free list with the exception of the first item on the list. 2469 * memory on each free list with the exception of the first item on the list.
2470 * Suppresses nodes that are not allowed by current's cpuset if
2471 * SHOW_MEM_FILTER_NODES is passed.
2420 */ 2472 */
2421void show_free_areas(void) 2473void __show_free_areas(unsigned int filter)
2422{ 2474{
2423 int cpu; 2475 int cpu;
2424 struct zone *zone; 2476 struct zone *zone;
2425 2477
2426 for_each_populated_zone(zone) { 2478 for_each_populated_zone(zone) {
2479 if (skip_free_areas_zone(filter, zone))
2480 continue;
2427 show_node(zone); 2481 show_node(zone);
2428 printk("%s per-cpu:\n", zone->name); 2482 printk("%s per-cpu:\n", zone->name);
2429 2483
@@ -2465,6 +2519,8 @@ void show_free_areas(void)
2465 for_each_populated_zone(zone) { 2519 for_each_populated_zone(zone) {
2466 int i; 2520 int i;
2467 2521
2522 if (skip_free_areas_zone(filter, zone))
2523 continue;
2468 show_node(zone); 2524 show_node(zone);
2469 printk("%s" 2525 printk("%s"
2470 " free:%lukB" 2526 " free:%lukB"
@@ -2532,6 +2588,8 @@ void show_free_areas(void)
2532 for_each_populated_zone(zone) { 2588 for_each_populated_zone(zone) {
2533 unsigned long nr[MAX_ORDER], flags, order, total = 0; 2589 unsigned long nr[MAX_ORDER], flags, order, total = 0;
2534 2590
2591 if (skip_free_areas_zone(filter, zone))
2592 continue;
2535 show_node(zone); 2593 show_node(zone);
2536 printk("%s: ", zone->name); 2594 printk("%s: ", zone->name);
2537 2595
@@ -2551,6 +2609,11 @@ void show_free_areas(void)
2551 show_swap_cache_info(); 2609 show_swap_cache_info();
2552} 2610}
2553 2611
2612void show_free_areas(void)
2613{
2614 __show_free_areas(0);
2615}
2616
2554static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 2617static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2555{ 2618{
2556 zoneref->zone = zone; 2619 zoneref->zone = zone;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 5bffada7cde1..59a3cd4c799d 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -243,12 +243,7 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
243 break; 243 break;
244 } 244 }
245 245
246 if (ret) 246 return notifier_from_errno(ret);
247 ret = notifier_from_errno(ret);
248 else
249 ret = NOTIFY_OK;
250
251 return ret;
252} 247}
253 248
254#endif 249#endif
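
The collapse works because notifier_from_errno(0) is already NOTIFY_OK. Paraphrased from include/linux/notifier.h (from memory, so verify against the header):

static inline int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);

	return NOTIFY_OK;
}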
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 7cfa6ae02303..c3450d533611 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -33,19 +33,35 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
33 33
34 pmd = pmd_offset(pud, addr); 34 pmd = pmd_offset(pud, addr);
35 do { 35 do {
36again:
36 next = pmd_addr_end(addr, end); 37 next = pmd_addr_end(addr, end);
37 split_huge_page_pmd(walk->mm, pmd); 38 if (pmd_none(*pmd)) {
38 if (pmd_none_or_clear_bad(pmd)) {
39 if (walk->pte_hole) 39 if (walk->pte_hole)
40 err = walk->pte_hole(addr, next, walk); 40 err = walk->pte_hole(addr, next, walk);
41 if (err) 41 if (err)
42 break; 42 break;
43 continue; 43 continue;
44 } 44 }
45 /*
46 * This implies that each ->pmd_entry() handler
47 * needs to know about pmd_trans_huge() pmds
48 */
45 if (walk->pmd_entry) 49 if (walk->pmd_entry)
46 err = walk->pmd_entry(pmd, addr, next, walk); 50 err = walk->pmd_entry(pmd, addr, next, walk);
47 if (!err && walk->pte_entry) 51 if (err)
48 err = walk_pte_range(pmd, addr, next, walk); 52 break;
53
54 /*
55 * Check this here so we only break down trans_huge
56 * pages when we _need_ to
57 */
58 if (!walk->pte_entry)
59 continue;
60
61 split_huge_page_pmd(walk->mm, pmd);
62 if (pmd_none_or_clear_bad(pmd))
63 goto again;
64 err = walk_pte_range(pmd, addr, next, walk);
49 if (err) 65 if (err)
50 break; 66 break;
51 } while (pmd++, addr = next, addr != end); 67 } while (pmd++, addr = next, addr != end);
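
The comment above states the new contract. A hypothetical handler written against it (my_pmd_entry and its body are invented for illustration):

/* Hypothetical ->pmd_entry() under the new contract: the handler must
 * recognise a transparent huge pmd itself, since the walker no longer
 * splits before calling it. */
static int my_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	if (pmd_trans_huge(*pmd)) {
		/* account the whole huge range here, leave the ptes alone */
		return 0;
	}
	/* small pmd: the walker splits and calls ->pte_entry() if set */
	return 0;
}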
diff --git a/mm/rmap.c b/mm/rmap.c
index 941bf82e8961..4a8e99a0fb97 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -67,11 +67,24 @@ static struct kmem_cache *anon_vma_chain_cachep;
67 67
68static inline struct anon_vma *anon_vma_alloc(void) 68static inline struct anon_vma *anon_vma_alloc(void)
69{ 69{
70 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 70 struct anon_vma *anon_vma;
71
72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
73 if (anon_vma) {
74 atomic_set(&anon_vma->refcount, 1);
75 /*
76 * Initialise the anon_vma root to point to itself. If called
 77 * from fork, the root will be reset to the parent's anon_vma.
78 */
79 anon_vma->root = anon_vma;
80 }
81
82 return anon_vma;
71} 83}
72 84
73void anon_vma_free(struct anon_vma *anon_vma) 85static inline void anon_vma_free(struct anon_vma *anon_vma)
74{ 86{
87 VM_BUG_ON(atomic_read(&anon_vma->refcount));
75 kmem_cache_free(anon_vma_cachep, anon_vma); 88 kmem_cache_free(anon_vma_cachep, anon_vma);
76} 89}
77 90
@@ -133,11 +146,6 @@ int anon_vma_prepare(struct vm_area_struct *vma)
133 if (unlikely(!anon_vma)) 146 if (unlikely(!anon_vma))
134 goto out_enomem_free_avc; 147 goto out_enomem_free_avc;
135 allocated = anon_vma; 148 allocated = anon_vma;
136 /*
137 * This VMA had no anon_vma yet. This anon_vma is
138 * the root of any anon_vma tree that might form.
139 */
140 anon_vma->root = anon_vma;
141 } 149 }
142 150
143 anon_vma_lock(anon_vma); 151 anon_vma_lock(anon_vma);
@@ -156,7 +164,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
156 anon_vma_unlock(anon_vma); 164 anon_vma_unlock(anon_vma);
157 165
158 if (unlikely(allocated)) 166 if (unlikely(allocated))
159 anon_vma_free(allocated); 167 put_anon_vma(allocated);
160 if (unlikely(avc)) 168 if (unlikely(avc))
161 anon_vma_chain_free(avc); 169 anon_vma_chain_free(avc);
162 } 170 }
@@ -241,9 +249,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
241 */ 249 */
242 anon_vma->root = pvma->anon_vma->root; 250 anon_vma->root = pvma->anon_vma->root;
243 /* 251 /*
244 * With KSM refcounts, an anon_vma can stay around longer than the 252 * With refcounts, an anon_vma can stay around longer than the
245 * process it belongs to. The root anon_vma needs to be pinned 253 * process it belongs to. The root anon_vma needs to be pinned until
246 * until this anon_vma is freed, because the lock lives in the root. 254 * this anon_vma is freed, because the lock lives in the root.
247 */ 255 */
248 get_anon_vma(anon_vma->root); 256 get_anon_vma(anon_vma->root);
249 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 257 /* Mark this anon_vma as the one where our new (COWed) pages go. */
@@ -253,7 +261,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
253 return 0; 261 return 0;
254 262
255 out_error_free_anon_vma: 263 out_error_free_anon_vma:
256 anon_vma_free(anon_vma); 264 put_anon_vma(anon_vma);
257 out_error: 265 out_error:
258 unlink_anon_vmas(vma); 266 unlink_anon_vmas(vma);
259 return -ENOMEM; 267 return -ENOMEM;
@@ -272,15 +280,11 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
272 list_del(&anon_vma_chain->same_anon_vma); 280 list_del(&anon_vma_chain->same_anon_vma);
273 281
274 /* We must garbage collect the anon_vma if it's empty */ 282 /* We must garbage collect the anon_vma if it's empty */
275 empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma); 283 empty = list_empty(&anon_vma->head);
276 anon_vma_unlock(anon_vma); 284 anon_vma_unlock(anon_vma);
277 285
278 if (empty) { 286 if (empty)
279 /* We no longer need the root anon_vma */ 287 put_anon_vma(anon_vma);
280 if (anon_vma->root != anon_vma)
281 drop_anon_vma(anon_vma->root);
282 anon_vma_free(anon_vma);
283 }
284} 288}
285 289
286void unlink_anon_vmas(struct vm_area_struct *vma) 290void unlink_anon_vmas(struct vm_area_struct *vma)
@@ -303,7 +307,7 @@ static void anon_vma_ctor(void *data)
303 struct anon_vma *anon_vma = data; 307 struct anon_vma *anon_vma = data;
304 308
305 spin_lock_init(&anon_vma->lock); 309 spin_lock_init(&anon_vma->lock);
306 anonvma_external_refcount_init(anon_vma); 310 atomic_set(&anon_vma->refcount, 0);
307 INIT_LIST_HEAD(&anon_vma->head); 311 INIT_LIST_HEAD(&anon_vma->head);
308} 312}
309 313
@@ -1486,41 +1490,15 @@ int try_to_munlock(struct page *page)
1486 return try_to_unmap_file(page, TTU_MUNLOCK); 1490 return try_to_unmap_file(page, TTU_MUNLOCK);
1487} 1491}
1488 1492
1489#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION) 1493void __put_anon_vma(struct anon_vma *anon_vma)
1490/*
1491 * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
1492 * if necessary. Be careful to do all the tests under the lock. Once
1493 * we know we are the last user, nobody else can get a reference and we
1494 * can do the freeing without the lock.
1495 */
1496void drop_anon_vma(struct anon_vma *anon_vma)
1497{ 1494{
1498 BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0); 1495 struct anon_vma *root = anon_vma->root;
1499 if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
1500 struct anon_vma *root = anon_vma->root;
1501 int empty = list_empty(&anon_vma->head);
1502 int last_root_user = 0;
1503 int root_empty = 0;
1504 1496
1505 /* 1497 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1506 * The refcount on a non-root anon_vma got dropped. Drop 1498 anon_vma_free(root);
1507 * the refcount on the root and check if we need to free it.
1508 */
1509 if (empty && anon_vma != root) {
1510 BUG_ON(atomic_read(&root->external_refcount) <= 0);
1511 last_root_user = atomic_dec_and_test(&root->external_refcount);
1512 root_empty = list_empty(&root->head);
1513 }
1514 anon_vma_unlock(anon_vma);
1515 1499
1516 if (empty) { 1500 anon_vma_free(anon_vma);
1517 anon_vma_free(anon_vma);
1518 if (root_empty && last_root_user)
1519 anon_vma_free(root);
1520 }
1521 }
1522} 1501}
1523#endif
1524 1502
1525#ifdef CONFIG_MIGRATION 1503#ifdef CONFIG_MIGRATION
1526/* 1504/*
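
For context, the get/put helpers this file now leans on look roughly like this (paraphrased from include/linux/rmap.h of the same series, from memory; verify against the header):

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}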
diff --git a/mm/shmem.c b/mm/shmem.c
index 048a95a5244d..91ce9a1024d7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1081,7 +1081,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1081 shmem_recalc_inode(inode); 1081 shmem_recalc_inode(inode);
1082 1082
1083 if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 1083 if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1084 remove_from_page_cache(page); 1084 delete_from_page_cache(page);
1085 shmem_swp_set(info, entry, swap.val); 1085 shmem_swp_set(info, entry, swap.val);
1086 shmem_swp_unmap(entry); 1086 shmem_swp_unmap(entry);
1087 if (list_empty(&info->swaplist)) 1087 if (list_empty(&info->swaplist))
@@ -1091,7 +1091,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1091 spin_unlock(&info->lock); 1091 spin_unlock(&info->lock);
1092 swap_shmem_alloc(swap); 1092 swap_shmem_alloc(swap);
1093 BUG_ON(page_mapped(page)); 1093 BUG_ON(page_mapped(page));
1094 page_cache_release(page); /* pagecache ref */
1095 swap_writepage(page, wbc); 1094 swap_writepage(page, wbc);
1096 if (inode) { 1095 if (inode) {
1097 mutex_lock(&shmem_swaplist_mutex); 1096 mutex_lock(&shmem_swaplist_mutex);
@@ -2794,5 +2793,6 @@ int shmem_zero_setup(struct vm_area_struct *vma)
2794 fput(vma->vm_file); 2793 fput(vma->vm_file);
2795 vma->vm_file = file; 2794 vma->vm_file = file;
2796 vma->vm_ops = &shmem_vm_ops; 2795 vma->vm_ops = &shmem_vm_ops;
2796 vma->vm_flags |= VM_CAN_NONLINEAR;
2797 return 0; 2797 return 0;
2798} 2798}
diff --git a/mm/slab.c b/mm/slab.c
index 37961d1f584f..568803f121a8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -191,22 +191,6 @@ typedef unsigned int kmem_bufctl_t;
191#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3) 191#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
192 192
193/* 193/*
194 * struct slab
195 *
196 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 197 * for a slab, or allocated from a general cache.
 198 * Slabs are chained into three lists: fully used, partial, fully free slabs.
199 */
200struct slab {
201 struct list_head list;
202 unsigned long colouroff;
203 void *s_mem; /* including colour offset */
204 unsigned int inuse; /* num of objs active in slab */
205 kmem_bufctl_t free;
206 unsigned short nodeid;
207};
208
209/*
210 * struct slab_rcu 194 * struct slab_rcu
211 * 195 *
212 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to 196 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
@@ -219,8 +203,6 @@ struct slab {
219 * 203 *
220 * rcu_read_lock before reading the address, then rcu_read_unlock after 204 * rcu_read_lock before reading the address, then rcu_read_unlock after
221 * taking the spinlock within the structure expected at that address. 205 * taking the spinlock within the structure expected at that address.
222 *
223 * We assume struct slab_rcu can overlay struct slab when destroying.
224 */ 206 */
225struct slab_rcu { 207struct slab_rcu {
226 struct rcu_head head; 208 struct rcu_head head;
@@ -229,6 +211,27 @@ struct slab_rcu {
229}; 211};
230 212
231/* 213/*
214 * struct slab
215 *
216 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 217 * for a slab, or allocated from a general cache.
 218 * Slabs are chained into three lists: fully used, partial, fully free slabs.
219 */
220struct slab {
221 union {
222 struct {
223 struct list_head list;
224 unsigned long colouroff;
225 void *s_mem; /* including colour offset */
226 unsigned int inuse; /* num of objs active in slab */
227 kmem_bufctl_t free;
228 unsigned short nodeid;
229 };
230 struct slab_rcu __slab_cover_slab_rcu;
231 };
232};
233
234/*
232 * struct array_cache 235 * struct array_cache
233 * 236 *
234 * Purpose: 237 * Purpose:
@@ -1387,7 +1390,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
1387 break; 1390 break;
1388 } 1391 }
1389out: 1392out:
1390 return ret ? notifier_from_errno(ret) : NOTIFY_OK; 1393 return notifier_from_errno(ret);
1391} 1394}
1392#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ 1395#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1393 1396
@@ -2147,8 +2150,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2147 * 2150 *
2148 * @name must be valid until the cache is destroyed. This implies that 2151 * @name must be valid until the cache is destroyed. This implies that
2149 * the module calling this has to destroy the cache before getting unloaded. 2152 * the module calling this has to destroy the cache before getting unloaded.
2150 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
2151 * therefore applications must manage it themselves.
2152 * 2153 *
2153 * The flags are 2154 * The flags are
2154 * 2155 *
@@ -2288,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2288 if (ralign < align) { 2289 if (ralign < align) {
2289 ralign = align; 2290 ralign = align;
2290 } 2291 }
2291 /* disable debug if not aligning with REDZONE_ALIGN */ 2292 /* disable debug if necessary */
2292 if (ralign & (__alignof__(unsigned long long) - 1)) 2293 if (ralign > __alignof__(unsigned long long))
2293 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); 2294 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2294 /* 2295 /*
2295 * 4) Store it. 2296 * 4) Store it.
@@ -2315,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2315 */ 2316 */
2316 if (flags & SLAB_RED_ZONE) { 2317 if (flags & SLAB_RED_ZONE) {
2317 /* add space for red zone words */ 2318 /* add space for red zone words */
2318 cachep->obj_offset += align; 2319 cachep->obj_offset += sizeof(unsigned long long);
2319 size += align + sizeof(unsigned long long); 2320 size += 2 * sizeof(unsigned long long);
2320 } 2321 }
2321 if (flags & SLAB_STORE_USER) { 2322 if (flags & SLAB_STORE_USER) {
2322 /* user store requires one word storage behind the end of 2323 /* user store requires one word storage behind the end of
@@ -3840,12 +3841,6 @@ unsigned int kmem_cache_size(struct kmem_cache *cachep)
3840} 3841}
3841EXPORT_SYMBOL(kmem_cache_size); 3842EXPORT_SYMBOL(kmem_cache_size);
3842 3843
3843const char *kmem_cache_name(struct kmem_cache *cachep)
3844{
3845 return cachep->name;
3846}
3847EXPORT_SYMBOL_GPL(kmem_cache_name);
3848
3849/* 3844/*
3850 * This initializes kmem_list3 or resizes various caches for all nodes. 3845 * This initializes kmem_list3 or resizes various caches for all nodes.
3851 */ 3846 */
diff --git a/mm/slob.c b/mm/slob.c
index 3588eaaef726..46e0aee33a23 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -666,12 +666,6 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
666} 666}
667EXPORT_SYMBOL(kmem_cache_size); 667EXPORT_SYMBOL(kmem_cache_size);
668 668
669const char *kmem_cache_name(struct kmem_cache *c)
670{
671 return c->name;
672}
673EXPORT_SYMBOL(kmem_cache_name);
674
675int kmem_cache_shrink(struct kmem_cache *d) 669int kmem_cache_shrink(struct kmem_cache *d)
676{ 670{
677 return 0; 671 return 0;
diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f193c9..93de30db95f5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -217,7 +217,7 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
217 217
218#endif 218#endif
219 219
220static inline void stat(struct kmem_cache *s, enum stat_item si) 220static inline void stat(const struct kmem_cache *s, enum stat_item si)
221{ 221{
222#ifdef CONFIG_SLUB_STATS 222#ifdef CONFIG_SLUB_STATS
223 __this_cpu_inc(s->cpu_slab->stat[si]); 223 __this_cpu_inc(s->cpu_slab->stat[si]);
@@ -281,11 +281,40 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
281 return (p - addr) / s->size; 281 return (p - addr) / s->size;
282} 282}
283 283
284static inline size_t slab_ksize(const struct kmem_cache *s)
285{
286#ifdef CONFIG_SLUB_DEBUG
287 /*
288 * Debugging requires use of the padding between object
289 * and whatever may come after it.
290 */
291 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
292 return s->objsize;
293
294#endif
295 /*
296 * If we have the need to store the freelist pointer
297 * back there or track user information then we can
298 * only use the space before that information.
299 */
300 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
301 return s->inuse;
302 /*
303 * Else we can use all the padding etc for the allocation
304 */
305 return s->size;
306}
307
308static inline int order_objects(int order, unsigned long size, int reserved)
309{
310 return ((PAGE_SIZE << order) - reserved) / size;
311}
312
284static inline struct kmem_cache_order_objects oo_make(int order, 313static inline struct kmem_cache_order_objects oo_make(int order,
285 unsigned long size) 314 unsigned long size, int reserved)
286{ 315{
287 struct kmem_cache_order_objects x = { 316 struct kmem_cache_order_objects x = {
288 (order << OO_SHIFT) + (PAGE_SIZE << order) / size 317 (order << OO_SHIFT) + order_objects(order, size, reserved)
289 }; 318 };
290 319
291 return x; 320 return x;
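
order_objects() simply shrinks the usable slab length by the reserved bytes. A quick stand-alone check (PAGE_SIZE assumed 4096; 16 bytes matches sizeof(struct rcu_head) on 64-bit):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

int main(void)
{
	/* reserving an rcu_head costs exactly one 64-byte object */
	printf("%d %d\n", order_objects(0, 64, 0),	/* 64 */
			  order_objects(0, 64, 16));	/* 63 */
	return 0;
}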
@@ -617,7 +646,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
617 return 1; 646 return 1;
618 647
619 start = page_address(page); 648 start = page_address(page);
620 length = (PAGE_SIZE << compound_order(page)); 649 length = (PAGE_SIZE << compound_order(page)) - s->reserved;
621 end = start + length; 650 end = start + length;
622 remainder = length % s->size; 651 remainder = length % s->size;
623 if (!remainder) 652 if (!remainder)
@@ -698,7 +727,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
698 return 0; 727 return 0;
699 } 728 }
700 729
701 maxobj = (PAGE_SIZE << compound_order(page)) / s->size; 730 maxobj = order_objects(compound_order(page), s->size, s->reserved);
702 if (page->objects > maxobj) { 731 if (page->objects > maxobj) {
703 slab_err(s, page, "objects %u > max %u", 732 slab_err(s, page, "objects %u > max %u",
704 s->name, page->objects, maxobj); 733 s->name, page->objects, maxobj);
@@ -748,7 +777,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
748 nr++; 777 nr++;
749 } 778 }
750 779
751 max_objects = (PAGE_SIZE << compound_order(page)) / s->size; 780 max_objects = order_objects(compound_order(page), s->size, s->reserved);
752 if (max_objects > MAX_OBJS_PER_PAGE) 781 if (max_objects > MAX_OBJS_PER_PAGE)
753 max_objects = MAX_OBJS_PER_PAGE; 782 max_objects = MAX_OBJS_PER_PAGE;
754 783
@@ -800,21 +829,31 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
800static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) 829static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
801{ 830{
802 flags &= gfp_allowed_mask; 831 flags &= gfp_allowed_mask;
803 kmemcheck_slab_alloc(s, flags, object, s->objsize); 832 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
804 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags); 833 kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
805} 834}
806 835
807static inline void slab_free_hook(struct kmem_cache *s, void *x) 836static inline void slab_free_hook(struct kmem_cache *s, void *x)
808{ 837{
809 kmemleak_free_recursive(x, s->flags); 838 kmemleak_free_recursive(x, s->flags);
810}
811 839
812static inline void slab_free_hook_irq(struct kmem_cache *s, void *object) 840 /*
 813{ 841 * Trouble is that we may no longer disable interrupts in the fast path
814 kmemcheck_slab_free(s, object, s->objsize); 842 * So in order to make the debug calls that expect irqs to be
815 debug_check_no_locks_freed(object, s->objsize); 843 * disabled we need to disable interrupts temporarily.
816 if (!(s->flags & SLAB_DEBUG_OBJECTS)) 844 */
817 debug_check_no_obj_freed(object, s->objsize); 845#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
846 {
847 unsigned long flags;
848
849 local_irq_save(flags);
850 kmemcheck_slab_free(s, x, s->objsize);
851 debug_check_no_locks_freed(x, s->objsize);
852 if (!(s->flags & SLAB_DEBUG_OBJECTS))
853 debug_check_no_obj_freed(x, s->objsize);
854 local_irq_restore(flags);
855 }
856#endif
818} 857}
819 858
820/* 859/*
@@ -1101,9 +1140,6 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1101 1140
1102static inline void slab_free_hook(struct kmem_cache *s, void *x) {} 1141static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
1103 1142
1104static inline void slab_free_hook_irq(struct kmem_cache *s,
1105 void *object) {}
1106
1107#endif /* CONFIG_SLUB_DEBUG */ 1143#endif /* CONFIG_SLUB_DEBUG */
1108 1144
1109/* 1145/*
@@ -1249,21 +1285,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
1249 __free_pages(page, order); 1285 __free_pages(page, order);
1250} 1286}
1251 1287
1288#define need_reserve_slab_rcu \
1289 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1290
1252static void rcu_free_slab(struct rcu_head *h) 1291static void rcu_free_slab(struct rcu_head *h)
1253{ 1292{
1254 struct page *page; 1293 struct page *page;
1255 1294
1256 page = container_of((struct list_head *)h, struct page, lru); 1295 if (need_reserve_slab_rcu)
1296 page = virt_to_head_page(h);
1297 else
1298 page = container_of((struct list_head *)h, struct page, lru);
1299
1257 __free_slab(page->slab, page); 1300 __free_slab(page->slab, page);
1258} 1301}
1259 1302
1260static void free_slab(struct kmem_cache *s, struct page *page) 1303static void free_slab(struct kmem_cache *s, struct page *page)
1261{ 1304{
1262 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { 1305 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1263 /* 1306 struct rcu_head *head;
1264 * RCU free overloads the RCU head over the LRU 1307
1265 */ 1308 if (need_reserve_slab_rcu) {
1266 struct rcu_head *head = (void *)&page->lru; 1309 int order = compound_order(page);
1310 int offset = (PAGE_SIZE << order) - s->reserved;
1311
1312 VM_BUG_ON(s->reserved != sizeof(*head));
1313 head = page_address(page) + offset;
1314 } else {
1315 /*
1316 * RCU free overloads the RCU head over the LRU
1317 */
1318 head = (void *)&page->lru;
1319 }
1267 1320
1268 call_rcu(head, rcu_free_slab); 1321 call_rcu(head, rcu_free_slab);
1269 } else 1322 } else
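
The reserved tail is carved off the very end of the slab's pages, which is why rcu_free_slab() can recover the page with virt_to_head_page(). A sketch of the address arithmetic (constants made explicit; the base address is a stand-in for page_address(page)):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	int order = 1;				/* an 8K slab */
	unsigned long reserved = 16;		/* sizeof(struct rcu_head) */
	unsigned long base = 0x10000;		/* page_address(page) stand-in */
	unsigned long head = base + (PAGE_SIZE << order) - reserved;

	/* head still lies inside the compound page, so the page is
	 * recoverable from the rcu_head pointer alone */
	printf("rcu_head at %#lx..%#lx\n", head, head + reserved - 1);
	return 0;
}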
@@ -1487,6 +1540,78 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1487 } 1540 }
1488} 1541}
1489 1542
1543#ifdef CONFIG_CMPXCHG_LOCAL
1544#ifdef CONFIG_PREEMPT
1545/*
 1546 * Calculate the next globally unique transaction for disambiguation
1547 * during cmpxchg. The transactions start with the cpu number and are then
1548 * incremented by CONFIG_NR_CPUS.
1549 */
1550#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1551#else
1552/*
1553 * No preemption supported therefore also no need to check for
1554 * different cpus.
1555 */
1556#define TID_STEP 1
1557#endif
1558
1559static inline unsigned long next_tid(unsigned long tid)
1560{
1561 return tid + TID_STEP;
1562}
1563
1564static inline unsigned int tid_to_cpu(unsigned long tid)
1565{
1566 return tid % TID_STEP;
1567}
1568
1569static inline unsigned long tid_to_event(unsigned long tid)
1570{
1571 return tid / TID_STEP;
1572}
1573
1574static inline unsigned int init_tid(int cpu)
1575{
1576 return cpu;
1577}
1578
1579static inline void note_cmpxchg_failure(const char *n,
1580 const struct kmem_cache *s, unsigned long tid)
1581{
1582#ifdef SLUB_DEBUG_CMPXCHG
1583 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1584
1585 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1586
1587#ifdef CONFIG_PREEMPT
1588 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1589 printk("due to cpu change %d -> %d\n",
1590 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1591 else
1592#endif
1593 if (tid_to_event(tid) != tid_to_event(actual_tid))
1594 printk("due to cpu running other code. Event %ld->%ld\n",
1595 tid_to_event(tid), tid_to_event(actual_tid));
1596 else
1597 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1598 actual_tid, tid, next_tid(tid));
1599#endif
1600 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1601}
1602
1603#endif
1604
1605void init_kmem_cache_cpus(struct kmem_cache *s)
1606{
1607#if defined(CONFIG_CMPXCHG_LOCAL) && defined(CONFIG_PREEMPT)
1608 int cpu;
1609
1610 for_each_possible_cpu(cpu)
1611 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1612#endif
1613
1614}
1490/* 1615/*
1491 * Remove the cpu slab 1616 * Remove the cpu slab
1492 */ 1617 */
@@ -1518,6 +1643,9 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1518 page->inuse--; 1643 page->inuse--;
1519 } 1644 }
1520 c->page = NULL; 1645 c->page = NULL;
1646#ifdef CONFIG_CMPXCHG_LOCAL
1647 c->tid = next_tid(c->tid);
1648#endif
1521 unfreeze_slab(s, page, tail); 1649 unfreeze_slab(s, page, tail);
1522} 1650}
1523 1651
@@ -1652,6 +1780,19 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
1652{ 1780{
1653 void **object; 1781 void **object;
1654 struct page *new; 1782 struct page *new;
1783#ifdef CONFIG_CMPXCHG_LOCAL
1784 unsigned long flags;
1785
1786 local_irq_save(flags);
1787#ifdef CONFIG_PREEMPT
1788 /*
1789 * We may have been preempted and rescheduled on a different
1790 * cpu before disabling interrupts. Need to reload cpu area
1791 * pointer.
1792 */
1793 c = this_cpu_ptr(s->cpu_slab);
1794#endif
1795#endif
1655 1796
1656 /* We handle __GFP_ZERO in the caller */ 1797 /* We handle __GFP_ZERO in the caller */
1657 gfpflags &= ~__GFP_ZERO; 1798 gfpflags &= ~__GFP_ZERO;
@@ -1678,6 +1819,10 @@ load_freelist:
1678 c->node = page_to_nid(c->page); 1819 c->node = page_to_nid(c->page);
1679unlock_out: 1820unlock_out:
1680 slab_unlock(c->page); 1821 slab_unlock(c->page);
1822#ifdef CONFIG_CMPXCHG_LOCAL
1823 c->tid = next_tid(c->tid);
1824 local_irq_restore(flags);
1825#endif
1681 stat(s, ALLOC_SLOWPATH); 1826 stat(s, ALLOC_SLOWPATH);
1682 return object; 1827 return object;
1683 1828
@@ -1713,6 +1858,9 @@ new_slab:
1713 } 1858 }
1714 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) 1859 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
1715 slab_out_of_memory(s, gfpflags, node); 1860 slab_out_of_memory(s, gfpflags, node);
1861#ifdef CONFIG_CMPXCHG_LOCAL
1862 local_irq_restore(flags);
1863#endif
1716 return NULL; 1864 return NULL;
1717debug: 1865debug:
1718 if (!alloc_debug_processing(s, c->page, object, addr)) 1866 if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1739,23 +1887,76 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
1739{ 1887{
1740 void **object; 1888 void **object;
1741 struct kmem_cache_cpu *c; 1889 struct kmem_cache_cpu *c;
1890#ifdef CONFIG_CMPXCHG_LOCAL
1891 unsigned long tid;
1892#else
1742 unsigned long flags; 1893 unsigned long flags;
1894#endif
1743 1895
1744 if (slab_pre_alloc_hook(s, gfpflags)) 1896 if (slab_pre_alloc_hook(s, gfpflags))
1745 return NULL; 1897 return NULL;
1746 1898
1899#ifndef CONFIG_CMPXCHG_LOCAL
1747 local_irq_save(flags); 1900 local_irq_save(flags);
1901#else
1902redo:
1903#endif
1904
1905 /*
1906 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
1907 * enabled. We may switch back and forth between cpus while
1908 * reading from one cpu area. That does not matter as long
1909 * as we end up on the original cpu again when doing the cmpxchg.
1910 */
1748 c = __this_cpu_ptr(s->cpu_slab); 1911 c = __this_cpu_ptr(s->cpu_slab);
1912
1913#ifdef CONFIG_CMPXCHG_LOCAL
1914 /*
1915 * The transaction ids are globally unique per cpu and per operation on
 1916 * a per cpu queue. Thus they guarantee that the cmpxchg_double
1917 * occurs on the right processor and that there was no operation on the
1918 * linked list in between.
1919 */
1920 tid = c->tid;
1921 barrier();
1922#endif
1923
1749 object = c->freelist; 1924 object = c->freelist;
1750 if (unlikely(!object || !node_match(c, node))) 1925 if (unlikely(!object || !node_match(c, node)))
1751 1926
1752 object = __slab_alloc(s, gfpflags, node, addr, c); 1927 object = __slab_alloc(s, gfpflags, node, addr, c);
1753 1928
1754 else { 1929 else {
1930#ifdef CONFIG_CMPXCHG_LOCAL
1931 /*
 1932 * The cmpxchg will only match if there was no additional
1933 * operation and if we are on the right processor.
1934 *
1935 * The cmpxchg does the following atomically (without lock semantics!)
1936 * 1. Relocate first pointer to the current per cpu area.
1937 * 2. Verify that tid and freelist have not been changed
1938 * 3. If they were not changed replace tid and freelist
1939 *
1940 * Since this is without lock semantics the protection is only against
1941 * code executing on this cpu *not* from access by other cpus.
1942 */
1943 if (unlikely(!this_cpu_cmpxchg_double(
1944 s->cpu_slab->freelist, s->cpu_slab->tid,
1945 object, tid,
1946 get_freepointer(s, object), next_tid(tid)))) {
1947
1948 note_cmpxchg_failure("slab_alloc", s, tid);
1949 goto redo;
1950 }
1951#else
1755 c->freelist = get_freepointer(s, object); 1952 c->freelist = get_freepointer(s, object);
1953#endif
1756 stat(s, ALLOC_FASTPATH); 1954 stat(s, ALLOC_FASTPATH);
1757 } 1955 }
1956
1957#ifndef CONFIG_CMPXCHG_LOCAL
1758 local_irq_restore(flags); 1958 local_irq_restore(flags);
1959#endif
1759 1960
1760 if (unlikely(gfpflags & __GFP_ZERO) && object) 1961 if (unlikely(gfpflags & __GFP_ZERO) && object)
1761 memset(object, 0, s->objsize); 1962 memset(object, 0, s->objsize);
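
The tid pairing is what makes the lockless fastpath safe. A single-threaded model of the disambiguation (this_cpu_cmpxchg_double() replaced by plain compares, so only the bookkeeping is shown; the atomicity itself is not modelled):

#include <stdbool.h>
#include <stdio.h>

#define TID_STEP 4	/* roundup_pow_of_two(NR_CPUS) stand-in */

struct cpu_slab { void *freelist; unsigned long tid; };

/* A fastpath attempt succeeds only if nothing touched this cpu slab
 * between reading (freelist, tid) and the "cmpxchg". */
static bool try_alloc(struct cpu_slab *c, void *seen_free,
		      unsigned long seen_tid, void *next_free)
{
	if (c->freelist != seen_free || c->tid != seen_tid)
		return false;			/* note failure, redo */
	c->freelist = next_free;
	c->tid = seen_tid + TID_STEP;		/* next_tid() */
	return true;
}

int main(void)
{
	int obj[2];
	struct cpu_slab c = { &obj[0], 1 };	/* init_tid() for cpu 1 */
	void *seen = c.freelist;
	unsigned long tid = c.tid;

	c.tid += TID_STEP;	/* an interrupt did an op in between */
	printf("%d\n", try_alloc(&c, seen, tid, &obj[1]));	/* 0: retry */
	return 0;
}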
@@ -1833,9 +2034,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
1833{ 2034{
1834 void *prior; 2035 void *prior;
1835 void **object = (void *)x; 2036 void **object = (void *)x;
2037#ifdef CONFIG_CMPXCHG_LOCAL
2038 unsigned long flags;
1836 2039
1837 stat(s, FREE_SLOWPATH); 2040 local_irq_save(flags);
2041#endif
1838 slab_lock(page); 2042 slab_lock(page);
2043 stat(s, FREE_SLOWPATH);
1839 2044
1840 if (kmem_cache_debug(s)) 2045 if (kmem_cache_debug(s))
1841 goto debug; 2046 goto debug;
@@ -1865,6 +2070,9 @@ checks_ok:
1865 2070
1866out_unlock: 2071out_unlock:
1867 slab_unlock(page); 2072 slab_unlock(page);
2073#ifdef CONFIG_CMPXCHG_LOCAL
2074 local_irq_restore(flags);
2075#endif
1868 return; 2076 return;
1869 2077
1870slab_empty: 2078slab_empty:
@@ -1876,6 +2084,9 @@ slab_empty:
1876 stat(s, FREE_REMOVE_PARTIAL); 2084 stat(s, FREE_REMOVE_PARTIAL);
1877 } 2085 }
1878 slab_unlock(page); 2086 slab_unlock(page);
2087#ifdef CONFIG_CMPXCHG_LOCAL
2088 local_irq_restore(flags);
2089#endif
1879 stat(s, FREE_SLAB); 2090 stat(s, FREE_SLAB);
1880 discard_slab(s, page); 2091 discard_slab(s, page);
1881 return; 2092 return;
@@ -1902,23 +2113,56 @@ static __always_inline void slab_free(struct kmem_cache *s,
1902{ 2113{
1903 void **object = (void *)x; 2114 void **object = (void *)x;
1904 struct kmem_cache_cpu *c; 2115 struct kmem_cache_cpu *c;
2116#ifdef CONFIG_CMPXCHG_LOCAL
2117 unsigned long tid;
2118#else
1905 unsigned long flags; 2119 unsigned long flags;
2120#endif
1906 2121
1907 slab_free_hook(s, x); 2122 slab_free_hook(s, x);
1908 2123
2124#ifndef CONFIG_CMPXCHG_LOCAL
1909 local_irq_save(flags); 2125 local_irq_save(flags);
2126
2127#else
2128redo:
2129#endif
2130
2131 /*
 2132 * Determine the current cpu's per cpu slab.
 2133 * The cpu may change afterward. However, that does not matter since
 2134 * data is retrieved via this pointer. If we are on the same cpu
 2135 * during the cmpxchg then the free will succeed.
2136 */
1910 c = __this_cpu_ptr(s->cpu_slab); 2137 c = __this_cpu_ptr(s->cpu_slab);
1911 2138
1912 slab_free_hook_irq(s, x); 2139#ifdef CONFIG_CMPXCHG_LOCAL
2140 tid = c->tid;
2141 barrier();
2142#endif
1913 2143
1914 if (likely(page == c->page && c->node != NUMA_NO_NODE)) { 2144 if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
1915 set_freepointer(s, object, c->freelist); 2145 set_freepointer(s, object, c->freelist);
2146
2147#ifdef CONFIG_CMPXCHG_LOCAL
2148 if (unlikely(!this_cpu_cmpxchg_double(
2149 s->cpu_slab->freelist, s->cpu_slab->tid,
2150 c->freelist, tid,
2151 object, next_tid(tid)))) {
2152
2153 note_cmpxchg_failure("slab_free", s, tid);
2154 goto redo;
2155 }
2156#else
1916 c->freelist = object; 2157 c->freelist = object;
2158#endif
1917 stat(s, FREE_FASTPATH); 2159 stat(s, FREE_FASTPATH);
1918 } else 2160 } else
1919 __slab_free(s, page, x, addr); 2161 __slab_free(s, page, x, addr);
1920 2162
2163#ifndef CONFIG_CMPXCHG_LOCAL
1921 local_irq_restore(flags); 2164 local_irq_restore(flags);
2165#endif
1922} 2166}
1923 2167
1924void kmem_cache_free(struct kmem_cache *s, void *x) 2168void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -1988,13 +2232,13 @@ static int slub_nomerge;
1988 * the smallest order which will fit the object. 2232 * the smallest order which will fit the object.
1989 */ 2233 */
1990static inline int slab_order(int size, int min_objects, 2234static inline int slab_order(int size, int min_objects,
1991 int max_order, int fract_leftover) 2235 int max_order, int fract_leftover, int reserved)
1992{ 2236{
1993 int order; 2237 int order;
1994 int rem; 2238 int rem;
1995 int min_order = slub_min_order; 2239 int min_order = slub_min_order;
1996 2240
1997 if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE) 2241 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
1998 return get_order(size * MAX_OBJS_PER_PAGE) - 1; 2242 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
1999 2243
2000 for (order = max(min_order, 2244 for (order = max(min_order,
@@ -2003,10 +2247,10 @@ static inline int slab_order(int size, int min_objects,
2003 2247
2004 unsigned long slab_size = PAGE_SIZE << order; 2248 unsigned long slab_size = PAGE_SIZE << order;
2005 2249
2006 if (slab_size < min_objects * size) 2250 if (slab_size < min_objects * size + reserved)
2007 continue; 2251 continue;
2008 2252
2009 rem = slab_size % size; 2253 rem = (slab_size - reserved) % size;
2010 2254
2011 if (rem <= slab_size / fract_leftover) 2255 if (rem <= slab_size / fract_leftover)
2012 break; 2256 break;
@@ -2016,7 +2260,7 @@ static inline int slab_order(int size, int min_objects,
2016 return order; 2260 return order;
2017} 2261}
2018 2262
2019static inline int calculate_order(int size) 2263static inline int calculate_order(int size, int reserved)
2020{ 2264{
2021 int order; 2265 int order;
2022 int min_objects; 2266 int min_objects;
@@ -2034,14 +2278,14 @@ static inline int calculate_order(int size)
2034 min_objects = slub_min_objects; 2278 min_objects = slub_min_objects;
2035 if (!min_objects) 2279 if (!min_objects)
2036 min_objects = 4 * (fls(nr_cpu_ids) + 1); 2280 min_objects = 4 * (fls(nr_cpu_ids) + 1);
2037 max_objects = (PAGE_SIZE << slub_max_order)/size; 2281 max_objects = order_objects(slub_max_order, size, reserved);
2038 min_objects = min(min_objects, max_objects); 2282 min_objects = min(min_objects, max_objects);
2039 2283
2040 while (min_objects > 1) { 2284 while (min_objects > 1) {
2041 fraction = 16; 2285 fraction = 16;
2042 while (fraction >= 4) { 2286 while (fraction >= 4) {
2043 order = slab_order(size, min_objects, 2287 order = slab_order(size, min_objects,
2044 slub_max_order, fraction); 2288 slub_max_order, fraction, reserved);
2045 if (order <= slub_max_order) 2289 if (order <= slub_max_order)
2046 return order; 2290 return order;
2047 fraction /= 2; 2291 fraction /= 2;
@@ -2053,14 +2297,14 @@ static inline int calculate_order(int size)
2053 * We were unable to place multiple objects in a slab. Now 2297 * We were unable to place multiple objects in a slab. Now
2054 * lets see if we can place a single object there. 2298 * lets see if we can place a single object there.
2055 */ 2299 */
2056 order = slab_order(size, 1, slub_max_order, 1); 2300 order = slab_order(size, 1, slub_max_order, 1, reserved);
2057 if (order <= slub_max_order) 2301 if (order <= slub_max_order)
2058 return order; 2302 return order;
2059 2303
2060 /* 2304 /*
2061 * Doh this slab cannot be placed using slub_max_order. 2305 * Doh this slab cannot be placed using slub_max_order.
2062 */ 2306 */
2063 order = slab_order(size, 1, MAX_ORDER, 1); 2307 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
2064 if (order < MAX_ORDER) 2308 if (order < MAX_ORDER)
2065 return order; 2309 return order;
2066 return -ENOSYS; 2310 return -ENOSYS;
@@ -2110,9 +2354,23 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2110 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2354 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2111 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2355 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2112 2356
2357#ifdef CONFIG_CMPXCHG_LOCAL
2358 /*
2359 * Must align to double word boundary for the double cmpxchg instructions
2360 * to work.
2361 */
2362 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
2363#else
2364 /* Regular alignment is sufficient */
2113 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu); 2365 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2366#endif
2367
2368 if (!s->cpu_slab)
2369 return 0;
2114 2370
2115 return s->cpu_slab != NULL; 2371 init_kmem_cache_cpus(s);
2372
2373 return 1;
2116} 2374}
2117 2375
2118static struct kmem_cache *kmem_cache_node; 2376static struct kmem_cache *kmem_cache_node;
@@ -2311,7 +2569,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
2311 if (forced_order >= 0) 2569 if (forced_order >= 0)
2312 order = forced_order; 2570 order = forced_order;
2313 else 2571 else
2314 order = calculate_order(size); 2572 order = calculate_order(size, s->reserved);
2315 2573
2316 if (order < 0) 2574 if (order < 0)
2317 return 0; 2575 return 0;
@@ -2329,8 +2587,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
2329 /* 2587 /*
2330 * Determine the number of objects per slab 2588 * Determine the number of objects per slab
2331 */ 2589 */
2332 s->oo = oo_make(order, size); 2590 s->oo = oo_make(order, size, s->reserved);
2333 s->min = oo_make(get_order(size), size); 2591 s->min = oo_make(get_order(size), size, s->reserved);
2334 if (oo_objects(s->oo) > oo_objects(s->max)) 2592 if (oo_objects(s->oo) > oo_objects(s->max))
2335 s->max = s->oo; 2593 s->max = s->oo;
2336 2594
@@ -2349,6 +2607,10 @@ static int kmem_cache_open(struct kmem_cache *s,
2349 s->objsize = size; 2607 s->objsize = size;
2350 s->align = align; 2608 s->align = align;
2351 s->flags = kmem_cache_flags(size, flags, name, ctor); 2609 s->flags = kmem_cache_flags(size, flags, name, ctor);
2610 s->reserved = 0;
2611
2612 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
2613 s->reserved = sizeof(struct rcu_head);
2352 2614
2353 if (!calculate_sizes(s, -1)) 2615 if (!calculate_sizes(s, -1))
2354 goto error; 2616 goto error;
@@ -2399,12 +2661,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
2399} 2661}
2400EXPORT_SYMBOL(kmem_cache_size); 2662EXPORT_SYMBOL(kmem_cache_size);
2401 2663
2402const char *kmem_cache_name(struct kmem_cache *s)
2403{
2404 return s->name;
2405}
2406EXPORT_SYMBOL(kmem_cache_name);
2407
2408static void list_slab_objects(struct kmem_cache *s, struct page *page, 2664static void list_slab_objects(struct kmem_cache *s, struct page *page,
2409 const char *text) 2665 const char *text)
2410{ 2666{
@@ -2696,7 +2952,6 @@ EXPORT_SYMBOL(__kmalloc_node);
2696size_t ksize(const void *object) 2952size_t ksize(const void *object)
2697{ 2953{
2698 struct page *page; 2954 struct page *page;
2699 struct kmem_cache *s;
2700 2955
2701 if (unlikely(object == ZERO_SIZE_PTR)) 2956 if (unlikely(object == ZERO_SIZE_PTR))
2702 return 0; 2957 return 0;
@@ -2707,28 +2962,8 @@ size_t ksize(const void *object)
2707 WARN_ON(!PageCompound(page)); 2962 WARN_ON(!PageCompound(page));
2708 return PAGE_SIZE << compound_order(page); 2963 return PAGE_SIZE << compound_order(page);
2709 } 2964 }
2710 s = page->slab;
2711
2712#ifdef CONFIG_SLUB_DEBUG
2713 /*
2714 * Debugging requires use of the padding between object
2715 * and whatever may come after it.
2716 */
2717 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2718 return s->objsize;
2719 2965
2720#endif 2966 return slab_ksize(page->slab);
2721 /*
2722 * If we have the need to store the freelist pointer
2723 * back there or track user information then we can
2724 * only use the space before that information.
2725 */
2726 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2727 return s->inuse;
2728 /*
2729 * Else we can use all the padding etc for the allocation
2730 */
2731 return s->size;
2732} 2967}
2733EXPORT_SYMBOL(ksize); 2968EXPORT_SYMBOL(ksize);
2734 2969
@@ -4017,6 +4252,12 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4017} 4252}
4018SLAB_ATTR_RO(destroy_by_rcu); 4253SLAB_ATTR_RO(destroy_by_rcu);
4019 4254
4255static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4256{
4257 return sprintf(buf, "%d\n", s->reserved);
4258}
4259SLAB_ATTR_RO(reserved);
4260
4020#ifdef CONFIG_SLUB_DEBUG 4261#ifdef CONFIG_SLUB_DEBUG
4021static ssize_t slabs_show(struct kmem_cache *s, char *buf) 4262static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4022{ 4263{
@@ -4303,6 +4544,7 @@ static struct attribute *slab_attrs[] = {
4303 &reclaim_account_attr.attr, 4544 &reclaim_account_attr.attr,
4304 &destroy_by_rcu_attr.attr, 4545 &destroy_by_rcu_attr.attr,
4305 &shrink_attr.attr, 4546 &shrink_attr.attr,
4547 &reserved_attr.attr,
4306#ifdef CONFIG_SLUB_DEBUG 4548#ifdef CONFIG_SLUB_DEBUG
4307 &total_objects_attr.attr, 4549 &total_objects_attr.attr,
4308 &slabs_attr.attr, 4550 &slabs_attr.attr,
diff --git a/mm/swap.c b/mm/swap.c
index c02f93611a84..a448db377cb0 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -39,6 +39,7 @@ int page_cluster;
39 39
40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); 40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); 41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
42static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
42 43
43/* 44/*
44 * This path almost never happens for VM activity - pages are normally 45 * This path almost never happens for VM activity - pages are normally
@@ -178,15 +179,13 @@ void put_pages_list(struct list_head *pages)
178} 179}
179EXPORT_SYMBOL(put_pages_list); 180EXPORT_SYMBOL(put_pages_list);
180 181
181/* 182static void pagevec_lru_move_fn(struct pagevec *pvec,
182 * pagevec_move_tail() must be called with IRQ disabled. 183 void (*move_fn)(struct page *page, void *arg),
183 * Otherwise this may cause nasty races. 184 void *arg)
184 */
185static void pagevec_move_tail(struct pagevec *pvec)
186{ 185{
187 int i; 186 int i;
188 int pgmoved = 0;
189 struct zone *zone = NULL; 187 struct zone *zone = NULL;
188 unsigned long flags = 0;
190 189
191 for (i = 0; i < pagevec_count(pvec); i++) { 190 for (i = 0; i < pagevec_count(pvec); i++) {
192 struct page *page = pvec->pages[i]; 191 struct page *page = pvec->pages[i];
@@ -194,29 +193,50 @@ static void pagevec_move_tail(struct pagevec *pvec)
194 193
195 if (pagezone != zone) { 194 if (pagezone != zone) {
196 if (zone) 195 if (zone)
197 spin_unlock(&zone->lru_lock); 196 spin_unlock_irqrestore(&zone->lru_lock, flags);
198 zone = pagezone; 197 zone = pagezone;
199 spin_lock(&zone->lru_lock); 198 spin_lock_irqsave(&zone->lru_lock, flags);
200 }
201 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
202 int lru = page_lru_base_type(page);
203 list_move_tail(&page->lru, &zone->lru[lru].list);
204 pgmoved++;
205 } 199 }
200
201 (*move_fn)(page, arg);
206 } 202 }
207 if (zone) 203 if (zone)
208 spin_unlock(&zone->lru_lock); 204 spin_unlock_irqrestore(&zone->lru_lock, flags);
209 __count_vm_events(PGROTATED, pgmoved);
210 release_pages(pvec->pages, pvec->nr, pvec->cold); 205 release_pages(pvec->pages, pvec->nr, pvec->cold);
211 pagevec_reinit(pvec); 206 pagevec_reinit(pvec);
212} 207}
213 208
209static void pagevec_move_tail_fn(struct page *page, void *arg)
210{
211 int *pgmoved = arg;
212 struct zone *zone = page_zone(page);
213
214 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
215 enum lru_list lru = page_lru_base_type(page);
216 list_move_tail(&page->lru, &zone->lru[lru].list);
217 mem_cgroup_rotate_reclaimable_page(page);
218 (*pgmoved)++;
219 }
220}
221
222/*
223 * pagevec_move_tail() must be called with IRQ disabled.
224 * Otherwise this may cause nasty races.
225 */
226static void pagevec_move_tail(struct pagevec *pvec)
227{
228 int pgmoved = 0;
229
230 pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
231 __count_vm_events(PGROTATED, pgmoved);
232}
233
214/* 234/*
215 * Writeback is about to end against a page which has been marked for immediate 235 * Writeback is about to end against a page which has been marked for immediate
216 * reclaim. If it still appears to be reclaimable, move it to the tail of the 236 * reclaim. If it still appears to be reclaimable, move it to the tail of the
217 * inactive list. 237 * inactive list.
218 */ 238 */
219void rotate_reclaimable_page(struct page *page) 239void rotate_reclaimable_page(struct page *page)
220{ 240{
221 if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) && 241 if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
222 !PageUnevictable(page) && PageLRU(page)) { 242 !PageUnevictable(page) && PageLRU(page)) {
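
The refactor follows a common shape: hoist the lock batching per zone into one driver and hand the per-page work to a callback. A generic stand-alone rendering (zones reduced to integers, the lru_lock to a switch counter):

#include <stdio.h>

static int lock_switches;

static void move_fn_example(int page, void *arg)
{
	(*(int *)arg)++;
}

/* Driver: take the "lock" once per run of same-zone pages. */
static void for_each_page(const int *zone_of, int n,
			  void (*move_fn)(int, void *), void *arg)
{
	int zone = -1;

	for (int i = 0; i < n; i++) {
		if (zone_of[i] != zone) {	/* drop old lock, take new */
			zone = zone_of[i];
			lock_switches++;
		}
		move_fn(i, arg);
	}
}

int main(void)
{
	int zones[] = { 0, 0, 1, 1, 1, 0 };
	int moved = 0;

	for_each_page(zones, 6, move_fn_example, &moved);
	printf("%d pages, %d lock switches\n", moved, lock_switches); /* 6, 3 */
	return 0;
}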
@@ -347,6 +367,71 @@ void add_page_to_unevictable_list(struct page *page)
347} 367}
348 368
349/* 369/*
 370 * If the page cannot be invalidated, it is moved to the
371 * inactive list to speed up its reclaim. It is moved to the
372 * head of the list, rather than the tail, to give the flusher
373 * threads some time to write it out, as this is much more
374 * effective than the single-page writeout from reclaim.
375 *
 376 * If the page isn't page_mapped and dirty/writeback, the page
 377 * can be reclaimed asap using PG_reclaim.
378 *
379 * 1. active, mapped page -> none
380 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
381 * 3. inactive, mapped page -> none
382 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
383 * 5. inactive, clean -> inactive, tail
384 * 6. Others -> none
385 *
 386 * In case 4, the page is moved to the inactive list's head because the
 387 * VM expects it to be written out by flusher threads, as this is much
 388 * more effective than the single-page writeout from reclaim.
389 */
390static void lru_deactivate_fn(struct page *page, void *arg)
391{
392 int lru, file;
393 bool active;
394 struct zone *zone = page_zone(page);
395
396 if (!PageLRU(page))
397 return;
398
399 /* Some processes are using the page */
400 if (page_mapped(page))
401 return;
402
403 active = PageActive(page);
404
405 file = page_is_file_cache(page);
406 lru = page_lru_base_type(page);
407 del_page_from_lru_list(zone, page, lru + active);
408 ClearPageActive(page);
409 ClearPageReferenced(page);
410 add_page_to_lru_list(zone, page, lru);
411
412 if (PageWriteback(page) || PageDirty(page)) {
413 /*
414 * PG_reclaim could be raced with end_page_writeback
415 * It can make readahead confusing. But race window
416 * is _really_ small and it's non-critical problem.
417 */
418 SetPageReclaim(page);
419 } else {
420 /*
 421 * The page's writeback ended while it sat in the pagevec;
 422 * we move the page to the tail of the inactive list.
423 */
424 list_move_tail(&page->lru, &zone->lru[lru].list);
425 mem_cgroup_rotate_reclaimable_page(page);
426 __count_vm_event(PGROTATED);
427 }
428
429 if (active)
430 __count_vm_event(PGDEACTIVATE);
431 update_page_reclaim_stat(zone, page, file, 0);
432}
433
434/*
350 * Drain pages out of the cpu's pagevecs. 435 * Drain pages out of the cpu's pagevecs.
351 * Either "cpu" is the current CPU, and preemption has already been 436 * Either "cpu" is the current CPU, and preemption has already been
352 * disabled; or "cpu" is being hot-unplugged, and is already dead. 437 * disabled; or "cpu" is being hot-unplugged, and is already dead.
@@ -372,6 +457,29 @@ static void drain_cpu_pagevecs(int cpu)
372 pagevec_move_tail(pvec); 457 pagevec_move_tail(pvec);
373 local_irq_restore(flags); 458 local_irq_restore(flags);
374 } 459 }
460
461 pvec = &per_cpu(lru_deactivate_pvecs, cpu);
462 if (pagevec_count(pvec))
463 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
464}
465
466/**
467 * deactivate_page - forcefully deactivate a page
468 * @page: page to deactivate
469 *
470 * This function hints the VM that @page is a good reclaim candidate,
471 * for example if its invalidation fails due to the page being dirty
472 * or under writeback.
473 */
474void deactivate_page(struct page *page)
475{
476 if (likely(get_page_unless_zero(page))) {
477 struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
478
479 if (!pagevec_add(pvec, page))
480 pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
481 put_cpu_var(lru_deactivate_pvecs);
482 }
375} 483}
376 484
377void lru_add_drain(void) 485void lru_add_drain(void)
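
A likely call site for the new hint, assuming the follow-up wiring in invalidate_mapping_pages() (shown as an assumption, not a quote from this series):

	/* Hypothetical caller: pages that could not be invalidated are
	 * hinted to the VM as good reclaim candidates. */
	ret = invalidate_inode_page(page);
	if (!ret)
		deactivate_page(page);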
@@ -516,44 +624,33 @@ void lru_add_page_tail(struct zone* zone,
516 } 624 }
517} 625}
518 626
627static void ____pagevec_lru_add_fn(struct page *page, void *arg)
628{
629 enum lru_list lru = (enum lru_list)arg;
630 struct zone *zone = page_zone(page);
631 int file = is_file_lru(lru);
632 int active = is_active_lru(lru);
633
634 VM_BUG_ON(PageActive(page));
635 VM_BUG_ON(PageUnevictable(page));
636 VM_BUG_ON(PageLRU(page));
637
638 SetPageLRU(page);
639 if (active)
640 SetPageActive(page);
641 update_page_reclaim_stat(zone, page, file, active);
642 add_page_to_lru_list(zone, page, lru);
643}
644
519/* 645/*
520 * Add the passed pages to the LRU, then drop the caller's refcount 646 * Add the passed pages to the LRU, then drop the caller's refcount
521 * on them. Reinitialises the caller's pagevec. 647 * on them. Reinitialises the caller's pagevec.
522 */ 648 */
523void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) 649void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
524{ 650{
525 int i;
526 struct zone *zone = NULL;
527
528 VM_BUG_ON(is_unevictable_lru(lru)); 651 VM_BUG_ON(is_unevictable_lru(lru));
529 652
530 for (i = 0; i < pagevec_count(pvec); i++) { 653 pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
531 struct page *page = pvec->pages[i];
532 struct zone *pagezone = page_zone(page);
533 int file;
534 int active;
535
536 if (pagezone != zone) {
537 if (zone)
538 spin_unlock_irq(&zone->lru_lock);
539 zone = pagezone;
540 spin_lock_irq(&zone->lru_lock);
541 }
542 VM_BUG_ON(PageActive(page));
543 VM_BUG_ON(PageUnevictable(page));
544 VM_BUG_ON(PageLRU(page));
545 SetPageLRU(page);
546 active = is_active_lru(lru);
547 file = is_file_lru(lru);
548 if (active)
549 SetPageActive(page);
550 update_page_reclaim_stat(zone, page, file, active);
551 add_page_to_lru_list(zone, page, lru);
552 }
553 if (zone)
554 spin_unlock_irq(&zone->lru_lock);
555 release_pages(pvec->pages, pvec->nr, pvec->cold);
556 pagevec_reinit(pvec);
557} 654}
558 655
559EXPORT_SYMBOL(____pagevec_lru_add); 656EXPORT_SYMBOL(____pagevec_lru_add);
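
The mm/swap.c changes above route every LRU movement through pagevec_lru_move_fn(): pages collect in a per-CPU pagevec and the zone's lru_lock is taken once per batch rather than once per page. A minimal sketch of that batching idiom under the helpers shown in this diff (my_pvecs, my_move_fn and my_queue_page are hypothetical names):

static DEFINE_PER_CPU(struct pagevec, my_pvecs);

static void my_move_fn(struct page *page, void *arg)
{
	/* called once per queued page, under the owning zone's lru_lock */
}

void my_queue_page(struct page *page)
{
	/* pin the page so it cannot be freed while sitting in the pagevec */
	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(my_pvecs);

		/* pagevec_add() returns 0 once the vector is full */
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, my_move_fn, NULL);
		put_cpu_var(my_pvecs);
	}
}

As in deactivate_page(), the reference taken here is expected to be dropped by pagevec_lru_move_fn() when it drains and reinitialises the vector.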
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0341c5700e34..71b42ec55b78 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -212,8 +212,8 @@ static int wait_for_discard(void *word)
212#define SWAPFILE_CLUSTER 256 212#define SWAPFILE_CLUSTER 256
213#define LATENCY_LIMIT 256 213#define LATENCY_LIMIT 256
214 214
215static inline unsigned long scan_swap_map(struct swap_info_struct *si, 215static unsigned long scan_swap_map(struct swap_info_struct *si,
216 unsigned char usage) 216 unsigned char usage)
217{ 217{
218 unsigned long offset; 218 unsigned long offset;
219 unsigned long scan_base; 219 unsigned long scan_base;
@@ -1550,6 +1550,36 @@ bad_bmap:
1550 goto out; 1550 goto out;
1551} 1551}
1552 1552
1553static void enable_swap_info(struct swap_info_struct *p, int prio,
1554 unsigned char *swap_map)
1555{
1556 int i, prev;
1557
1558 spin_lock(&swap_lock);
1559 if (prio >= 0)
1560 p->prio = prio;
1561 else
1562 p->prio = --least_priority;
1563 p->swap_map = swap_map;
1564 p->flags |= SWP_WRITEOK;
1565 nr_swap_pages += p->pages;
1566 total_swap_pages += p->pages;
1567
1568 /* insert swap space into swap_list: */
1569 prev = -1;
1570 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1571 if (p->prio >= swap_info[i]->prio)
1572 break;
1573 prev = i;
1574 }
1575 p->next = i;
1576 if (prev < 0)
1577 swap_list.head = swap_list.next = p->type;
1578 else
1579 swap_info[prev]->next = p->type;
1580 spin_unlock(&swap_lock);
1581}
1582
1553SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 1583SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1554{ 1584{
1555 struct swap_info_struct *p = NULL; 1585 struct swap_info_struct *p = NULL;
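
enable_swap_info() above re-inserts the area into swap_list, a singly linked list threaded through array indices and kept sorted by descending priority. A standalone model of that insertion (slot, head and insert_by_prio are hypothetical names):

#define NSLOTS 8

struct slot { int prio; int next; };	/* next == -1 terminates the list */

static struct slot slots[NSLOTS];
static int head = -1;

/* Insert 'type', keeping the list sorted by descending priority. */
static void insert_by_prio(int type)
{
	int i, prev = -1;

	for (i = head; i >= 0; i = slots[i].next) {
		if (slots[type].prio >= slots[i].prio)
			break;	/* new entry outranks everything from here */
		prev = i;
	}
	slots[type].next = i;
	if (prev < 0)
		head = type;
	else
		slots[prev].next = type;
}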
@@ -1621,25 +1651,14 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1621 current->flags &= ~PF_OOM_ORIGIN; 1651 current->flags &= ~PF_OOM_ORIGIN;
1622 1652
1623 if (err) { 1653 if (err) {
1654 /*
1655 * reading p->prio and p->swap_map outside the lock is
1656 * safe here because only sys_swapon and sys_swapoff
1657 * change them, and there can be no other sys_swapon or
1658 * sys_swapoff for this swap_info_struct at this point.
1659 */
1624 /* re-insert swap space back into swap_list */ 1660 /* re-insert swap space back into swap_list */
1625 spin_lock(&swap_lock); 1661 enable_swap_info(p, p->prio, p->swap_map);
1626 if (p->prio < 0)
1627 p->prio = --least_priority;
1628 prev = -1;
1629 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1630 if (p->prio >= swap_info[i]->prio)
1631 break;
1632 prev = i;
1633 }
1634 p->next = i;
1635 if (prev < 0)
1636 swap_list.head = swap_list.next = type;
1637 else
1638 swap_info[prev]->next = type;
1639 nr_swap_pages += p->pages;
1640 total_swap_pages += p->pages;
1641 p->flags |= SWP_WRITEOK;
1642 spin_unlock(&swap_lock);
1643 goto out_dput; 1662 goto out_dput;
1644 } 1663 }
1645 1664
@@ -1844,49 +1863,24 @@ static int __init max_swapfiles_check(void)
1844late_initcall(max_swapfiles_check); 1863late_initcall(max_swapfiles_check);
1845#endif 1864#endif
1846 1865
1847/* 1866static struct swap_info_struct *alloc_swap_info(void)
1848 * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
1849 *
1850 * The swapon system call
1851 */
1852SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1853{ 1867{
1854 struct swap_info_struct *p; 1868 struct swap_info_struct *p;
1855 char *name = NULL;
1856 struct block_device *bdev = NULL;
1857 struct file *swap_file = NULL;
1858 struct address_space *mapping;
1859 unsigned int type; 1869 unsigned int type;
1860 int i, prev;
1861 int error;
1862 union swap_header *swap_header;
1863 unsigned int nr_good_pages;
1864 int nr_extents = 0;
1865 sector_t span;
1866 unsigned long maxpages;
1867 unsigned long swapfilepages;
1868 unsigned char *swap_map = NULL;
1869 struct page *page = NULL;
1870 struct inode *inode = NULL;
1871 int did_down = 0;
1872
1873 if (!capable(CAP_SYS_ADMIN))
1874 return -EPERM;
1875 1870
1876 p = kzalloc(sizeof(*p), GFP_KERNEL); 1871 p = kzalloc(sizeof(*p), GFP_KERNEL);
1877 if (!p) 1872 if (!p)
1878 return -ENOMEM; 1873 return ERR_PTR(-ENOMEM);
1879 1874
1880 spin_lock(&swap_lock); 1875 spin_lock(&swap_lock);
1881 for (type = 0; type < nr_swapfiles; type++) { 1876 for (type = 0; type < nr_swapfiles; type++) {
1882 if (!(swap_info[type]->flags & SWP_USED)) 1877 if (!(swap_info[type]->flags & SWP_USED))
1883 break; 1878 break;
1884 } 1879 }
1885 error = -EPERM;
1886 if (type >= MAX_SWAPFILES) { 1880 if (type >= MAX_SWAPFILES) {
1887 spin_unlock(&swap_lock); 1881 spin_unlock(&swap_lock);
1888 kfree(p); 1882 kfree(p);
1889 goto out; 1883 return ERR_PTR(-EPERM);
1890 } 1884 }
1891 if (type >= nr_swapfiles) { 1885 if (type >= nr_swapfiles) {
1892 p->type = type; 1886 p->type = type;
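
alloc_swap_info(), carved out above, reports failure through the ERR_PTR convention rather than a NULL return plus a separate error variable, so one pointer carries both outcomes. A reduced sketch of the pattern as the callers below consume it (alloc_example and caller_example are hypothetical names):

static struct swap_info_struct *alloc_example(void)
{
	struct swap_info_struct *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return p;
}

static long caller_example(void)
{
	struct swap_info_struct *p = alloc_example();

	if (IS_ERR(p))
		return PTR_ERR(p);		/* recover the negative errno */
	/* ... use p ... */
	return 0;
}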
@@ -1911,81 +1905,49 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1911 p->next = -1; 1905 p->next = -1;
1912 spin_unlock(&swap_lock); 1906 spin_unlock(&swap_lock);
1913 1907
1914 name = getname(specialfile); 1908 return p;
1915 error = PTR_ERR(name); 1909}
1916 if (IS_ERR(name)) {
1917 name = NULL;
1918 goto bad_swap_2;
1919 }
1920 swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
1921 error = PTR_ERR(swap_file);
1922 if (IS_ERR(swap_file)) {
1923 swap_file = NULL;
1924 goto bad_swap_2;
1925 }
1926
1927 p->swap_file = swap_file;
1928 mapping = swap_file->f_mapping;
1929 inode = mapping->host;
1930
1931 error = -EBUSY;
1932 for (i = 0; i < nr_swapfiles; i++) {
1933 struct swap_info_struct *q = swap_info[i];
1934 1910
1935 if (i == type || !q->swap_file) 1911static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
1936 continue; 1912{
1937 if (mapping == q->swap_file->f_mapping) 1913 int error;
1938 goto bad_swap;
1939 }
1940 1914
1941 error = -EINVAL;
1942 if (S_ISBLK(inode->i_mode)) { 1915 if (S_ISBLK(inode->i_mode)) {
1943 bdev = bdgrab(I_BDEV(inode)); 1916 p->bdev = bdgrab(I_BDEV(inode));
1944 error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, 1917 error = blkdev_get(p->bdev,
1918 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1945 sys_swapon); 1919 sys_swapon);
1946 if (error < 0) { 1920 if (error < 0) {
1947 bdev = NULL; 1921 p->bdev = NULL;
1948 error = -EINVAL; 1922 return -EINVAL;
1949 goto bad_swap;
1950 } 1923 }
1951 p->old_block_size = block_size(bdev); 1924 p->old_block_size = block_size(p->bdev);
1952 error = set_blocksize(bdev, PAGE_SIZE); 1925 error = set_blocksize(p->bdev, PAGE_SIZE);
1953 if (error < 0) 1926 if (error < 0)
1954 goto bad_swap; 1927 return error;
1955 p->bdev = bdev;
1956 p->flags |= SWP_BLKDEV; 1928 p->flags |= SWP_BLKDEV;
1957 } else if (S_ISREG(inode->i_mode)) { 1929 } else if (S_ISREG(inode->i_mode)) {
1958 p->bdev = inode->i_sb->s_bdev; 1930 p->bdev = inode->i_sb->s_bdev;
1959 mutex_lock(&inode->i_mutex); 1931 mutex_lock(&inode->i_mutex);
1960 did_down = 1; 1932 if (IS_SWAPFILE(inode))
1961 if (IS_SWAPFILE(inode)) { 1933 return -EBUSY;
1962 error = -EBUSY; 1934 } else
1963 goto bad_swap; 1935 return -EINVAL;
1964 }
1965 } else {
1966 goto bad_swap;
1967 }
1968 1936
1969 swapfilepages = i_size_read(inode) >> PAGE_SHIFT; 1937 return 0;
1938}
1970 1939
1971 /* 1940static unsigned long read_swap_header(struct swap_info_struct *p,
1972 * Read the swap header. 1941 union swap_header *swap_header,
1973 */ 1942 struct inode *inode)
1974 if (!mapping->a_ops->readpage) { 1943{
1975 error = -EINVAL; 1944 int i;
1976 goto bad_swap; 1945 unsigned long maxpages;
1977 } 1946 unsigned long swapfilepages;
1978 page = read_mapping_page(mapping, 0, swap_file);
1979 if (IS_ERR(page)) {
1980 error = PTR_ERR(page);
1981 goto bad_swap;
1982 }
1983 swap_header = kmap(page);
1984 1947
1985 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { 1948 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1986 printk(KERN_ERR "Unable to find swap-space signature\n"); 1949 printk(KERN_ERR "Unable to find swap-space signature\n");
1987 error = -EINVAL; 1950 return 0;
1988 goto bad_swap;
1989 } 1951 }
1990 1952
 1991 /* swap partition endianness hack... */ 1953
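
The memcmp() above looks for the 10-byte SWAPSPACE2 signature, which lives in the last bytes of the swap area's first page. A userspace sketch of the same test, assuming that classic layout:

#include <string.h>

/* Return nonzero if 'buf', the first 'page_size' bytes of a swap area,
 * carries the version-2 signature that read_swap_header() requires. */
static int looks_like_swapspace2(const char *buf, size_t page_size)
{
	return memcmp(buf + page_size - 10, "SWAPSPACE2", 10) == 0;
}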
@@ -2001,8 +1963,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2001 printk(KERN_WARNING 1963 printk(KERN_WARNING
2002 "Unable to handle swap header version %d\n", 1964 "Unable to handle swap header version %d\n",
2003 swap_header->info.version); 1965 swap_header->info.version);
2004 error = -EINVAL; 1966 return 0;
2005 goto bad_swap;
2006 } 1967 }
2007 1968
2008 p->lowest_bit = 1; 1969 p->lowest_bit = 1;
@@ -2033,61 +1994,155 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2033 } 1994 }
2034 p->highest_bit = maxpages - 1; 1995 p->highest_bit = maxpages - 1;
2035 1996
2036 error = -EINVAL;
2037 if (!maxpages) 1997 if (!maxpages)
2038 goto bad_swap; 1998 return 0;
1999 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2039 if (swapfilepages && maxpages > swapfilepages) { 2000 if (swapfilepages && maxpages > swapfilepages) {
2040 printk(KERN_WARNING 2001 printk(KERN_WARNING
2041 "Swap area shorter than signature indicates\n"); 2002 "Swap area shorter than signature indicates\n");
2042 goto bad_swap; 2003 return 0;
2043 } 2004 }
2044 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 2005 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2045 goto bad_swap; 2006 return 0;
2046 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 2007 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2047 goto bad_swap; 2008 return 0;
2048 2009
2049 /* OK, set up the swap map and apply the bad block list */ 2010 return maxpages;
2050 swap_map = vmalloc(maxpages); 2011}
2051 if (!swap_map) { 2012
2052 error = -ENOMEM; 2013static int setup_swap_map_and_extents(struct swap_info_struct *p,
2053 goto bad_swap; 2014 union swap_header *swap_header,
2054 } 2015 unsigned char *swap_map,
2016 unsigned long maxpages,
2017 sector_t *span)
2018{
2019 int i;
2020 unsigned int nr_good_pages;
2021 int nr_extents;
2055 2022
2056 memset(swap_map, 0, maxpages);
2057 nr_good_pages = maxpages - 1; /* omit header page */ 2023 nr_good_pages = maxpages - 1; /* omit header page */
2058 2024
2059 for (i = 0; i < swap_header->info.nr_badpages; i++) { 2025 for (i = 0; i < swap_header->info.nr_badpages; i++) {
2060 unsigned int page_nr = swap_header->info.badpages[i]; 2026 unsigned int page_nr = swap_header->info.badpages[i];
2061 if (page_nr == 0 || page_nr > swap_header->info.last_page) { 2027 if (page_nr == 0 || page_nr > swap_header->info.last_page)
2062 error = -EINVAL; 2028 return -EINVAL;
2063 goto bad_swap;
2064 }
2065 if (page_nr < maxpages) { 2029 if (page_nr < maxpages) {
2066 swap_map[page_nr] = SWAP_MAP_BAD; 2030 swap_map[page_nr] = SWAP_MAP_BAD;
2067 nr_good_pages--; 2031 nr_good_pages--;
2068 } 2032 }
2069 } 2033 }
2070 2034
2071 error = swap_cgroup_swapon(type, maxpages);
2072 if (error)
2073 goto bad_swap;
2074
2075 if (nr_good_pages) { 2035 if (nr_good_pages) {
2076 swap_map[0] = SWAP_MAP_BAD; 2036 swap_map[0] = SWAP_MAP_BAD;
2077 p->max = maxpages; 2037 p->max = maxpages;
2078 p->pages = nr_good_pages; 2038 p->pages = nr_good_pages;
2079 nr_extents = setup_swap_extents(p, &span); 2039 nr_extents = setup_swap_extents(p, span);
2080 if (nr_extents < 0) { 2040 if (nr_extents < 0)
2081 error = nr_extents; 2041 return nr_extents;
2082 goto bad_swap;
2083 }
2084 nr_good_pages = p->pages; 2042 nr_good_pages = p->pages;
2085 } 2043 }
2086 if (!nr_good_pages) { 2044 if (!nr_good_pages) {
2087 printk(KERN_WARNING "Empty swap-file\n"); 2045 printk(KERN_WARNING "Empty swap-file\n");
2046 return -EINVAL;
2047 }
2048
2049 return nr_extents;
2050}
2051
2052SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2053{
2054 struct swap_info_struct *p;
2055 char *name;
2056 struct file *swap_file = NULL;
2057 struct address_space *mapping;
2058 int i;
2059 int prio;
2060 int error;
2061 union swap_header *swap_header;
2062 int nr_extents;
2063 sector_t span;
2064 unsigned long maxpages;
2065 unsigned char *swap_map = NULL;
2066 struct page *page = NULL;
2067 struct inode *inode = NULL;
2068
2069 if (!capable(CAP_SYS_ADMIN))
2070 return -EPERM;
2071
2072 p = alloc_swap_info();
2073 if (IS_ERR(p))
2074 return PTR_ERR(p);
2075
2076 name = getname(specialfile);
2077 if (IS_ERR(name)) {
2078 error = PTR_ERR(name);
2079 name = NULL;
2080 goto bad_swap;
2081 }
2082 swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
2083 if (IS_ERR(swap_file)) {
2084 error = PTR_ERR(swap_file);
2085 swap_file = NULL;
2086 goto bad_swap;
2087 }
2088
2089 p->swap_file = swap_file;
2090 mapping = swap_file->f_mapping;
2091
2092 for (i = 0; i < nr_swapfiles; i++) {
2093 struct swap_info_struct *q = swap_info[i];
2094
2095 if (q == p || !q->swap_file)
2096 continue;
2097 if (mapping == q->swap_file->f_mapping) {
2098 error = -EBUSY;
2099 goto bad_swap;
2100 }
2101 }
2102
2103 inode = mapping->host;
 2104 /* If S_ISREG(inode->i_mode), claim_swapfile() will do mutex_lock(&inode->i_mutex); */
2105 error = claim_swapfile(p, inode);
2106 if (unlikely(error))
2107 goto bad_swap;
2108
2109 /*
2110 * Read the swap header.
2111 */
2112 if (!mapping->a_ops->readpage) {
2088 error = -EINVAL; 2113 error = -EINVAL;
2089 goto bad_swap; 2114 goto bad_swap;
2090 } 2115 }
2116 page = read_mapping_page(mapping, 0, swap_file);
2117 if (IS_ERR(page)) {
2118 error = PTR_ERR(page);
2119 goto bad_swap;
2120 }
2121 swap_header = kmap(page);
2122
2123 maxpages = read_swap_header(p, swap_header, inode);
2124 if (unlikely(!maxpages)) {
2125 error = -EINVAL;
2126 goto bad_swap;
2127 }
2128
2129 /* OK, set up the swap map and apply the bad block list */
2130 swap_map = vzalloc(maxpages);
2131 if (!swap_map) {
2132 error = -ENOMEM;
2133 goto bad_swap;
2134 }
2135
2136 error = swap_cgroup_swapon(p->type, maxpages);
2137 if (error)
2138 goto bad_swap;
2139
2140 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2141 maxpages, &span);
2142 if (unlikely(nr_extents < 0)) {
2143 error = nr_extents;
2144 goto bad_swap;
2145 }
2091 2146
2092 if (p->bdev) { 2147 if (p->bdev) {
2093 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 2148 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
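
setup_swap_map_and_extents() above rejects a bad-page entry that names page 0 or exceeds last_page, marks in-range entries SWAP_MAP_BAD, and deducts them from the usable count. A standalone sketch of that bookkeeping (0x80 stands in for SWAP_MAP_BAD and is a hypothetical value here):

/* Return the number of usable pages, or -1 on a corrupt bad-page list. */
static long apply_bad_pages(unsigned char *map, unsigned long maxpages,
			    const unsigned int *bad, int nbad,
			    unsigned int last_page)
{
	long good = maxpages - 1;	/* page 0 holds the header */
	int i;

	map[0] = 0x80;			/* the header page is never allocated */
	for (i = 0; i < nbad; i++) {
		if (bad[i] == 0 || bad[i] > last_page)
			return -1;
		if (bad[i] < maxpages) {
			map[bad[i]] = 0x80;
			good--;
		}
	}
	return good;
}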
@@ -2099,58 +2154,46 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2099 } 2154 }
2100 2155
2101 mutex_lock(&swapon_mutex); 2156 mutex_lock(&swapon_mutex);
2102 spin_lock(&swap_lock); 2157 prio = -1;
2103 if (swap_flags & SWAP_FLAG_PREFER) 2158 if (swap_flags & SWAP_FLAG_PREFER)
2104 p->prio = 2159 prio =
2105 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 2160 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2106 else 2161 enable_swap_info(p, prio, swap_map);
2107 p->prio = --least_priority;
2108 p->swap_map = swap_map;
2109 p->flags |= SWP_WRITEOK;
2110 nr_swap_pages += nr_good_pages;
2111 total_swap_pages += nr_good_pages;
2112 2162
2113 printk(KERN_INFO "Adding %uk swap on %s. " 2163 printk(KERN_INFO "Adding %uk swap on %s. "
2114 "Priority:%d extents:%d across:%lluk %s%s\n", 2164 "Priority:%d extents:%d across:%lluk %s%s\n",
2115 nr_good_pages<<(PAGE_SHIFT-10), name, p->prio, 2165 p->pages<<(PAGE_SHIFT-10), name, p->prio,
2116 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 2166 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2117 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 2167 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2118 (p->flags & SWP_DISCARDABLE) ? "D" : ""); 2168 (p->flags & SWP_DISCARDABLE) ? "D" : "");
2119 2169
2120 /* insert swap space into swap_list: */
2121 prev = -1;
2122 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
2123 if (p->prio >= swap_info[i]->prio)
2124 break;
2125 prev = i;
2126 }
2127 p->next = i;
2128 if (prev < 0)
2129 swap_list.head = swap_list.next = type;
2130 else
2131 swap_info[prev]->next = type;
2132 spin_unlock(&swap_lock);
2133 mutex_unlock(&swapon_mutex); 2170 mutex_unlock(&swapon_mutex);
2134 atomic_inc(&proc_poll_event); 2171 atomic_inc(&proc_poll_event);
2135 wake_up_interruptible(&proc_poll_wait); 2172 wake_up_interruptible(&proc_poll_wait);
2136 2173
2174 if (S_ISREG(inode->i_mode))
2175 inode->i_flags |= S_SWAPFILE;
2137 error = 0; 2176 error = 0;
2138 goto out; 2177 goto out;
2139bad_swap: 2178bad_swap:
2140 if (bdev) { 2179 if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2141 set_blocksize(bdev, p->old_block_size); 2180 set_blocksize(p->bdev, p->old_block_size);
2142 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2181 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2143 } 2182 }
2144 destroy_swap_extents(p); 2183 destroy_swap_extents(p);
2145 swap_cgroup_swapoff(type); 2184 swap_cgroup_swapoff(p->type);
2146bad_swap_2:
2147 spin_lock(&swap_lock); 2185 spin_lock(&swap_lock);
2148 p->swap_file = NULL; 2186 p->swap_file = NULL;
2149 p->flags = 0; 2187 p->flags = 0;
2150 spin_unlock(&swap_lock); 2188 spin_unlock(&swap_lock);
2151 vfree(swap_map); 2189 vfree(swap_map);
2152 if (swap_file) 2190 if (swap_file) {
2191 if (inode && S_ISREG(inode->i_mode)) {
2192 mutex_unlock(&inode->i_mutex);
2193 inode = NULL;
2194 }
2153 filp_close(swap_file, NULL); 2195 filp_close(swap_file, NULL);
2196 }
2154out: 2197out:
2155 if (page && !IS_ERR(page)) { 2198 if (page && !IS_ERR(page)) {
2156 kunmap(page); 2199 kunmap(page);
@@ -2158,11 +2201,8 @@ out:
2158 } 2201 }
2159 if (name) 2202 if (name)
2160 putname(name); 2203 putname(name);
2161 if (did_down) { 2204 if (inode && S_ISREG(inode->i_mode))
2162 if (!error)
2163 inode->i_flags |= S_SWAPFILE;
2164 mutex_unlock(&inode->i_mutex); 2205 mutex_unlock(&inode->i_mutex);
2165 }
2166 return error; 2206 return error;
2167} 2207}
2168 2208
diff --git a/mm/truncate.c b/mm/truncate.c
index d64296be00d3..a95667529135 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -106,9 +106,8 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
106 cancel_dirty_page(page, PAGE_CACHE_SIZE); 106 cancel_dirty_page(page, PAGE_CACHE_SIZE);
107 107
108 clear_page_mlock(page); 108 clear_page_mlock(page);
109 remove_from_page_cache(page);
110 ClearPageMappedToDisk(page); 109 ClearPageMappedToDisk(page);
111 page_cache_release(page); /* pagecache ref */ 110 delete_from_page_cache(page);
112 return 0; 111 return 0;
113} 112}
114 113
@@ -322,11 +321,12 @@ EXPORT_SYMBOL(truncate_inode_pages);
322 * pagetables. 321 * pagetables.
323 */ 322 */
324unsigned long invalidate_mapping_pages(struct address_space *mapping, 323unsigned long invalidate_mapping_pages(struct address_space *mapping,
325 pgoff_t start, pgoff_t end) 324 pgoff_t start, pgoff_t end)
326{ 325{
327 struct pagevec pvec; 326 struct pagevec pvec;
328 pgoff_t next = start; 327 pgoff_t next = start;
329 unsigned long ret = 0; 328 unsigned long ret;
329 unsigned long count = 0;
330 int i; 330 int i;
331 331
332 pagevec_init(&pvec, 0); 332 pagevec_init(&pvec, 0);
@@ -353,9 +353,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
353 if (lock_failed) 353 if (lock_failed)
354 continue; 354 continue;
355 355
356 ret += invalidate_inode_page(page); 356 ret = invalidate_inode_page(page);
357
358 unlock_page(page); 357 unlock_page(page);
358 /*
 359 * Invalidation is a hint that the page is no longer
 360 * of interest, so try to speed up its reclaim.
361 */
362 if (!ret)
363 deactivate_page(page);
364 count += ret;
359 if (next > end) 365 if (next > end)
360 break; 366 break;
361 } 367 }
@@ -363,7 +369,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
363 mem_cgroup_uncharge_end(); 369 mem_cgroup_uncharge_end();
364 cond_resched(); 370 cond_resched();
365 } 371 }
366 return ret; 372 return count;
367} 373}
368EXPORT_SYMBOL(invalidate_mapping_pages); 374EXPORT_SYMBOL(invalidate_mapping_pages);
369 375
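
With the change above, a page that survives invalidate_inode_page() (ret == 0, typically because it is dirty or under writeback) is passed to deactivate_page() so reclaim will find it sooner. A hedged sketch of a caller that benefits, in the fadvise(POSIX_FADV_DONTNEED) style (shed_cache is a hypothetical name):

/* Drop what can be dropped from [0, isize) and let the pages that could
 * not be invalidated be deactivated inside invalidate_mapping_pages(). */
static unsigned long shed_cache(struct address_space *mapping, loff_t isize)
{
	pgoff_t end = (isize - 1) >> PAGE_CACHE_SHIFT;

	return invalidate_mapping_pages(mapping, 0, end);
}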
@@ -389,7 +395,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
389 395
390 clear_page_mlock(page); 396 clear_page_mlock(page);
391 BUG_ON(page_has_private(page)); 397 BUG_ON(page_has_private(page));
392 __remove_from_page_cache(page); 398 __delete_from_page_cache(page);
393 spin_unlock_irq(&mapping->tree_lock); 399 spin_unlock_irq(&mapping->tree_lock);
394 mem_cgroup_uncharge_cache_page(page); 400 mem_cgroup_uncharge_cache_page(page);
395 401
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f9b166732e70..5d6030235d7a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -261,8 +261,15 @@ struct vmap_area {
261}; 261};
262 262
263static DEFINE_SPINLOCK(vmap_area_lock); 263static DEFINE_SPINLOCK(vmap_area_lock);
264static struct rb_root vmap_area_root = RB_ROOT;
265static LIST_HEAD(vmap_area_list); 264static LIST_HEAD(vmap_area_list);
265static struct rb_root vmap_area_root = RB_ROOT;
266
267/* The vmap cache globals are protected by vmap_area_lock */
268static struct rb_node *free_vmap_cache;
269static unsigned long cached_hole_size;
270static unsigned long cached_vstart;
271static unsigned long cached_align;
272
266static unsigned long vmap_area_pcpu_hole; 273static unsigned long vmap_area_pcpu_hole;
267 274
268static struct vmap_area *__find_vmap_area(unsigned long addr) 275static struct vmap_area *__find_vmap_area(unsigned long addr)
@@ -331,9 +338,11 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
331 struct rb_node *n; 338 struct rb_node *n;
332 unsigned long addr; 339 unsigned long addr;
333 int purged = 0; 340 int purged = 0;
341 struct vmap_area *first;
334 342
335 BUG_ON(!size); 343 BUG_ON(!size);
336 BUG_ON(size & ~PAGE_MASK); 344 BUG_ON(size & ~PAGE_MASK);
345 BUG_ON(!is_power_of_2(align));
337 346
338 va = kmalloc_node(sizeof(struct vmap_area), 347 va = kmalloc_node(sizeof(struct vmap_area),
339 gfp_mask & GFP_RECLAIM_MASK, node); 348 gfp_mask & GFP_RECLAIM_MASK, node);
@@ -341,79 +350,106 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
341 return ERR_PTR(-ENOMEM); 350 return ERR_PTR(-ENOMEM);
342 351
343retry: 352retry:
344 addr = ALIGN(vstart, align);
345
346 spin_lock(&vmap_area_lock); 353 spin_lock(&vmap_area_lock);
347 if (addr + size - 1 < addr) 354 /*
348 goto overflow; 355 * Invalidate cache if we have more permissive parameters.
356 * cached_hole_size notes the largest hole noticed _below_
357 * the vmap_area cached in free_vmap_cache: if size fits
358 * into that hole, we want to scan from vstart to reuse
359 * the hole instead of allocating above free_vmap_cache.
360 * Note that __free_vmap_area may update free_vmap_cache
361 * without updating cached_hole_size or cached_align.
362 */
363 if (!free_vmap_cache ||
364 size < cached_hole_size ||
365 vstart < cached_vstart ||
366 align < cached_align) {
367nocache:
368 cached_hole_size = 0;
369 free_vmap_cache = NULL;
370 }
371 /* record if we encounter less permissive parameters */
372 cached_vstart = vstart;
373 cached_align = align;
374
375 /* find starting point for our search */
376 if (free_vmap_cache) {
377 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
378 addr = ALIGN(first->va_end + PAGE_SIZE, align);
379 if (addr < vstart)
380 goto nocache;
381 if (addr + size - 1 < addr)
382 goto overflow;
383
384 } else {
385 addr = ALIGN(vstart, align);
386 if (addr + size - 1 < addr)
387 goto overflow;
349 388
350 /* XXX: could have a last_hole cache */ 389 n = vmap_area_root.rb_node;
351 n = vmap_area_root.rb_node; 390 first = NULL;
352 if (n) {
353 struct vmap_area *first = NULL;
354 391
355 do { 392 while (n) {
356 struct vmap_area *tmp; 393 struct vmap_area *tmp;
357 tmp = rb_entry(n, struct vmap_area, rb_node); 394 tmp = rb_entry(n, struct vmap_area, rb_node);
358 if (tmp->va_end >= addr) { 395 if (tmp->va_end >= addr) {
359 if (!first && tmp->va_start < addr + size)
360 first = tmp;
361 n = n->rb_left;
362 } else {
363 first = tmp; 396 first = tmp;
397 if (tmp->va_start <= addr)
398 break;
399 n = n->rb_left;
400 } else
364 n = n->rb_right; 401 n = n->rb_right;
365 } 402 }
366 } while (n);
367 403
368 if (!first) 404 if (!first)
369 goto found; 405 goto found;
370
371 if (first->va_end < addr) {
372 n = rb_next(&first->rb_node);
373 if (n)
374 first = rb_entry(n, struct vmap_area, rb_node);
375 else
376 goto found;
377 }
378
379 while (addr + size > first->va_start && addr + size <= vend) {
380 addr = ALIGN(first->va_end + PAGE_SIZE, align);
381 if (addr + size - 1 < addr)
382 goto overflow;
383
384 n = rb_next(&first->rb_node);
385 if (n)
386 first = rb_entry(n, struct vmap_area, rb_node);
387 else
388 goto found;
389 }
390 } 406 }
391found: 407
392 if (addr + size > vend) { 408 /* from the starting point, walk areas until a suitable hole is found */
393overflow: 409 while (addr + size >= first->va_start && addr + size <= vend) {
394 spin_unlock(&vmap_area_lock); 410 if (addr + cached_hole_size < first->va_start)
395 if (!purged) { 411 cached_hole_size = first->va_start - addr;
396 purge_vmap_area_lazy(); 412 addr = ALIGN(first->va_end + PAGE_SIZE, align);
397 purged = 1; 413 if (addr + size - 1 < addr)
398 goto retry; 414 goto overflow;
399 } 415
400 if (printk_ratelimit()) 416 n = rb_next(&first->rb_node);
401 printk(KERN_WARNING 417 if (n)
402 "vmap allocation for size %lu failed: " 418 first = rb_entry(n, struct vmap_area, rb_node);
403 "use vmalloc=<size> to increase size.\n", size); 419 else
404 kfree(va); 420 goto found;
405 return ERR_PTR(-EBUSY);
406 } 421 }
407 422
408 BUG_ON(addr & (align-1)); 423found:
424 if (addr + size > vend)
425 goto overflow;
409 426
410 va->va_start = addr; 427 va->va_start = addr;
411 va->va_end = addr + size; 428 va->va_end = addr + size;
412 va->flags = 0; 429 va->flags = 0;
413 __insert_vmap_area(va); 430 __insert_vmap_area(va);
431 free_vmap_cache = &va->rb_node;
414 spin_unlock(&vmap_area_lock); 432 spin_unlock(&vmap_area_lock);
415 433
434 BUG_ON(va->va_start & (align-1));
435 BUG_ON(va->va_start < vstart);
436 BUG_ON(va->va_end > vend);
437
416 return va; 438 return va;
439
440overflow:
441 spin_unlock(&vmap_area_lock);
442 if (!purged) {
443 purge_vmap_area_lazy();
444 purged = 1;
445 goto retry;
446 }
447 if (printk_ratelimit())
448 printk(KERN_WARNING
449 "vmap allocation for size %lu failed: "
450 "use vmalloc=<size> to increase size.\n", size);
451 kfree(va);
452 return ERR_PTR(-EBUSY);
417} 453}
418 454
419static void rcu_free_va(struct rcu_head *head) 455static void rcu_free_va(struct rcu_head *head)
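
The free_vmap_cache logic above remembers where the previous search ended and restarts there, falling back to a full search whenever the new request is more permissive than the one the cache was built for. A hypothetical standalone model of that validity test:

struct vmap_cache {
	struct rb_node *node;		/* where the last search ended */
	unsigned long hole_below;	/* largest hole seen below 'node' */
	unsigned long vstart, align;	/* parameters the cache was built for */
};

static int vmap_cache_usable(const struct vmap_cache *c, unsigned long size,
			     unsigned long vstart, unsigned long align)
{
	if (!c->node)
		return 0;
	/* a more permissive request might fit in a hole we skipped */
	if (size < c->hole_below || vstart < c->vstart || align < c->align)
		return 0;
	return 1;
}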
@@ -426,6 +462,22 @@ static void rcu_free_va(struct rcu_head *head)
426static void __free_vmap_area(struct vmap_area *va) 462static void __free_vmap_area(struct vmap_area *va)
427{ 463{
428 BUG_ON(RB_EMPTY_NODE(&va->rb_node)); 464 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
465
466 if (free_vmap_cache) {
467 if (va->va_end < cached_vstart) {
468 free_vmap_cache = NULL;
469 } else {
470 struct vmap_area *cache;
471 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
472 if (va->va_start <= cache->va_start) {
473 free_vmap_cache = rb_prev(&va->rb_node);
474 /*
475 * We don't try to update cached_hole_size or
476 * cached_align, but it won't go very wrong.
477 */
478 }
479 }
480 }
429 rb_erase(&va->rb_node, &vmap_area_root); 481 rb_erase(&va->rb_node, &vmap_area_root);
430 RB_CLEAR_NODE(&va->rb_node); 482 RB_CLEAR_NODE(&va->rb_node);
431 list_del_rcu(&va->list); 483 list_del_rcu(&va->list);
@@ -1951,8 +2003,6 @@ finished:
1951 * should know vmalloc() area is valid and can use memcpy(). 2003 * should know vmalloc() area is valid and can use memcpy().
1952 * This is for routines which have to access vmalloc area without 2004 * This is for routines which have to access vmalloc area without
 1953 * any information, such as /dev/kmem. 2005
1954 *
1955 * The caller should guarantee KM_USER1 is not used.
1956 */ 2006 */
1957 2007
1958long vwrite(char *buf, char *addr, unsigned long count) 2008long vwrite(char *buf, char *addr, unsigned long count)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6771ea70bfe7..060e4c191403 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -514,7 +514,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
514 514
515 freepage = mapping->a_ops->freepage; 515 freepage = mapping->a_ops->freepage;
516 516
517 __remove_from_page_cache(page); 517 __delete_from_page_cache(page);
518 spin_unlock_irq(&mapping->tree_lock); 518 spin_unlock_irq(&mapping->tree_lock);
519 mem_cgroup_uncharge_cache_page(page); 519 mem_cgroup_uncharge_cache_page(page);
520 520
@@ -2397,9 +2397,9 @@ loop_again:
2397 * cause too much scanning of the lower zones. 2397 * cause too much scanning of the lower zones.
2398 */ 2398 */
2399 for (i = 0; i <= end_zone; i++) { 2399 for (i = 0; i <= end_zone; i++) {
2400 int compaction;
2401 struct zone *zone = pgdat->node_zones + i; 2400 struct zone *zone = pgdat->node_zones + i;
2402 int nr_slab; 2401 int nr_slab;
2402 unsigned long balance_gap;
2403 2403
2404 if (!populated_zone(zone)) 2404 if (!populated_zone(zone))
2405 continue; 2405 continue;
@@ -2416,11 +2416,20 @@ loop_again:
2416 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask); 2416 mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
2417 2417
2418 /* 2418 /*
2419 * We put equal pressure on every zone, unless one 2419 * We put equal pressure on every zone, unless
2420 * zone has way too many pages free already. 2420 * one zone has way too many pages free
2421 * already. The "too many pages" is defined
2422 * as the high wmark plus a "gap" where the
2423 * gap is either the low watermark or 1%
2424 * of the zone, whichever is smaller.
2421 */ 2425 */
2426 balance_gap = min(low_wmark_pages(zone),
2427 (zone->present_pages +
2428 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2429 KSWAPD_ZONE_BALANCE_GAP_RATIO);
2422 if (!zone_watermark_ok_safe(zone, order, 2430 if (!zone_watermark_ok_safe(zone, order,
2423 8*high_wmark_pages(zone), end_zone, 0)) 2431 high_wmark_pages(zone) + balance_gap,
2432 end_zone, 0))
2424 shrink_zone(priority, zone, &sc); 2433 shrink_zone(priority, zone, &sc);
2425 reclaim_state->reclaimed_slab = 0; 2434 reclaim_state->reclaimed_slab = 0;
2426 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, 2435 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
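
The balance_gap above is min(low watermark, roughly 1% of the zone, rounded up). Assuming KSWAPD_ZONE_BALANCE_GAP_RATIO is 100, a 1GB zone of 4KB pages gives (262144 + 99) / 100 = 2622 pages, so with a low watermark of 1024 pages kswapd reclaims to high_wmark + 1024 instead of the old 8 * high_wmark. The same arithmetic as a sketch (all numbers hypothetical):

static unsigned long balance_gap_example(void)
{
	unsigned long present_pages = 262144;	/* 1GB zone, 4KB pages */
	unsigned long low_wmark = 1024;
	unsigned long ratio = 100;		/* assumed GAP_RATIO */
	unsigned long one_percent = (present_pages + ratio - 1) / ratio;

	return low_wmark < one_percent ? low_wmark : one_percent; /* 1024 */
}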
@@ -2428,24 +2437,9 @@ loop_again:
2428 sc.nr_reclaimed += reclaim_state->reclaimed_slab; 2437 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2429 total_scanned += sc.nr_scanned; 2438 total_scanned += sc.nr_scanned;
2430 2439
2431 compaction = 0;
2432 if (order &&
2433 zone_watermark_ok(zone, 0,
2434 high_wmark_pages(zone),
2435 end_zone, 0) &&
2436 !zone_watermark_ok(zone, order,
2437 high_wmark_pages(zone),
2438 end_zone, 0)) {
2439 compact_zone_order(zone,
2440 order,
2441 sc.gfp_mask, false,
2442 COMPACT_MODE_KSWAPD);
2443 compaction = 1;
2444 }
2445
2446 if (zone->all_unreclaimable) 2440 if (zone->all_unreclaimable)
2447 continue; 2441 continue;
2448 if (!compaction && nr_slab == 0 && 2442 if (nr_slab == 0 &&
2449 !zone_reclaimable(zone)) 2443 !zone_reclaimable(zone))
2450 zone->all_unreclaimable = 1; 2444 zone->all_unreclaimable = 1;
2451 /* 2445 /*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0c3b5048773e..772b39b87d95 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -500,8 +500,12 @@ void refresh_cpu_vm_stats(int cpu)
500 * z = the zone from which the allocation occurred. 500 * z = the zone from which the allocation occurred.
501 * 501 *
502 * Must be called with interrupts disabled. 502 * Must be called with interrupts disabled.
503 *
 504 * When __GFP_OTHER_NODE is set, assume the node of the preferred
 505 * zone is the local node. This is useful for daemons that allocate
506 * memory on behalf of other processes.
503 */ 507 */
504void zone_statistics(struct zone *preferred_zone, struct zone *z) 508void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
505{ 509{
506 if (z->zone_pgdat == preferred_zone->zone_pgdat) { 510 if (z->zone_pgdat == preferred_zone->zone_pgdat) {
507 __inc_zone_state(z, NUMA_HIT); 511 __inc_zone_state(z, NUMA_HIT);
@@ -509,7 +513,8 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z)
509 __inc_zone_state(z, NUMA_MISS); 513 __inc_zone_state(z, NUMA_MISS);
510 __inc_zone_state(preferred_zone, NUMA_FOREIGN); 514 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
511 } 515 }
512 if (z->node == numa_node_id()) 516 if (z->node == ((flags & __GFP_OTHER_NODE) ?
517 preferred_zone->node : numa_node_id()))
513 __inc_zone_state(z, NUMA_LOCAL); 518 __inc_zone_state(z, NUMA_LOCAL);
514 else 519 else
515 __inc_zone_state(z, NUMA_OTHER); 520 __inc_zone_state(z, NUMA_OTHER);
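
Under the rule above, NUMA_LOCAL vs NUMA_OTHER is judged against the preferred zone's node whenever __GFP_OTHER_NODE is set, so a daemon allocating on behalf of a process on another node is accounted from that process's point of view. A hypothetical standalone form of the decision:

/* Which locality counter should this allocation bump? */
static const char *numa_counter(int zone_node, int preferred_node,
				int current_node, int other_node)
{
	int local = other_node ? preferred_node : current_node;

	return zone_node == local ? "NUMA_LOCAL" : "NUMA_OTHER";
}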
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ae610f046de5..e34ea9e5e28b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -720,6 +720,7 @@ static int vlan_dev_init(struct net_device *dev)
720 dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid; 720 dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
721#endif 721#endif
722 722
723 dev->needed_headroom = real_dev->needed_headroom;
723 if (real_dev->features & NETIF_F_HW_VLAN_TX) { 724 if (real_dev->features & NETIF_F_HW_VLAN_TX) {
724 dev->header_ops = real_dev->header_ops; 725 dev->header_ops = real_dev->header_ops;
725 dev->hard_header_len = real_dev->hard_header_len; 726 dev->hard_header_len = real_dev->hard_header_len;
diff --git a/net/9p/client.c b/net/9p/client.c
index 347ec0cd2718..2ccbf04d37df 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -223,7 +223,7 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
223 223
224 req = &c->reqs[row][col]; 224 req = &c->reqs[row][col];
225 if (!req->tc) { 225 if (!req->tc) {
226 req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); 226 req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS);
227 if (!req->wq) { 227 if (!req->wq) {
228 printk(KERN_ERR "Couldn't grow tag array\n"); 228 printk(KERN_ERR "Couldn't grow tag array\n");
229 return ERR_PTR(-ENOMEM); 229 return ERR_PTR(-ENOMEM);
@@ -233,17 +233,17 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
233 P9_TRANS_PREF_PAYLOAD_SEP) { 233 P9_TRANS_PREF_PAYLOAD_SEP) {
234 int alloc_msize = min(c->msize, 4096); 234 int alloc_msize = min(c->msize, 4096);
235 req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize, 235 req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
236 GFP_KERNEL); 236 GFP_NOFS);
237 req->tc->capacity = alloc_msize; 237 req->tc->capacity = alloc_msize;
238 req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize, 238 req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
239 GFP_KERNEL); 239 GFP_NOFS);
240 req->rc->capacity = alloc_msize; 240 req->rc->capacity = alloc_msize;
241 } else { 241 } else {
242 req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize, 242 req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
243 GFP_KERNEL); 243 GFP_NOFS);
244 req->tc->capacity = c->msize; 244 req->tc->capacity = c->msize;
245 req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize, 245 req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
246 GFP_KERNEL); 246 GFP_NOFS);
247 req->rc->capacity = c->msize; 247 req->rc->capacity = c->msize;
248 } 248 }
249 if ((!req->tc) || (!req->rc)) { 249 if ((!req->tc) || (!req->rc)) {
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 2ce515b859b3..8a4084fa8b5a 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -205,7 +205,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
205 if (errcode) 205 if (errcode)
206 break; 206 break;
207 207
208 *sptr = kmalloc(len + 1, GFP_KERNEL); 208 *sptr = kmalloc(len + 1, GFP_NOFS);
209 if (*sptr == NULL) { 209 if (*sptr == NULL) {
210 errcode = -EFAULT; 210 errcode = -EFAULT;
211 break; 211 break;
@@ -273,7 +273,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
273 if (!errcode) { 273 if (!errcode) {
274 *wnames = 274 *wnames =
275 kmalloc(sizeof(char *) * *nwname, 275 kmalloc(sizeof(char *) * *nwname,
276 GFP_KERNEL); 276 GFP_NOFS);
277 if (!*wnames) 277 if (!*wnames)
278 errcode = -ENOMEM; 278 errcode = -ENOMEM;
279 } 279 }
@@ -317,7 +317,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
317 *wqids = 317 *wqids =
318 kmalloc(*nwqid * 318 kmalloc(*nwqid *
319 sizeof(struct p9_qid), 319 sizeof(struct p9_qid),
320 GFP_KERNEL); 320 GFP_NOFS);
321 if (*wqids == NULL) 321 if (*wqids == NULL)
322 errcode = -ENOMEM; 322 errcode = -ENOMEM;
323 } 323 }
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index d62b9aa58df8..9172ab78fcb0 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -41,9 +41,9 @@ EXPORT_SYMBOL(p9_release_req_pages);
41int 41int
42p9_nr_pages(struct p9_req_t *req) 42p9_nr_pages(struct p9_req_t *req)
43{ 43{
44 int start_page, end_page; 44 unsigned long start_page, end_page;
45 start_page = (unsigned long long)req->tc->pubuf >> PAGE_SHIFT; 45 start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT;
46 end_page = ((unsigned long long)req->tc->pubuf + req->tc->pbuf_size + 46 end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size +
47 PAGE_SIZE - 1) >> PAGE_SHIFT; 47 PAGE_SIZE - 1) >> PAGE_SHIFT;
48 return end_page - start_page; 48 return end_page - start_page;
49} 49}
@@ -69,8 +69,8 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
69 *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1); 69 *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1);
70 70
71 if (*pdata_off) 71 if (*pdata_off)
72 first_page_bytes = min((PAGE_SIZE - *pdata_off), 72 first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
73 req->tc->pbuf_size); 73 req->tc->pbuf_size);
74 74
75 rpinfo = req->tc->private; 75 rpinfo = req->tc->private;
76 pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf, 76 pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index a30471e51740..aa5672b15eae 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -350,7 +350,7 @@ static void p9_read_work(struct work_struct *work)
350 350
351 if (m->req->rc == NULL) { 351 if (m->req->rc == NULL) {
352 m->req->rc = kmalloc(sizeof(struct p9_fcall) + 352 m->req->rc = kmalloc(sizeof(struct p9_fcall) +
353 m->client->msize, GFP_KERNEL); 353 m->client->msize, GFP_NOFS);
354 if (!m->req->rc) { 354 if (!m->req->rc) {
355 m->req = NULL; 355 m->req = NULL;
356 err = -ENOMEM; 356 err = -ENOMEM;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 29a54ccd213d..150e0c4bbf40 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -424,7 +424,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
424 struct p9_rdma_context *rpl_context = NULL; 424 struct p9_rdma_context *rpl_context = NULL;
425 425
426 /* Allocate an fcall for the reply */ 426 /* Allocate an fcall for the reply */
427 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); 427 rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
428 if (!rpl_context) { 428 if (!rpl_context) {
429 err = -ENOMEM; 429 err = -ENOMEM;
430 goto err_close; 430 goto err_close;
@@ -437,7 +437,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
437 */ 437 */
438 if (!req->rc) { 438 if (!req->rc) {
439 req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize, 439 req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
440 GFP_KERNEL); 440 GFP_NOFS);
441 if (req->rc) { 441 if (req->rc) {
442 req->rc->sdata = (char *) req->rc + 442 req->rc->sdata = (char *) req->rc +
443 sizeof(struct p9_fcall); 443 sizeof(struct p9_fcall);
@@ -468,7 +468,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
468 req->rc = NULL; 468 req->rc = NULL;
469 469
470 /* Post the request */ 470 /* Post the request */
471 c = kmalloc(sizeof *c, GFP_KERNEL); 471 c = kmalloc(sizeof *c, GFP_NOFS);
472 if (!c) { 472 if (!c) {
473 err = -ENOMEM; 473 err = -ENOMEM;
474 goto err_free1; 474 goto err_free1;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9b550ed9c711..e8f046b07182 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,6 +43,7 @@
43#include <net/9p/client.h> 43#include <net/9p/client.h>
44#include <net/9p/transport.h> 44#include <net/9p/transport.h>
45#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
46#include <linux/swap.h>
46#include <linux/virtio.h> 47#include <linux/virtio.h>
47#include <linux/virtio_9p.h> 48#include <linux/virtio_9p.h>
48#include "trans_common.h" 49#include "trans_common.h"
@@ -51,6 +52,8 @@
51 52
52/* a single mutex to manage channel initialization and attachment */ 53/* a single mutex to manage channel initialization and attachment */
53static DEFINE_MUTEX(virtio_9p_lock); 54static DEFINE_MUTEX(virtio_9p_lock);
55static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
56static atomic_t vp_pinned = ATOMIC_INIT(0);
54 57
55/** 58/**
56 * struct virtio_chan - per-instance transport information 59 * struct virtio_chan - per-instance transport information
@@ -78,7 +81,10 @@ struct virtio_chan {
78 struct virtqueue *vq; 81 struct virtqueue *vq;
79 int ring_bufs_avail; 82 int ring_bufs_avail;
80 wait_queue_head_t *vc_wq; 83 wait_queue_head_t *vc_wq;
 81 84 /* This is a global limit. Since we don't have a global structure,
 85 * we place it in each channel.
86 */
87 int p9_max_pages;
82 /* Scatterlist: can be too big for stack. */ 88 /* Scatterlist: can be too big for stack. */
83 struct scatterlist sg[VIRTQUEUE_NUM]; 89 struct scatterlist sg[VIRTQUEUE_NUM];
84 90
@@ -141,34 +147,36 @@ static void req_done(struct virtqueue *vq)
141 147
142 P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n"); 148 P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
143 149
144 do { 150 while (1) {
145 spin_lock_irqsave(&chan->lock, flags); 151 spin_lock_irqsave(&chan->lock, flags);
146 rc = virtqueue_get_buf(chan->vq, &len); 152 rc = virtqueue_get_buf(chan->vq, &len);
147 153
148 if (rc != NULL) { 154 if (rc == NULL) {
149 if (!chan->ring_bufs_avail) {
150 chan->ring_bufs_avail = 1;
151 wake_up(chan->vc_wq);
152 }
153 spin_unlock_irqrestore(&chan->lock, flags);
154 P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
155 P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n",
156 rc->tag);
157 req = p9_tag_lookup(chan->client, rc->tag);
158 req->status = REQ_STATUS_RCVD;
159 if (req->tc->private) {
160 struct trans_rpage_info *rp = req->tc->private;
161 /*Release pages */
162 p9_release_req_pages(rp);
163 if (rp->rp_alloc)
164 kfree(rp);
165 req->tc->private = NULL;
166 }
167 p9_client_cb(chan->client, req);
168 } else {
169 spin_unlock_irqrestore(&chan->lock, flags); 155 spin_unlock_irqrestore(&chan->lock, flags);
156 break;
157 }
158
159 chan->ring_bufs_avail = 1;
160 spin_unlock_irqrestore(&chan->lock, flags);
 161 /* Wake up anyone waiting for VirtIO ring space. */
162 wake_up(chan->vc_wq);
163 P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
164 P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
165 req = p9_tag_lookup(chan->client, rc->tag);
166 if (req->tc->private) {
167 struct trans_rpage_info *rp = req->tc->private;
168 int p = rp->rp_nr_pages;
169 /*Release pages */
170 p9_release_req_pages(rp);
171 atomic_sub(p, &vp_pinned);
172 wake_up(&vp_wq);
173 if (rp->rp_alloc)
174 kfree(rp);
175 req->tc->private = NULL;
170 } 176 }
171 } while (rc != NULL); 177 req->status = REQ_STATUS_RCVD;
178 p9_client_cb(chan->client, req);
179 }
172} 180}
173 181
174/** 182/**
@@ -263,7 +271,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
263 271
264 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n"); 272 P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
265 273
266req_retry:
267 req->status = REQ_STATUS_SENT; 274 req->status = REQ_STATUS_SENT;
268 275
269 if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) { 276 if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
@@ -271,6 +278,14 @@ req_retry:
271 int rpinfo_size = sizeof(struct trans_rpage_info) + 278 int rpinfo_size = sizeof(struct trans_rpage_info) +
272 sizeof(struct page *) * nr_pages; 279 sizeof(struct page *) * nr_pages;
273 280
281 if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
282 err = wait_event_interruptible(vp_wq,
283 atomic_read(&vp_pinned) < chan->p9_max_pages);
284 if (err == -ERESTARTSYS)
285 return err;
286 P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
287 }
288
274 if (rpinfo_size <= (req->tc->capacity - req->tc->size)) { 289 if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
275 /* We can use sdata */ 290 /* We can use sdata */
276 req->tc->private = req->tc->sdata + req->tc->size; 291 req->tc->private = req->tc->sdata + req->tc->size;
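
The vp_pinned accounting above throttles requests that pin user pages: a sender waits until the global count drops below p9_max_pages, and req_done() subtracts the released pages and wakes the queue. A reduced model of the pair (pin_pages and unpin_pages are hypothetical names):

static atomic_t pinned = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(pinned_wq);

static int pin_pages(int nr, int max)
{
	if (atomic_read(&pinned) >= max) {
		int err = wait_event_interruptible(pinned_wq,
					atomic_read(&pinned) < max);
		if (err)
			return err;	/* -ERESTARTSYS on signal */
	}
	atomic_add(nr, &pinned);
	return 0;
}

static void unpin_pages(int nr)
{
	atomic_sub(nr, &pinned);
	wake_up(&pinned_wq);	/* let a throttled sender proceed */
}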
@@ -293,9 +308,12 @@ req_retry:
293 if (rpinfo->rp_alloc) 308 if (rpinfo->rp_alloc)
294 kfree(rpinfo); 309 kfree(rpinfo);
295 return err; 310 return err;
311 } else {
312 atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
296 } 313 }
297 } 314 }
298 315
316req_retry_pinned:
299 spin_lock_irqsave(&chan->lock, flags); 317 spin_lock_irqsave(&chan->lock, flags);
300 318
301 /* Handle out VirtIO ring buffers */ 319 /* Handle out VirtIO ring buffers */
@@ -356,7 +374,7 @@ req_retry:
356 return err; 374 return err;
357 375
358 P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n"); 376 P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
359 goto req_retry; 377 goto req_retry_pinned;
360 } else { 378 } else {
361 spin_unlock_irqrestore(&chan->lock, flags); 379 spin_unlock_irqrestore(&chan->lock, flags);
362 P9_DPRINTK(P9_DEBUG_TRANS, 380 P9_DPRINTK(P9_DEBUG_TRANS,
@@ -453,6 +471,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
453 } 471 }
454 init_waitqueue_head(chan->vc_wq); 472 init_waitqueue_head(chan->vc_wq);
455 chan->ring_bufs_avail = 1; 473 chan->ring_bufs_avail = 1;
474 /* Ceiling limit to avoid denial of service attacks */
475 chan->p9_max_pages = nr_free_buffer_pages()/4;
456 476
457 mutex_lock(&virtio_9p_lock); 477 mutex_lock(&virtio_9p_lock);
458 list_add_tail(&chan->chan_list, &virtio_chan_list); 478 list_add_tail(&chan->chan_list, &virtio_chan_list);
diff --git a/net/9p/util.c b/net/9p/util.c
index e048701a72d2..b84619b5ba22 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -92,7 +92,7 @@ int p9_idpool_get(struct p9_idpool *p)
92 unsigned long flags; 92 unsigned long flags;
93 93
94retry: 94retry:
95 if (idr_pre_get(&p->pool, GFP_KERNEL) == 0) 95 if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
96 return 0; 96 return 0;
97 97
98 spin_lock_irqsave(&p->lock, flags); 98 spin_lock_irqsave(&p->lock, flags);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 3d4f4b043406..206e771e82d1 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1051,6 +1051,7 @@ static int atalk_release(struct socket *sock)
1051{ 1051{
1052 struct sock *sk = sock->sk; 1052 struct sock *sk = sock->sk;
1053 1053
1054 sock_hold(sk);
1054 lock_sock(sk); 1055 lock_sock(sk);
1055 if (sk) { 1056 if (sk) {
1056 sock_orphan(sk); 1057 sock_orphan(sk);
@@ -1058,6 +1059,8 @@ static int atalk_release(struct socket *sock)
1058 atalk_destroy_socket(sk); 1059 atalk_destroy_socket(sk);
1059 } 1060 }
1060 release_sock(sk); 1061 release_sock(sk);
1062 sock_put(sk);
1063
1061 return 0; 1064 return 0;
1062} 1065}
1063 1066
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index f97af5590ba1..008ff6c4eecf 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -739,6 +739,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
739 nf_bridge->mask |= BRNF_PKT_TYPE; 739 nf_bridge->mask |= BRNF_PKT_TYPE;
740 } 740 }
741 741
742 if (br_parse_ip_options(skb))
743 return NF_DROP;
744
742 /* The physdev module checks on this */ 745 /* The physdev module checks on this */
743 nf_bridge->mask |= BRNF_BRIDGED; 746 nf_bridge->mask |= BRNF_BRIDGED;
744 nf_bridge->physoutdev = skb->dev; 747 nf_bridge->physoutdev = skb->dev;
diff --git a/net/ceph/armor.c b/net/ceph/armor.c
index eb2a666b0be7..1fc1ee11dfa2 100644
--- a/net/ceph/armor.c
+++ b/net/ceph/armor.c
@@ -78,8 +78,10 @@ int ceph_unarmor(char *dst, const char *src, const char *end)
78 while (src < end) { 78 while (src < end) {
79 int a, b, c, d; 79 int a, b, c, d;
80 80
81 if (src < end && src[0] == '\n') 81 if (src[0] == '\n') {
82 src++; 82 src++;
83 continue;
84 }
83 if (src + 4 > end) 85 if (src + 4 > end)
84 return -EINVAL; 86 return -EINVAL;
85 a = decode_bits(src[0]); 87 a = decode_bits(src[0]);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index f3e4a13fea0c..95f96ab94bba 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -62,6 +62,7 @@ const char *ceph_msg_type_name(int type)
62 case CEPH_MSG_OSD_MAP: return "osd_map"; 62 case CEPH_MSG_OSD_MAP: return "osd_map";
63 case CEPH_MSG_OSD_OP: return "osd_op"; 63 case CEPH_MSG_OSD_OP: return "osd_op";
64 case CEPH_MSG_OSD_OPREPLY: return "osd_opreply"; 64 case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
65 case CEPH_MSG_WATCH_NOTIFY: return "watch_notify";
65 default: return "unknown"; 66 default: return "unknown";
66 } 67 }
67} 68}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3e20a122ffa2..02212ed50852 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -22,10 +22,15 @@
22#define OSD_OPREPLY_FRONT_LEN 512 22#define OSD_OPREPLY_FRONT_LEN 512
23 23
24static const struct ceph_connection_operations osd_con_ops; 24static const struct ceph_connection_operations osd_con_ops;
25static int __kick_requests(struct ceph_osd_client *osdc,
26 struct ceph_osd *kickosd);
27 25
28static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd); 26static void send_queued(struct ceph_osd_client *osdc);
27static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
28static void __register_request(struct ceph_osd_client *osdc,
29 struct ceph_osd_request *req);
30static void __unregister_linger_request(struct ceph_osd_client *osdc,
31 struct ceph_osd_request *req);
32static int __send_request(struct ceph_osd_client *osdc,
33 struct ceph_osd_request *req);
29 34
30static int op_needs_trail(int op) 35static int op_needs_trail(int op)
31{ 36{
@@ -34,6 +39,7 @@ static int op_needs_trail(int op)
34 case CEPH_OSD_OP_SETXATTR: 39 case CEPH_OSD_OP_SETXATTR:
35 case CEPH_OSD_OP_CMPXATTR: 40 case CEPH_OSD_OP_CMPXATTR:
36 case CEPH_OSD_OP_CALL: 41 case CEPH_OSD_OP_CALL:
42 case CEPH_OSD_OP_NOTIFY:
37 return 1; 43 return 1;
38 default: 44 default:
39 return 0; 45 return 0;
@@ -209,6 +215,8 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
209 init_completion(&req->r_completion); 215 init_completion(&req->r_completion);
210 init_completion(&req->r_safe_completion); 216 init_completion(&req->r_safe_completion);
211 INIT_LIST_HEAD(&req->r_unsafe_item); 217 INIT_LIST_HEAD(&req->r_unsafe_item);
218 INIT_LIST_HEAD(&req->r_linger_item);
219 INIT_LIST_HEAD(&req->r_linger_osd);
212 req->r_flags = flags; 220 req->r_flags = flags;
213 221
214 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); 222 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -315,6 +323,24 @@ static void osd_req_encode_op(struct ceph_osd_request *req,
315 break; 323 break;
316 case CEPH_OSD_OP_STARTSYNC: 324 case CEPH_OSD_OP_STARTSYNC:
317 break; 325 break;
326 case CEPH_OSD_OP_NOTIFY:
327 {
328 __le32 prot_ver = cpu_to_le32(src->watch.prot_ver);
329 __le32 timeout = cpu_to_le32(src->watch.timeout);
330
331 BUG_ON(!req->r_trail);
332
333 ceph_pagelist_append(req->r_trail,
334 &prot_ver, sizeof(prot_ver));
335 ceph_pagelist_append(req->r_trail,
336 &timeout, sizeof(timeout));
337 }
338 case CEPH_OSD_OP_NOTIFY_ACK:
339 case CEPH_OSD_OP_WATCH:
340 dst->watch.cookie = cpu_to_le64(src->watch.cookie);
341 dst->watch.ver = cpu_to_le64(src->watch.ver);
342 dst->watch.flag = src->watch.flag;
343 break;
318 default: 344 default:
319 pr_err("unrecognized osd opcode %d\n", dst->op); 345 pr_err("unrecognized osd opcode %d\n", dst->op);
320 WARN_ON(1); 346 WARN_ON(1);
@@ -529,6 +555,45 @@ __lookup_request_ge(struct ceph_osd_client *osdc,
529 return NULL; 555 return NULL;
530} 556}
531 557
558/*
559 * Resubmit requests pending on the given osd.
560 */
561static void __kick_osd_requests(struct ceph_osd_client *osdc,
562 struct ceph_osd *osd)
563{
564 struct ceph_osd_request *req, *nreq;
565 int err;
566
567 dout("__kick_osd_requests osd%d\n", osd->o_osd);
568 err = __reset_osd(osdc, osd);
569 if (err == -EAGAIN)
570 return;
571
572 list_for_each_entry(req, &osd->o_requests, r_osd_item) {
573 list_move(&req->r_req_lru_item, &osdc->req_unsent);
574 dout("requeued %p tid %llu osd%d\n", req, req->r_tid,
575 osd->o_osd);
576 if (!req->r_linger)
577 req->r_flags |= CEPH_OSD_FLAG_RETRY;
578 }
579
580 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
581 r_linger_osd) {
582 __unregister_linger_request(osdc, req);
583 __register_request(osdc, req);
584 list_move(&req->r_req_lru_item, &osdc->req_unsent);
585 dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
586 osd->o_osd);
587 }
588}
589
590static void kick_osd_requests(struct ceph_osd_client *osdc,
591 struct ceph_osd *kickosd)
592{
593 mutex_lock(&osdc->request_mutex);
594 __kick_osd_requests(osdc, kickosd);
595 mutex_unlock(&osdc->request_mutex);
596}
532 597
533/* 598/*
534 * If the osd connection drops, we need to resubmit all requests. 599 * If the osd connection drops, we need to resubmit all requests.
@@ -543,7 +608,8 @@ static void osd_reset(struct ceph_connection *con)
543 dout("osd_reset osd%d\n", osd->o_osd); 608 dout("osd_reset osd%d\n", osd->o_osd);
544 osdc = osd->o_osdc; 609 osdc = osd->o_osdc;
545 down_read(&osdc->map_sem); 610 down_read(&osdc->map_sem);
546 kick_requests(osdc, osd); 611 kick_osd_requests(osdc, osd);
612 send_queued(osdc);
547 up_read(&osdc->map_sem); 613 up_read(&osdc->map_sem);
548} 614}
549 615
@@ -561,6 +627,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
561 atomic_set(&osd->o_ref, 1); 627 atomic_set(&osd->o_ref, 1);
562 osd->o_osdc = osdc; 628 osd->o_osdc = osdc;
563 INIT_LIST_HEAD(&osd->o_requests); 629 INIT_LIST_HEAD(&osd->o_requests);
630 INIT_LIST_HEAD(&osd->o_linger_requests);
564 INIT_LIST_HEAD(&osd->o_osd_lru); 631 INIT_LIST_HEAD(&osd->o_osd_lru);
565 osd->o_incarnation = 1; 632 osd->o_incarnation = 1;
566 633
@@ -650,7 +717,8 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
650 int ret = 0; 717 int ret = 0;
651 718
652 dout("__reset_osd %p osd%d\n", osd, osd->o_osd); 719 dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
653 if (list_empty(&osd->o_requests)) { 720 if (list_empty(&osd->o_requests) &&
721 list_empty(&osd->o_linger_requests)) {
654 __remove_osd(osdc, osd); 722 __remove_osd(osdc, osd);
655 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd], 723 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
656 &osd->o_con.peer_addr, 724 &osd->o_con.peer_addr,
@@ -723,10 +791,9 @@ static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
723 * Register request, assign tid. If this is the first request, set up 791 * Register request, assign tid. If this is the first request, set up
724 * the timeout event. 792 * the timeout event.
725 */ 793 */
726static void register_request(struct ceph_osd_client *osdc, 794static void __register_request(struct ceph_osd_client *osdc,
727 struct ceph_osd_request *req) 795 struct ceph_osd_request *req)
728{ 796{
729 mutex_lock(&osdc->request_mutex);
730 req->r_tid = ++osdc->last_tid; 797 req->r_tid = ++osdc->last_tid;
731 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); 798 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
732 INIT_LIST_HEAD(&req->r_req_lru_item); 799 INIT_LIST_HEAD(&req->r_req_lru_item);
@@ -740,6 +807,13 @@ static void register_request(struct ceph_osd_client *osdc,
740 dout(" first request, scheduling timeout\n"); 807 dout(" first request, scheduling timeout\n");
741 __schedule_osd_timeout(osdc); 808 __schedule_osd_timeout(osdc);
742 } 809 }
810}
811
812static void register_request(struct ceph_osd_client *osdc,
813 struct ceph_osd_request *req)
814{
815 mutex_lock(&osdc->request_mutex);
816 __register_request(osdc, req);
743 mutex_unlock(&osdc->request_mutex); 817 mutex_unlock(&osdc->request_mutex);
744} 818}
745 819
@@ -758,9 +832,14 @@ static void __unregister_request(struct ceph_osd_client *osdc,
758 ceph_con_revoke(&req->r_osd->o_con, req->r_request); 832 ceph_con_revoke(&req->r_osd->o_con, req->r_request);
759 833
760 list_del_init(&req->r_osd_item); 834 list_del_init(&req->r_osd_item);
761 if (list_empty(&req->r_osd->o_requests)) 835 if (list_empty(&req->r_osd->o_requests) &&
836 list_empty(&req->r_osd->o_linger_requests)) {
837 dout("moving osd to %p lru\n", req->r_osd);
762 __move_osd_to_lru(osdc, req->r_osd); 838 __move_osd_to_lru(osdc, req->r_osd);
763 req->r_osd = NULL; 839 }
840 if (list_empty(&req->r_osd_item) &&
841 list_empty(&req->r_linger_item))
842 req->r_osd = NULL;
764 } 843 }
765 844
766 ceph_osdc_put_request(req); 845 ceph_osdc_put_request(req);
@@ -781,20 +860,72 @@ static void __cancel_request(struct ceph_osd_request *req)
781 ceph_con_revoke(&req->r_osd->o_con, req->r_request); 860 ceph_con_revoke(&req->r_osd->o_con, req->r_request);
782 req->r_sent = 0; 861 req->r_sent = 0;
783 } 862 }
784 list_del_init(&req->r_req_lru_item);
785} 863}
786 864
865static void __register_linger_request(struct ceph_osd_client *osdc,
866 struct ceph_osd_request *req)
867{
868 dout("__register_linger_request %p\n", req);
869 list_add_tail(&req->r_linger_item, &osdc->req_linger);
870 list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests);
871}
872
873static void __unregister_linger_request(struct ceph_osd_client *osdc,
874 struct ceph_osd_request *req)
875{
876 dout("__unregister_linger_request %p\n", req);
877 if (req->r_osd) {
878 list_del_init(&req->r_linger_item);
879 list_del_init(&req->r_linger_osd);
880
881 if (list_empty(&req->r_osd->o_requests) &&
882 list_empty(&req->r_osd->o_linger_requests)) {
883 dout("moving osd to %p lru\n", req->r_osd);
884 __move_osd_to_lru(osdc, req->r_osd);
885 }
886 req->r_osd = NULL;
887 }
888}
889
890void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
891 struct ceph_osd_request *req)
892{
893 mutex_lock(&osdc->request_mutex);
894 if (req->r_linger) {
895 __unregister_linger_request(osdc, req);
896 ceph_osdc_put_request(req);
897 }
898 mutex_unlock(&osdc->request_mutex);
899}
900EXPORT_SYMBOL(ceph_osdc_unregister_linger_request);
901
902void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
903 struct ceph_osd_request *req)
904{
905 if (!req->r_linger) {
906 dout("set_request_linger %p\n", req);
907 req->r_linger = 1;
908 /*
909 * caller is now responsible for calling
910 * unregister_linger_request
911 */
912 ceph_osdc_get_request(req);
913 }
914}
915EXPORT_SYMBOL(ceph_osdc_set_request_linger);
916
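
ceph_osdc_set_request_linger() takes one extra reference on the request, and ceph_osdc_unregister_linger_request() drops it, so the linger machinery owns a reference for exactly as long as the flag is set. A hedged sketch of that ownership convention using C11 atomics (obj/get/put are illustrative stand-ins for the kernel's refcounting):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
	bool linger;
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	if (o)
		atomic_init(&o->refs, 1);	/* creator's reference */
	return o;
}

static void obj_get(struct obj *o) { atomic_fetch_add(&o->refs, 1); }

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		printf("freeing obj\n");
		free(o);
	}
}

/* Setting the flag takes one reference on behalf of the linger code... */
static void set_linger(struct obj *o)
{
	if (!o->linger) {
		o->linger = true;
		obj_get(o);
	}
}

/* ...and clearing it drops that same reference. */
static void unregister_linger(struct obj *o)
{
	if (o->linger) {
		o->linger = false;
		obj_put(o);
	}
}

int main(void)
{
	struct obj *o = obj_new();

	if (!o)
		return 1;
	set_linger(o);
	set_linger(o);		/* idempotent: still only one extra ref */
	obj_put(o);		/* creator done; obj kept alive by linger */
	unregister_linger(o);	/* last reference: obj freed here */
	return 0;
}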
787/* 917/*
788 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct 918 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
789 * (as needed), and set the request r_osd appropriately. If there is 919 * (as needed), and set the request r_osd appropriately. If there is
 790 * no up osd, set r_osd to NULL. 920 * no up osd, set r_osd to NULL. Move the request to the appropriate list
 921 * (unsent, homeless) or leave it on the in-flight lru.
791 * 922 *
792 * Return 0 if unchanged, 1 if changed, or negative on error. 923 * Return 0 if unchanged, 1 if changed, or negative on error.
793 * 924 *
794 * Caller should hold map_sem for read and request_mutex. 925 * Caller should hold map_sem for read and request_mutex.
795 */ 926 */
796static int __map_osds(struct ceph_osd_client *osdc, 927static int __map_request(struct ceph_osd_client *osdc,
797 struct ceph_osd_request *req) 928 struct ceph_osd_request *req)
798{ 929{
799 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; 930 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
800 struct ceph_pg pgid; 931 struct ceph_pg pgid;
@@ -802,11 +933,13 @@ static int __map_osds(struct ceph_osd_client *osdc,
802 int o = -1, num = 0; 933 int o = -1, num = 0;
803 int err; 934 int err;
804 935
805 dout("map_osds %p tid %lld\n", req, req->r_tid); 936 dout("map_request %p tid %lld\n", req, req->r_tid);
806 err = ceph_calc_object_layout(&reqhead->layout, req->r_oid, 937 err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
807 &req->r_file_layout, osdc->osdmap); 938 &req->r_file_layout, osdc->osdmap);
808 if (err) 939 if (err) {
940 list_move(&req->r_req_lru_item, &osdc->req_notarget);
809 return err; 941 return err;
942 }
810 pgid = reqhead->layout.ol_pgid; 943 pgid = reqhead->layout.ol_pgid;
811 req->r_pgid = pgid; 944 req->r_pgid = pgid;
812 945
@@ -823,7 +956,7 @@ static int __map_osds(struct ceph_osd_client *osdc,
823 (req->r_osd == NULL && o == -1)) 956 (req->r_osd == NULL && o == -1))
824 return 0; /* no change */ 957 return 0; /* no change */
825 958
826 dout("map_osds tid %llu pgid %d.%x osd%d (was osd%d)\n", 959 dout("map_request tid %llu pgid %d.%x osd%d (was osd%d)\n",
827 req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o, 960 req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
828 req->r_osd ? req->r_osd->o_osd : -1); 961 req->r_osd ? req->r_osd->o_osd : -1);
829 962
@@ -841,10 +974,12 @@ static int __map_osds(struct ceph_osd_client *osdc,
841 if (!req->r_osd && o >= 0) { 974 if (!req->r_osd && o >= 0) {
842 err = -ENOMEM; 975 err = -ENOMEM;
843 req->r_osd = create_osd(osdc); 976 req->r_osd = create_osd(osdc);
844 if (!req->r_osd) 977 if (!req->r_osd) {
978 list_move(&req->r_req_lru_item, &osdc->req_notarget);
845 goto out; 979 goto out;
980 }
846 981
847 dout("map_osds osd %p is osd%d\n", req->r_osd, o); 982 dout("map_request osd %p is osd%d\n", req->r_osd, o);
848 req->r_osd->o_osd = o; 983 req->r_osd->o_osd = o;
849 req->r_osd->o_con.peer_name.num = cpu_to_le64(o); 984 req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
850 __insert_osd(osdc, req->r_osd); 985 __insert_osd(osdc, req->r_osd);
@@ -855,6 +990,9 @@ static int __map_osds(struct ceph_osd_client *osdc,
855 if (req->r_osd) { 990 if (req->r_osd) {
856 __remove_osd_from_lru(req->r_osd); 991 __remove_osd_from_lru(req->r_osd);
857 list_add(&req->r_osd_item, &req->r_osd->o_requests); 992 list_add(&req->r_osd_item, &req->r_osd->o_requests);
993 list_move(&req->r_req_lru_item, &osdc->req_unsent);
994 } else {
995 list_move(&req->r_req_lru_item, &osdc->req_notarget);
858 } 996 }
859 err = 1; /* osd or pg changed */ 997 err = 1; /* osd or pg changed */
860 998
@@ -869,16 +1007,6 @@ static int __send_request(struct ceph_osd_client *osdc,
869 struct ceph_osd_request *req) 1007 struct ceph_osd_request *req)
870{ 1008{
871 struct ceph_osd_request_head *reqhead; 1009 struct ceph_osd_request_head *reqhead;
872 int err;
873
874 err = __map_osds(osdc, req);
875 if (err < 0)
876 return err;
877 if (req->r_osd == NULL) {
878 dout("send_request %p no up osds in pg\n", req);
879 ceph_monc_request_next_osdmap(&osdc->client->monc);
880 return 0;
881 }
882 1010
883 dout("send_request %p tid %llu to osd%d flags %d\n", 1011 dout("send_request %p tid %llu to osd%d flags %d\n",
884 req, req->r_tid, req->r_osd->o_osd, req->r_flags); 1012 req, req->r_tid, req->r_osd->o_osd, req->r_flags);
@@ -898,6 +1026,21 @@ static int __send_request(struct ceph_osd_client *osdc,
898} 1026}
899 1027
900/* 1028/*
1029 * Send any requests in the queue (req_unsent).
1030 */
1031static void send_queued(struct ceph_osd_client *osdc)
1032{
1033 struct ceph_osd_request *req, *tmp;
1034
1035 dout("send_queued\n");
1036 mutex_lock(&osdc->request_mutex);
1037 list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) {
1038 __send_request(osdc, req);
1039 }
1040 mutex_unlock(&osdc->request_mutex);
1041}
1042
1043/*
901 * Timeout callback, called every N seconds when 1 or more osd 1044 * Timeout callback, called every N seconds when 1 or more osd
 902 * requests have been active for more than N seconds. When this 1045
903 * happens, we ping all OSDs with requests who have timed out to 1046 * happens, we ping all OSDs with requests who have timed out to
@@ -916,30 +1059,13 @@ static void handle_timeout(struct work_struct *work)
916 unsigned long keepalive = 1059 unsigned long keepalive =
917 osdc->client->options->osd_keepalive_timeout * HZ; 1060 osdc->client->options->osd_keepalive_timeout * HZ;
918 unsigned long last_stamp = 0; 1061 unsigned long last_stamp = 0;
919 struct rb_node *p;
920 struct list_head slow_osds; 1062 struct list_head slow_osds;
921
922 dout("timeout\n"); 1063 dout("timeout\n");
923 down_read(&osdc->map_sem); 1064 down_read(&osdc->map_sem);
924 1065
925 ceph_monc_request_next_osdmap(&osdc->client->monc); 1066 ceph_monc_request_next_osdmap(&osdc->client->monc);
926 1067
927 mutex_lock(&osdc->request_mutex); 1068 mutex_lock(&osdc->request_mutex);
928 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
929 req = rb_entry(p, struct ceph_osd_request, r_node);
930
931 if (req->r_resend) {
932 int err;
933
934 dout("osdc resending prev failed %lld\n", req->r_tid);
935 err = __send_request(osdc, req);
936 if (err)
937 dout("osdc failed again on %lld\n", req->r_tid);
938 else
939 req->r_resend = false;
940 continue;
941 }
942 }
943 1069
944 /* 1070 /*
945 * reset osds that appear to be _really_ unresponsive. this 1071 * reset osds that appear to be _really_ unresponsive. this
@@ -963,7 +1089,7 @@ static void handle_timeout(struct work_struct *work)
963 BUG_ON(!osd); 1089 BUG_ON(!osd);
964 pr_warning(" tid %llu timed out on osd%d, will reset osd\n", 1090 pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
965 req->r_tid, osd->o_osd); 1091 req->r_tid, osd->o_osd);
966 __kick_requests(osdc, osd); 1092 __kick_osd_requests(osdc, osd);
967 } 1093 }
968 1094
969 /* 1095 /*
@@ -991,7 +1117,7 @@ static void handle_timeout(struct work_struct *work)
991 1117
992 __schedule_osd_timeout(osdc); 1118 __schedule_osd_timeout(osdc);
993 mutex_unlock(&osdc->request_mutex); 1119 mutex_unlock(&osdc->request_mutex);
994 1120 send_queued(osdc);
995 up_read(&osdc->map_sem); 1121 up_read(&osdc->map_sem);
996} 1122}
997 1123
@@ -1035,7 +1161,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1035 numops * sizeof(struct ceph_osd_op)) 1161 numops * sizeof(struct ceph_osd_op))
1036 goto bad; 1162 goto bad;
1037 dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result); 1163 dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
1038
1039 /* lookup */ 1164 /* lookup */
1040 mutex_lock(&osdc->request_mutex); 1165 mutex_lock(&osdc->request_mutex);
1041 req = __lookup_request(osdc, tid); 1166 req = __lookup_request(osdc, tid);
@@ -1079,6 +1204,9 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1079 1204
1080 dout("handle_reply tid %llu flags %d\n", tid, flags); 1205 dout("handle_reply tid %llu flags %d\n", tid, flags);
1081 1206
1207 if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
1208 __register_linger_request(osdc, req);
1209
1082 /* either this is a read, or we got the safe response */ 1210 /* either this is a read, or we got the safe response */
1083 if (result < 0 || 1211 if (result < 0 ||
1084 (flags & CEPH_OSD_FLAG_ONDISK) || 1212 (flags & CEPH_OSD_FLAG_ONDISK) ||
@@ -1099,6 +1227,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1099 } 1227 }
1100 1228
1101done: 1229done:
1230 dout("req=%p req->r_linger=%d\n", req, req->r_linger);
1102 ceph_osdc_put_request(req); 1231 ceph_osdc_put_request(req);
1103 return; 1232 return;
1104 1233
@@ -1109,108 +1238,83 @@ bad:
1109 ceph_msg_dump(msg); 1238 ceph_msg_dump(msg);
1110} 1239}
1111 1240
1112 1241static void reset_changed_osds(struct ceph_osd_client *osdc)
1113static int __kick_requests(struct ceph_osd_client *osdc,
1114 struct ceph_osd *kickosd)
1115{ 1242{
1116 struct ceph_osd_request *req;
1117 struct rb_node *p, *n; 1243 struct rb_node *p, *n;
1118 int needmap = 0;
1119 int err;
1120 1244
1121 dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1); 1245 for (p = rb_first(&osdc->osds); p; p = n) {
1122 if (kickosd) { 1246 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
1123 err = __reset_osd(osdc, kickosd); 1247
1124 if (err == -EAGAIN) 1248 n = rb_next(p);
1125 return 1; 1249 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
1126 } else { 1250 memcmp(&osd->o_con.peer_addr,
1127 for (p = rb_first(&osdc->osds); p; p = n) { 1251 ceph_osd_addr(osdc->osdmap,
1128 struct ceph_osd *osd = 1252 osd->o_osd),
1129 rb_entry(p, struct ceph_osd, o_node); 1253 sizeof(struct ceph_entity_addr)) != 0)
1130 1254 __reset_osd(osdc, osd);
1131 n = rb_next(p);
1132 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
1133 memcmp(&osd->o_con.peer_addr,
1134 ceph_osd_addr(osdc->osdmap,
1135 osd->o_osd),
1136 sizeof(struct ceph_entity_addr)) != 0)
1137 __reset_osd(osdc, osd);
1138 }
1139 } 1255 }
1256}
1257
1258/*
1259 * Requeue requests whose mapping to an OSD has changed. If requests map to
1260 * no osd, request a new map.
1261 *
1262 * Caller should hold map_sem for read and request_mutex.
1263 */
1264static void kick_requests(struct ceph_osd_client *osdc)
1265{
1266 struct ceph_osd_request *req, *nreq;
1267 struct rb_node *p;
1268 int needmap = 0;
1269 int err;
1140 1270
1271 dout("kick_requests\n");
1272 mutex_lock(&osdc->request_mutex);
1141 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { 1273 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
1142 req = rb_entry(p, struct ceph_osd_request, r_node); 1274 req = rb_entry(p, struct ceph_osd_request, r_node);
1143 1275 err = __map_request(osdc, req);
1144 if (req->r_resend) { 1276 if (err < 0)
1145 dout(" r_resend set on tid %llu\n", req->r_tid); 1277 continue; /* error */
1146 __cancel_request(req); 1278 if (req->r_osd == NULL) {
1147 goto kick; 1279 dout("%p tid %llu maps to no osd\n", req, req->r_tid);
1148 } 1280 needmap++; /* request a newer map */
1149 if (req->r_osd && kickosd == req->r_osd) { 1281 } else if (err > 0) {
1150 __cancel_request(req); 1282 dout("%p tid %llu requeued on osd%d\n", req, req->r_tid,
1151 goto kick; 1283 req->r_osd ? req->r_osd->o_osd : -1);
1284 if (!req->r_linger)
1285 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1152 } 1286 }
1287 }
1288
1289 list_for_each_entry_safe(req, nreq, &osdc->req_linger,
1290 r_linger_item) {
1291 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
1153 1292
1154 err = __map_osds(osdc, req); 1293 err = __map_request(osdc, req);
1155 if (err == 0) 1294 if (err == 0)
1156 continue; /* no change */ 1295 continue; /* no change and no osd was specified */
1157 if (err < 0) { 1296 if (err < 0)
1158 /* 1297 continue; /* hrm! */
1159 * FIXME: really, we should set the request
1160 * error and fail if this isn't a 'nofail'
1161 * request, but that's a fair bit more
1162 * complicated to do. So retry!
1163 */
1164 dout(" setting r_resend on %llu\n", req->r_tid);
1165 req->r_resend = true;
1166 continue;
1167 }
1168 if (req->r_osd == NULL) { 1298 if (req->r_osd == NULL) {
1169 dout("tid %llu maps to no valid osd\n", req->r_tid); 1299 dout("tid %llu maps to no valid osd\n", req->r_tid);
1170 needmap++; /* request a newer map */ 1300 needmap++; /* request a newer map */
1171 continue; 1301 continue;
1172 } 1302 }
1173 1303
1174kick: 1304 dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
1175 dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
1176 req->r_osd ? req->r_osd->o_osd : -1); 1305 req->r_osd ? req->r_osd->o_osd : -1);
1177 req->r_flags |= CEPH_OSD_FLAG_RETRY; 1306 __unregister_linger_request(osdc, req);
1178 err = __send_request(osdc, req); 1307 __register_request(osdc, req);
1179 if (err) {
1180 dout(" setting r_resend on %llu\n", req->r_tid);
1181 req->r_resend = true;
1182 }
1183 } 1308 }
1184
1185 return needmap;
1186}
1187
1188/*
1189 * Resubmit osd requests whose osd or osd address has changed. Request
1190 * a new osd map if osds are down, or we are otherwise unable to determine
1191 * how to direct a request.
1192 *
1193 * Close connections to down osds.
1194 *
1195 * If @who is specified, resubmit requests for that specific osd.
1196 *
1197 * Caller should hold map_sem for read and request_mutex.
1198 */
1199static void kick_requests(struct ceph_osd_client *osdc,
1200 struct ceph_osd *kickosd)
1201{
1202 int needmap;
1203
1204 mutex_lock(&osdc->request_mutex);
1205 needmap = __kick_requests(osdc, kickosd);
1206 mutex_unlock(&osdc->request_mutex); 1309 mutex_unlock(&osdc->request_mutex);
1207 1310
1208 if (needmap) { 1311 if (needmap) {
1209 dout("%d requests for down osds, need new map\n", needmap); 1312 dout("%d requests for down osds, need new map\n", needmap);
1210 ceph_monc_request_next_osdmap(&osdc->client->monc); 1313 ceph_monc_request_next_osdmap(&osdc->client->monc);
1211 } 1314 }
1212
1213} 1315}
1316
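
kick_requests() now remaps every registered request, counts the ones that end up with no OSD, and only then asks the monitor for a newer map. The shape of that scan, with map_request() as a toy stand-in returning error/no-change/changed like __map_request():

#include <stdio.h>

/* Stand-in for __map_request(): <0 error, 0 no change, >0 moved.
 * osd_out receives the target OSD, or -1 if none is up. */
static int map_request(int tid, int *osd_out)
{
	if (tid % 2) {			/* toy mapping: odd tids are homeless */
		*osd_out = -1;
		return 1;
	}
	*osd_out = tid % 4;
	return tid < 2 ? 0 : 1;		/* pretend low tids were unchanged */
}

int main(void)
{
	int needmap = 0;
	int tid, osd;

	for (tid = 0; tid < 6; tid++) {
		int err = map_request(tid, &osd);

		if (err < 0)
			continue;		/* leave for a later kick */
		if (osd < 0) {
			printf("tid %d maps to no osd\n", tid);
			needmap++;		/* request a newer map */
		} else if (err > 0) {
			printf("tid %d requeued on osd%d\n", tid, osd);
		}
	}

	if (needmap)
		printf("%d requests for down osds, need new map\n", needmap);
	return 0;
}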
1317
1214/* 1318/*
1215 * Process updated osd map. 1319 * Process updated osd map.
1216 * 1320 *
@@ -1263,6 +1367,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1263 ceph_osdmap_destroy(osdc->osdmap); 1367 ceph_osdmap_destroy(osdc->osdmap);
1264 osdc->osdmap = newmap; 1368 osdc->osdmap = newmap;
1265 } 1369 }
1370 kick_requests(osdc);
1371 reset_changed_osds(osdc);
1266 } else { 1372 } else {
1267 dout("ignoring incremental map %u len %d\n", 1373 dout("ignoring incremental map %u len %d\n",
1268 epoch, maplen); 1374 epoch, maplen);
@@ -1300,6 +1406,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1300 osdc->osdmap = newmap; 1406 osdc->osdmap = newmap;
1301 if (oldmap) 1407 if (oldmap)
1302 ceph_osdmap_destroy(oldmap); 1408 ceph_osdmap_destroy(oldmap);
1409 kick_requests(osdc);
1303 } 1410 }
1304 p += maplen; 1411 p += maplen;
1305 nr_maps--; 1412 nr_maps--;
@@ -1308,8 +1415,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1308done: 1415done:
1309 downgrade_write(&osdc->map_sem); 1416 downgrade_write(&osdc->map_sem);
1310 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); 1417 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
1311 if (newmap) 1418 send_queued(osdc);
1312 kick_requests(osdc, NULL);
1313 up_read(&osdc->map_sem); 1419 up_read(&osdc->map_sem);
1314 wake_up_all(&osdc->client->auth_wq); 1420 wake_up_all(&osdc->client->auth_wq);
1315 return; 1421 return;
@@ -1322,6 +1428,223 @@ bad:
1322} 1428}
1323 1429
1324/* 1430/*
1431 * watch/notify callback event infrastructure
1432 *
1433 * These callbacks are used both for watch and notify operations.
1434 */
1435static void __release_event(struct kref *kref)
1436{
1437 struct ceph_osd_event *event =
1438 container_of(kref, struct ceph_osd_event, kref);
1439
1440 dout("__release_event %p\n", event);
1441 kfree(event);
1442}
1443
1444static void get_event(struct ceph_osd_event *event)
1445{
1446 kref_get(&event->kref);
1447}
1448
1449void ceph_osdc_put_event(struct ceph_osd_event *event)
1450{
1451 kref_put(&event->kref, __release_event);
1452}
1453EXPORT_SYMBOL(ceph_osdc_put_event);
1454
1455static void __insert_event(struct ceph_osd_client *osdc,
1456 struct ceph_osd_event *new)
1457{
1458 struct rb_node **p = &osdc->event_tree.rb_node;
1459 struct rb_node *parent = NULL;
1460 struct ceph_osd_event *event = NULL;
1461
1462 while (*p) {
1463 parent = *p;
1464 event = rb_entry(parent, struct ceph_osd_event, node);
1465 if (new->cookie < event->cookie)
1466 p = &(*p)->rb_left;
1467 else if (new->cookie > event->cookie)
1468 p = &(*p)->rb_right;
1469 else
1470 BUG();
1471 }
1472
1473 rb_link_node(&new->node, parent, p);
1474 rb_insert_color(&new->node, &osdc->event_tree);
1475}
1476
1477static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
1478 u64 cookie)
1479{
1480 struct rb_node **p = &osdc->event_tree.rb_node;
1481 struct rb_node *parent = NULL;
1482 struct ceph_osd_event *event = NULL;
1483
1484 while (*p) {
1485 parent = *p;
1486 event = rb_entry(parent, struct ceph_osd_event, node);
1487 if (cookie < event->cookie)
1488 p = &(*p)->rb_left;
1489 else if (cookie > event->cookie)
1490 p = &(*p)->rb_right;
1491 else
1492 return event;
1493 }
1494 return NULL;
1495}
1496
1497static void __remove_event(struct ceph_osd_event *event)
1498{
1499 struct ceph_osd_client *osdc = event->osdc;
1500
1501 if (!RB_EMPTY_NODE(&event->node)) {
1502 dout("__remove_event removed %p\n", event);
1503 rb_erase(&event->node, &osdc->event_tree);
1504 ceph_osdc_put_event(event);
1505 } else {
1506 dout("__remove_event didn't remove %p\n", event);
1507 }
1508}
1509
1510int ceph_osdc_create_event(struct ceph_osd_client *osdc,
1511 void (*event_cb)(u64, u64, u8, void *),
1512 int one_shot, void *data,
1513 struct ceph_osd_event **pevent)
1514{
1515 struct ceph_osd_event *event;
1516
1517 event = kmalloc(sizeof(*event), GFP_NOIO);
1518 if (!event)
1519 return -ENOMEM;
1520
1521 dout("create_event %p\n", event);
1522 event->cb = event_cb;
1523 event->one_shot = one_shot;
1524 event->data = data;
1525 event->osdc = osdc;
1526 INIT_LIST_HEAD(&event->osd_node);
1527 kref_init(&event->kref); /* one ref for us */
1528 kref_get(&event->kref); /* one ref for the caller */
1529 init_completion(&event->completion);
1530
1531 spin_lock(&osdc->event_lock);
1532 event->cookie = ++osdc->event_count;
1533 __insert_event(osdc, event);
1534 spin_unlock(&osdc->event_lock);
1535
1536 *pevent = event;
1537 return 0;
1538}
1539EXPORT_SYMBOL(ceph_osdc_create_event);
1540
1541void ceph_osdc_cancel_event(struct ceph_osd_event *event)
1542{
1543 struct ceph_osd_client *osdc = event->osdc;
1544
1545 dout("cancel_event %p\n", event);
1546 spin_lock(&osdc->event_lock);
1547 __remove_event(event);
1548 spin_unlock(&osdc->event_lock);
1549 ceph_osdc_put_event(event); /* caller's */
1550}
1551EXPORT_SYMBOL(ceph_osdc_cancel_event);
1552
1553
1554static void do_event_work(struct work_struct *work)
1555{
1556 struct ceph_osd_event_work *event_work =
1557 container_of(work, struct ceph_osd_event_work, work);
1558 struct ceph_osd_event *event = event_work->event;
1559 u64 ver = event_work->ver;
1560 u64 notify_id = event_work->notify_id;
1561 u8 opcode = event_work->opcode;
1562
1563 dout("do_event_work completing %p\n", event);
1564 event->cb(ver, notify_id, opcode, event->data);
1565 complete(&event->completion);
1566 dout("do_event_work completed %p\n", event);
1567 ceph_osdc_put_event(event);
1568 kfree(event_work);
1569}
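
do_event_work() consumes a heap-allocated ceph_osd_event_work and frees it itself, the usual way to pass per-notification context through a workqueue. A pthread-based analogue of that ownership handoff (POSIX threads assumed; the kernel workqueue machinery is not reproduced):

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct event_work {
	uint64_t ver, notify_id;
	uint8_t opcode;
};

/* The worker owns the context: it is freed here, not by the submitter. */
static void *do_event_work(void *arg)
{
	struct event_work *w = arg;

	printf("notify id %llu ver %llu opcode %u\n",
	       (unsigned long long)w->notify_id,
	       (unsigned long long)w->ver, w->opcode);
	free(w);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct event_work *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->ver = 7;
	w->notify_id = 42;
	w->opcode = 1;

	/* Hand the context off; after this the submitter must not touch w. */
	if (pthread_create(&tid, NULL, do_event_work, w) != 0) {
		free(w);	/* handoff failed: ownership stays here */
		return 1;
	}
	pthread_join(tid, NULL);
	return 0;
}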
1570
1571
1572/*
1573 * Process osd watch notifications
1574 */
1575void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1576{
1577 void *p, *end;
1578 u8 proto_ver;
1579 u64 cookie, ver, notify_id;
1580 u8 opcode;
1581 struct ceph_osd_event *event;
1582 struct ceph_osd_event_work *event_work;
1583
1584 p = msg->front.iov_base;
1585 end = p + msg->front.iov_len;
1586
1587 ceph_decode_8_safe(&p, end, proto_ver, bad);
1588 ceph_decode_8_safe(&p, end, opcode, bad);
1589 ceph_decode_64_safe(&p, end, cookie, bad);
1590 ceph_decode_64_safe(&p, end, ver, bad);
1591 ceph_decode_64_safe(&p, end, notify_id, bad);
1592
1593 spin_lock(&osdc->event_lock);
1594 event = __find_event(osdc, cookie);
1595 if (event) {
1596 get_event(event);
1597 if (event->one_shot)
1598 __remove_event(event);
1599 }
1600 spin_unlock(&osdc->event_lock);
1601 dout("handle_watch_notify cookie %lld ver %lld event %p\n",
1602 cookie, ver, event);
1603 if (event) {
 1604 event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
 1605 if (!event_work) {
 1606 dout("ERROR: could not allocate event_work\n");
 1607 goto done_err;
 1608 }
 1609 INIT_WORK(&event_work->work, do_event_work);
1610 event_work->event = event;
1611 event_work->ver = ver;
1612 event_work->notify_id = notify_id;
1613 event_work->opcode = opcode;
1614 if (!queue_work(osdc->notify_wq, &event_work->work)) {
1615 dout("WARNING: failed to queue notify event work\n");
1616 goto done_err;
1617 }
1618 }
1619
1620 return;
1621
1622done_err:
1623 complete(&event->completion);
1624 ceph_osdc_put_event(event);
1625 return;
1626
1627bad:
1628 pr_err("osdc handle_watch_notify corrupt msg\n");
1629 return;
1630}
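
handle_watch_notify() pulls each field off the message front with the ceph_decode_*_safe() helpers, which jump to the bad: label when fewer bytes remain than the field needs. The same bounds-checked cursor in plain C (decode_u8/decode_u64 are illustrative stand-ins; the kernel helpers also byte-swap, which is skipped here):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/* Advance *p by len only if the buffer still holds len bytes. */
static bool decode_bytes(const uint8_t **p, const uint8_t *end,
			 void *out, size_t len)
{
	if ((size_t)(end - *p) < len)
		return false;
	memcpy(out, *p, len);
	*p += len;
	return true;
}

static bool decode_u8(const uint8_t **p, const uint8_t *end, uint8_t *v)
{
	return decode_bytes(p, end, v, sizeof(*v));
}

static bool decode_u64(const uint8_t **p, const uint8_t *end, uint64_t *v)
{
	return decode_bytes(p, end, v, sizeof(*v));	/* host order only */
}

int main(void)
{
	uint8_t msg[1 + 8] = { 0x01 };			/* opcode + cookie */
	uint64_t cookie = 99;
	const uint8_t *p = msg, *end = msg + sizeof(msg);
	uint8_t opcode;

	memcpy(msg + 1, &cookie, sizeof(cookie));

	if (!decode_u8(&p, end, &opcode) || !decode_u64(&p, end, &cookie))
		goto bad;
	printf("opcode %u cookie %llu\n", opcode, (unsigned long long)cookie);

	/* A third read overruns the buffer and takes the error path. */
	if (!decode_u64(&p, end, &cookie))
		goto bad;
	return 0;
bad:
	printf("corrupt msg\n");
	return 1;
}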
1631
1632int ceph_osdc_wait_event(struct ceph_osd_event *event, unsigned long timeout)
1633{
1634 int err;
1635
1636 dout("wait_event %p\n", event);
1637 err = wait_for_completion_interruptible_timeout(&event->completion,
1638 timeout * HZ);
1639 ceph_osdc_put_event(event);
1640 if (err > 0)
1641 err = 0;
1642 dout("wait_event %p returns %d\n", event, err);
1643 return err;
1644}
1645EXPORT_SYMBOL(ceph_osdc_wait_event);
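
ceph_osdc_wait_event() blocks on the event's completion with a timeout and collapses a positive "completed" return to 0. A sketch of a completion with a relative timeout on POSIX condition variables (clock handling kept deliberately simple):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Returns 0 if completed, -ETIMEDOUT if the timer fired first. */
static int wait_for_completion_timeout(struct completion *c, unsigned secs)
{
	struct timespec ts;
	int err = 0, ret;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&c->lock);
	while (!c->done && err == 0)
		err = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
	ret = c->done ? 0 : -err;
	pthread_mutex_unlock(&c->lock);
	return ret;
}

static void *worker(void *arg)
{
	complete(arg);
	return NULL;
}

int main(void)
{
	struct completion c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};
	pthread_t tid;

	pthread_create(&tid, NULL, worker, &c);
	printf("wait returned %d\n", wait_for_completion_timeout(&c, 5));
	pthread_join(tid, NULL);
	return 0;
}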
1646
1647/*
1325 * Register request, send initial attempt. 1648 * Register request, send initial attempt.
1326 */ 1649 */
1327int ceph_osdc_start_request(struct ceph_osd_client *osdc, 1650int ceph_osdc_start_request(struct ceph_osd_client *osdc,
@@ -1347,15 +1670,22 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 1347 * the request still hasn't been touched yet. 1670
1348 */ 1671 */
1349 if (req->r_sent == 0) { 1672 if (req->r_sent == 0) {
1350 rc = __send_request(osdc, req); 1673 rc = __map_request(osdc, req);
1351 if (rc) { 1674 if (rc < 0)
1352 if (nofail) { 1675 return rc;
1353 dout("osdc_start_request failed send, " 1676 if (req->r_osd == NULL) {
1354 " marking %lld\n", req->r_tid); 1677 dout("send_request %p no up osds in pg\n", req);
1355 req->r_resend = true; 1678 ceph_monc_request_next_osdmap(&osdc->client->monc);
1356 rc = 0; 1679 } else {
1357 } else { 1680 rc = __send_request(osdc, req);
1358 __unregister_request(osdc, req); 1681 if (rc) {
1682 if (nofail) {
1683 dout("osdc_start_request failed send, "
1684 " will retry %lld\n", req->r_tid);
1685 rc = 0;
1686 } else {
1687 __unregister_request(osdc, req);
1688 }
1359 } 1689 }
1360 } 1690 }
1361 } 1691 }
@@ -1441,9 +1771,15 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
1441 INIT_LIST_HEAD(&osdc->osd_lru); 1771 INIT_LIST_HEAD(&osdc->osd_lru);
1442 osdc->requests = RB_ROOT; 1772 osdc->requests = RB_ROOT;
1443 INIT_LIST_HEAD(&osdc->req_lru); 1773 INIT_LIST_HEAD(&osdc->req_lru);
1774 INIT_LIST_HEAD(&osdc->req_unsent);
1775 INIT_LIST_HEAD(&osdc->req_notarget);
1776 INIT_LIST_HEAD(&osdc->req_linger);
1444 osdc->num_requests = 0; 1777 osdc->num_requests = 0;
1445 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); 1778 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
1446 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); 1779 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
1780 spin_lock_init(&osdc->event_lock);
1781 osdc->event_tree = RB_ROOT;
1782 osdc->event_count = 0;
1447 1783
1448 schedule_delayed_work(&osdc->osds_timeout_work, 1784 schedule_delayed_work(&osdc->osds_timeout_work,
1449 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); 1785 round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ));
@@ -1463,6 +1799,13 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
1463 "osd_op_reply"); 1799 "osd_op_reply");
1464 if (err < 0) 1800 if (err < 0)
1465 goto out_msgpool; 1801 goto out_msgpool;
1802
1803 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
1804 if (IS_ERR(osdc->notify_wq)) {
1805 err = PTR_ERR(osdc->notify_wq);
1806 osdc->notify_wq = NULL;
1807 goto out_msgpool;
1808 }
1466 return 0; 1809 return 0;
1467 1810
1468out_msgpool: 1811out_msgpool:
@@ -1476,6 +1819,8 @@ EXPORT_SYMBOL(ceph_osdc_init);
1476 1819
1477void ceph_osdc_stop(struct ceph_osd_client *osdc) 1820void ceph_osdc_stop(struct ceph_osd_client *osdc)
1478{ 1821{
1822 flush_workqueue(osdc->notify_wq);
1823 destroy_workqueue(osdc->notify_wq);
1479 cancel_delayed_work_sync(&osdc->timeout_work); 1824 cancel_delayed_work_sync(&osdc->timeout_work);
1480 cancel_delayed_work_sync(&osdc->osds_timeout_work); 1825 cancel_delayed_work_sync(&osdc->osds_timeout_work);
1481 if (osdc->osdmap) { 1826 if (osdc->osdmap) {
@@ -1483,6 +1828,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
1483 osdc->osdmap = NULL; 1828 osdc->osdmap = NULL;
1484 } 1829 }
1485 remove_old_osds(osdc, 1); 1830 remove_old_osds(osdc, 1);
1831 WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
1486 mempool_destroy(osdc->req_mempool); 1832 mempool_destroy(osdc->req_mempool);
1487 ceph_msgpool_destroy(&osdc->msgpool_op); 1833 ceph_msgpool_destroy(&osdc->msgpool_op);
1488 ceph_msgpool_destroy(&osdc->msgpool_op_reply); 1834 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
@@ -1591,6 +1937,9 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
1591 case CEPH_MSG_OSD_OPREPLY: 1937 case CEPH_MSG_OSD_OPREPLY:
1592 handle_reply(osdc, msg, con); 1938 handle_reply(osdc, msg, con);
1593 break; 1939 break;
1940 case CEPH_MSG_WATCH_NOTIFY:
1941 handle_watch_notify(osdc, msg);
1942 break;
1594 1943
1595 default: 1944 default:
1596 pr_err("received unknown message type %d %s\n", type, 1945 pr_err("received unknown message type %d %s\n", type,
@@ -1684,6 +2033,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
1684 2033
1685 switch (type) { 2034 switch (type) {
1686 case CEPH_MSG_OSD_MAP: 2035 case CEPH_MSG_OSD_MAP:
2036 case CEPH_MSG_WATCH_NOTIFY:
1687 return ceph_msg_new(type, front, GFP_NOFS); 2037 return ceph_msg_new(type, front, GFP_NOFS);
1688 case CEPH_MSG_OSD_OPREPLY: 2038 case CEPH_MSG_OSD_OPREPLY:
1689 return get_reply(con, hdr, skip); 2039 return get_reply(con, hdr, skip);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 36e603c78ce9..706502ff64aa 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -350,7 +350,7 @@ static int __init init_net_drop_monitor(void)
350 struct per_cpu_dm_data *data; 350 struct per_cpu_dm_data *data;
351 int cpu, rc; 351 int cpu, rc;
352 352
353 printk(KERN_INFO "Initalizing network drop monitor service\n"); 353 printk(KERN_INFO "Initializing network drop monitor service\n");
354 354
355 if (sizeof(void *) > 8) { 355 if (sizeof(void *) > 8) {
356 printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n"); 356 printk(KERN_ERR "Unable to store program counters on this arch, Drop monitor failed\n");
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c1a71bb738da..a1086fb0c0c7 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1457,6 +1457,9 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
1457{ 1457{
1458 int err; 1458 int err;
1459 1459
1460 if (!dev->ethtool_ops->set_sg)
1461 return -EOPNOTSUPP;
1462
1460 if (data && !(dev->features & NETIF_F_ALL_CSUM)) 1463 if (data && !(dev->features & NETIF_F_ALL_CSUM))
1461 return -EINVAL; 1464 return -EINVAL;
1462 1465
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 0c55eaa70e39..aeeece72b72f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3761,7 +3761,10 @@ static int __init pktgen_create_thread(int cpu)
3761 list_add_tail(&t->th_list, &pktgen_threads); 3761 list_add_tail(&t->th_list, &pktgen_threads);
3762 init_completion(&t->start_done); 3762 init_completion(&t->start_done);
3763 3763
3764 p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu); 3764 p = kthread_create_on_node(pktgen_thread_worker,
3765 t,
3766 cpu_to_node(cpu),
3767 "kpktgend_%d", cpu);
3765 if (IS_ERR(p)) { 3768 if (IS_ERR(p)) {
3766 pr_err("kernel_thread() failed for cpu %d\n", t->cpu); 3769 pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
3767 list_del(&t->th_list); 3770 list_del(&t->th_list);
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 0c2826337919..116d3fd3d669 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -435,10 +435,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
435 udpdest.sin_addr.s_addr = htonl(network | addr.station); 435 udpdest.sin_addr.s_addr = htonl(network | addr.station);
436 } 436 }
437 437
438 memset(&ah, 0, sizeof(ah));
438 ah.port = port; 439 ah.port = port;
439 ah.cb = cb & 0x7f; 440 ah.cb = cb & 0x7f;
440 ah.code = 2; /* magic */ 441 ah.code = 2; /* magic */
441 ah.pad = 0;
442 442
443 /* tack our header on the front of the iovec */ 443 /* tack our header on the front of the iovec */
444 size = sizeof(struct aunhdr); 444 size = sizeof(struct aunhdr);
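
The econet change swaps piecemeal field assignment for a memset() of the whole header: assigning members one by one leaves compiler-inserted padding uninitialized, and copying such a struct to the wire leaks stack bytes. An illustration of the padding issue (the struct layout is a hypothetical stand-in for aunhdr):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical wire header: a hidden padding byte follows 'code'
 * on most ABIs, so sizeof(struct hdr) is typically 8, not 7. */
struct hdr {
	uint8_t port;
	uint8_t cb;
	uint8_t code;
	uint32_t handle;
};

static void dump(const char *tag, const struct hdr *h)
{
	const unsigned char *p = (const unsigned char *)h;
	size_t i;

	printf("%s:", tag);
	for (i = 0; i < sizeof(*h); i++)
		printf(" %02x", p[i]);
	printf("\n");
}

int main(void)
{
	struct hdr h;

	/* Zero everything first, then set the fields you mean to set:
	 * padding and any forgotten member go out as 0, not as whatever
	 * happened to be on the stack. */
	memset(&h, 0, sizeof(h));
	h.port = 0x54;
	h.cb = 0x7f;
	h.code = 2;	/* magic */
	dump("wire bytes", &h);
	return 0;
}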
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index b09ed0d080f9..ffcea0d1678e 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -387,7 +387,7 @@ ipt_do_table(struct sk_buff *skb,
387 verdict = (unsigned)(-v) - 1; 387 verdict = (unsigned)(-v) - 1;
388 break; 388 break;
389 } 389 }
390 if (*stackptr == 0) { 390 if (*stackptr <= origptr) {
391 e = get_entry(table_base, 391 e = get_entry(table_base,
392 private->underflow[hook]); 392 private->underflow[hook]);
393 pr_debug("Underflow (this is normal) " 393 pr_debug("Underflow (this is normal) "
@@ -427,10 +427,10 @@ ipt_do_table(struct sk_buff *skb,
427 /* Verdict */ 427 /* Verdict */
428 break; 428 break;
429 } while (!acpar.hotdrop); 429 } while (!acpar.hotdrop);
430 xt_info_rdunlock_bh();
431 pr_debug("Exiting %s; resetting sp from %u to %u\n", 430 pr_debug("Exiting %s; resetting sp from %u to %u\n",
432 __func__, *stackptr, origptr); 431 __func__, *stackptr, origptr);
433 *stackptr = origptr; 432 *stackptr = origptr;
433 xt_info_rdunlock_bh();
434#ifdef DEBUG_ALLOW_ALL 434#ifdef DEBUG_ALLOW_ALL
435 return NF_ACCEPT; 435 return NF_ACCEPT;
436#else 436#else
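
The *_do_table fix (repeated for IPv6 further down) moves the jumpstack restore in front of xt_info_rdunlock_bh(): once the per-cpu lock is dropped, the same jumpstack can be reused immediately, so the state it guards has to be consistent first. The ordering rule in a generic sketch, with a mutex standing in for the per-cpu read lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int stackptr;		/* shared cursor guarded by lock */

static void do_table(void)
{
	unsigned int origptr;

	pthread_mutex_lock(&lock);
	origptr = stackptr;
	stackptr += 3;			/* descend into the "jumpstack" */
	printf("resetting sp from %u to %u\n", stackptr, origptr);

	/* Restore the guarded state *before* dropping the lock; after
	 * unlock another user of the same stack may run immediately. */
	stackptr = origptr;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	do_table();
	do_table();
	return 0;
}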
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 403ca57f6011..d609ac3cb9a4 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -664,8 +664,11 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
664 char buffer[PROC_WRITELEN+1]; 664 char buffer[PROC_WRITELEN+1];
665 unsigned long nodenum; 665 unsigned long nodenum;
666 666
667 if (copy_from_user(buffer, input, PROC_WRITELEN)) 667 if (size > PROC_WRITELEN)
668 return -EIO;
669 if (copy_from_user(buffer, input, size))
668 return -EFAULT; 670 return -EFAULT;
671 buffer[size] = 0;
669 672
670 if (*buffer == '+') { 673 if (*buffer == '+') {
671 nodenum = simple_strtoul(buffer+1, NULL, 10); 674 nodenum = simple_strtoul(buffer+1, NULL, 10);
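
The CLUSTERIP fix is the canonical recipe for a length-controlled write handler: reject oversized input, copy only 'size' bytes, and NUL-terminate before parsing, rather than unconditionally copying PROC_WRITELEN bytes. A userspace sketch of the same validation (handle_write and memcpy stand in for clusterip_proc_write and copy_from_user):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>

#define PROC_WRITELEN 10

/* Returns 0 on success, negative errno-style value on failure. */
static int handle_write(const char *input, size_t size)
{
	char buffer[PROC_WRITELEN + 1];
	long nodenum;

	if (size > PROC_WRITELEN)	/* reject instead of truncating */
		return -EIO;
	memcpy(buffer, input, size);	/* copy_from_user() in the kernel */
	buffer[size] = 0;		/* parse only what was written */

	if (*buffer == '+') {
		nodenum = strtol(buffer + 1, NULL, 10);
		printf("add node %ld\n", nodenum);
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	printf("short: %d\n", handle_write("+7", 2));
	printf("too long: %d\n", handle_write("+12345678901", 12));
	return 0;
}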
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index c9598a9067d7..0b2af9b85cec 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -410,7 +410,7 @@ ip6t_do_table(struct sk_buff *skb,
410 verdict = (unsigned)(-v) - 1; 410 verdict = (unsigned)(-v) - 1;
411 break; 411 break;
412 } 412 }
413 if (*stackptr == 0) 413 if (*stackptr <= origptr)
414 e = get_entry(table_base, 414 e = get_entry(table_base,
415 private->underflow[hook]); 415 private->underflow[hook]);
416 else 416 else
@@ -441,8 +441,8 @@ ip6t_do_table(struct sk_buff *skb,
441 break; 441 break;
442 } while (!acpar.hotdrop); 442 } while (!acpar.hotdrop);
443 443
444 xt_info_rdunlock_bh();
445 *stackptr = origptr; 444 *stackptr = origptr;
445 xt_info_rdunlock_bh();
446 446
447#ifdef DEBUG_ALLOW_ALL 447#ifdef DEBUG_ALLOW_ALL
448 return NF_ACCEPT; 448 return NF_ACCEPT;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 7cb65ef79f9c..6dcf5e7d661b 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -17,6 +17,16 @@
17 17
18static struct ctl_table empty[1]; 18static struct ctl_table empty[1];
19 19
20static ctl_table ipv6_static_skeleton[] = {
21 {
22 .procname = "neigh",
23 .maxlen = 0,
24 .mode = 0555,
25 .child = empty,
26 },
27 { }
28};
29
20static ctl_table ipv6_table_template[] = { 30static ctl_table ipv6_table_template[] = {
21 { 31 {
22 .procname = "route", 32 .procname = "route",
@@ -37,12 +47,6 @@ static ctl_table ipv6_table_template[] = {
37 .mode = 0644, 47 .mode = 0644,
38 .proc_handler = proc_dointvec 48 .proc_handler = proc_dointvec
39 }, 49 },
40 {
41 .procname = "neigh",
42 .maxlen = 0,
43 .mode = 0555,
44 .child = empty,
45 },
46 { } 50 { }
47}; 51};
48 52
@@ -160,7 +164,7 @@ static struct ctl_table_header *ip6_base;
160 164
161int ipv6_static_sysctl_register(void) 165int ipv6_static_sysctl_register(void)
162{ 166{
163 ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty); 167 ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton);
164 if (ip6_base == NULL) 168 if (ip6_base == NULL)
165 return -ENOMEM; 169 return -ENOMEM;
166 return 0; 170 return 0;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 2731b51923d1..9680226640ef 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -148,7 +148,6 @@ static void ipx_destroy_socket(struct sock *sk)
148 ipx_remove_socket(sk); 148 ipx_remove_socket(sk);
149 skb_queue_purge(&sk->sk_receive_queue); 149 skb_queue_purge(&sk->sk_receive_queue);
150 sk_refcnt_debug_dec(sk); 150 sk_refcnt_debug_dec(sk);
151 sock_put(sk);
152} 151}
153 152
154/* 153/*
@@ -1404,6 +1403,7 @@ static int ipx_release(struct socket *sock)
1404 sk_refcnt_debug_release(sk); 1403 sk_refcnt_debug_release(sk);
1405 ipx_destroy_socket(sk); 1404 ipx_destroy_socket(sk);
1406 release_sock(sk); 1405 release_sock(sk);
1406 sock_put(sk);
1407out: 1407out:
1408 return 0; 1408 return 0;
1409} 1409}
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 8d9ce0accc98..a8193f52c13c 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -283,7 +283,7 @@ static __net_init int l2tp_eth_init_net(struct net *net)
283 return 0; 283 return 0;
284} 284}
285 285
286static __net_initdata struct pernet_operations l2tp_eth_net_ops = { 286static struct pernet_operations l2tp_eth_net_ops = {
287 .init = l2tp_eth_init_net, 287 .init = l2tp_eth_init_net,
288 .id = &l2tp_eth_net_id, 288 .id = &l2tp_eth_net_id,
289 .size = sizeof(struct l2tp_eth_net), 289 .size = sizeof(struct l2tp_eth_net),
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 618a615acc9d..d6b48230a540 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -94,16 +94,28 @@ static int
94find_set_type_get(const char *name, u8 family, u8 revision, 94find_set_type_get(const char *name, u8 family, u8 revision,
95 struct ip_set_type **found) 95 struct ip_set_type **found)
96{ 96{
97 struct ip_set_type *type;
98 int err;
99
97 rcu_read_lock(); 100 rcu_read_lock();
98 *found = find_set_type(name, family, revision); 101 *found = find_set_type(name, family, revision);
99 if (*found) { 102 if (*found) {
100 int err = !try_module_get((*found)->me); 103 err = !try_module_get((*found)->me) ? -EFAULT : 0;
101 rcu_read_unlock(); 104 goto unlock;
102 return err ? -EFAULT : 0;
103 } 105 }
106 /* Make sure the type is loaded but we don't support the revision */
107 list_for_each_entry_rcu(type, &ip_set_type_list, list)
108 if (STREQ(type->name, name)) {
109 err = -IPSET_ERR_FIND_TYPE;
110 goto unlock;
111 }
104 rcu_read_unlock(); 112 rcu_read_unlock();
105 113
106 return try_to_load_type(name); 114 return try_to_load_type(name);
115
116unlock:
117 rcu_read_unlock();
118 return err;
107} 119}
108 120
109/* Find a given set type by name and family. 121/* Find a given set type by name and family.
@@ -116,7 +128,7 @@ find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
116 struct ip_set_type *type; 128 struct ip_set_type *type;
117 bool found = false; 129 bool found = false;
118 130
119 *min = *max = 0; 131 *min = 255; *max = 0;
120 rcu_read_lock(); 132 rcu_read_lock();
121 list_for_each_entry_rcu(type, &ip_set_type_list, list) 133 list_for_each_entry_rcu(type, &ip_set_type_list, list)
122 if (STREQ(type->name, name) && 134 if (STREQ(type->name, name) &&
@@ -124,7 +136,7 @@ find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
124 found = true; 136 found = true;
125 if (type->revision < *min) 137 if (type->revision < *min)
126 *min = type->revision; 138 *min = type->revision;
127 else if (type->revision > *max) 139 if (type->revision > *max)
128 *max = type->revision; 140 *max = type->revision;
129 } 141 }
130 rcu_read_unlock(); 142 rcu_read_unlock();
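
find_set_type_minmax() had the classic min/max-scan bug: starting *min at 0 means no revision can ever be smaller, and the 'else if' also skipped the max test whenever the min branch was considered, so a lone revision of 4 reported min 0. The corrected idiom in isolation: start min at the type's maximum (255 for a u8) and test both bounds independently:

#include <stdio.h>
#include <stdint.h>

static void minmax(const uint8_t *rev, int n, uint8_t *min, uint8_t *max)
{
	int i;

	*min = 255;	/* highest possible u8, so any element lowers it */
	*max = 0;

	for (i = 0; i < n; i++) {
		if (rev[i] < *min)
			*min = rev[i];
		if (rev[i] > *max)	/* independent test, not "else if" */
			*max = rev[i];
	}
}

int main(void)
{
	uint8_t revs[] = { 4, 2, 7 };
	uint8_t lo, hi;

	minmax(revs, 3, &lo, &hi);
	printf("min %u max %u\n", lo, hi);	/* min 2 max 7 */
	return 0;
}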
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index adbe787ea5dc..b9214145d357 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -150,6 +150,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
150 struct hash_ipport4_elem data = { }; 150 struct hash_ipport4_elem data = { };
151 u32 ip, ip_to, p, port, port_to; 151 u32 ip, ip_to, p, port, port_to;
152 u32 timeout = h->timeout; 152 u32 timeout = h->timeout;
153 bool with_ports = false;
153 int ret; 154 int ret;
154 155
155 if (unlikely(!tb[IPSET_ATTR_IP] || 156 if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -172,21 +173,15 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
172 173
173 if (tb[IPSET_ATTR_PROTO]) { 174 if (tb[IPSET_ATTR_PROTO]) {
174 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 175 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
176 with_ports = ip_set_proto_with_ports(data.proto);
175 177
176 if (data.proto == 0) 178 if (data.proto == 0)
177 return -IPSET_ERR_INVALID_PROTO; 179 return -IPSET_ERR_INVALID_PROTO;
178 } else 180 } else
179 return -IPSET_ERR_MISSING_PROTO; 181 return -IPSET_ERR_MISSING_PROTO;
180 182
181 switch (data.proto) { 183 if (!(with_ports || data.proto == IPPROTO_ICMP))
182 case IPPROTO_UDP:
183 case IPPROTO_TCP:
184 case IPPROTO_ICMP:
185 break;
186 default:
187 data.port = 0; 184 data.port = 0;
188 break;
189 }
190 185
191 if (tb[IPSET_ATTR_TIMEOUT]) { 186 if (tb[IPSET_ATTR_TIMEOUT]) {
192 if (!with_timeout(h->timeout)) 187 if (!with_timeout(h->timeout))
@@ -195,7 +190,6 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
195 } 190 }
196 191
197 if (adt == IPSET_TEST || 192 if (adt == IPSET_TEST ||
198 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
199 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || 193 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
200 tb[IPSET_ATTR_PORT_TO])) { 194 tb[IPSET_ATTR_PORT_TO])) {
201 ret = adtfn(set, &data, timeout); 195 ret = adtfn(set, &data, timeout);
@@ -219,13 +213,12 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
219 } else 213 } else
220 ip_to = ip; 214 ip_to = ip;
221 215
222 port = ntohs(data.port); 216 port_to = port = ntohs(data.port);
223 if (tb[IPSET_ATTR_PORT_TO]) { 217 if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
224 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); 218 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
225 if (port > port_to) 219 if (port > port_to)
226 swap(port, port_to); 220 swap(port, port_to);
227 } else 221 }
228 port_to = port;
229 222
230 for (; !before(ip_to, ip); ip++) 223 for (; !before(ip_to, ip); ip++)
231 for (p = port; p <= port_to; p++) { 224 for (p = port; p <= port_to; p++) {
@@ -361,6 +354,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
361 struct hash_ipport6_elem data = { }; 354 struct hash_ipport6_elem data = { };
362 u32 port, port_to; 355 u32 port, port_to;
363 u32 timeout = h->timeout; 356 u32 timeout = h->timeout;
357 bool with_ports = false;
364 int ret; 358 int ret;
365 359
366 if (unlikely(!tb[IPSET_ATTR_IP] || 360 if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -385,21 +379,15 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
385 379
386 if (tb[IPSET_ATTR_PROTO]) { 380 if (tb[IPSET_ATTR_PROTO]) {
387 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 381 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
382 with_ports = ip_set_proto_with_ports(data.proto);
388 383
389 if (data.proto == 0) 384 if (data.proto == 0)
390 return -IPSET_ERR_INVALID_PROTO; 385 return -IPSET_ERR_INVALID_PROTO;
391 } else 386 } else
392 return -IPSET_ERR_MISSING_PROTO; 387 return -IPSET_ERR_MISSING_PROTO;
393 388
394 switch (data.proto) { 389 if (!(with_ports || data.proto == IPPROTO_ICMPV6))
395 case IPPROTO_UDP:
396 case IPPROTO_TCP:
397 case IPPROTO_ICMPV6:
398 break;
399 default:
400 data.port = 0; 390 data.port = 0;
401 break;
402 }
403 391
404 if (tb[IPSET_ATTR_TIMEOUT]) { 392 if (tb[IPSET_ATTR_TIMEOUT]) {
405 if (!with_timeout(h->timeout)) 393 if (!with_timeout(h->timeout))
@@ -407,9 +395,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
407 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 395 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
408 } 396 }
409 397
410 if (adt == IPSET_TEST || 398 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
411 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
412 !tb[IPSET_ATTR_PORT_TO]) {
413 ret = adtfn(set, &data, timeout); 399 ret = adtfn(set, &data, timeout);
414 return ip_set_eexist(ret, flags) ? 0 : ret; 400 return ip_set_eexist(ret, flags) ? 0 : ret;
415 } 401 }
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 22e23abb86c6..4642872df6e1 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -154,6 +154,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
154 struct hash_ipportip4_elem data = { }; 154 struct hash_ipportip4_elem data = { };
155 u32 ip, ip_to, p, port, port_to; 155 u32 ip, ip_to, p, port, port_to;
156 u32 timeout = h->timeout; 156 u32 timeout = h->timeout;
157 bool with_ports = false;
157 int ret; 158 int ret;
158 159
159 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 160 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
@@ -180,21 +181,15 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
180 181
181 if (tb[IPSET_ATTR_PROTO]) { 182 if (tb[IPSET_ATTR_PROTO]) {
182 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 183 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
184 with_ports = ip_set_proto_with_ports(data.proto);
183 185
184 if (data.proto == 0) 186 if (data.proto == 0)
185 return -IPSET_ERR_INVALID_PROTO; 187 return -IPSET_ERR_INVALID_PROTO;
186 } else 188 } else
187 return -IPSET_ERR_MISSING_PROTO; 189 return -IPSET_ERR_MISSING_PROTO;
188 190
189 switch (data.proto) { 191 if (!(with_ports || data.proto == IPPROTO_ICMP))
190 case IPPROTO_UDP:
191 case IPPROTO_TCP:
192 case IPPROTO_ICMP:
193 break;
194 default:
195 data.port = 0; 192 data.port = 0;
196 break;
197 }
198 193
199 if (tb[IPSET_ATTR_TIMEOUT]) { 194 if (tb[IPSET_ATTR_TIMEOUT]) {
200 if (!with_timeout(h->timeout)) 195 if (!with_timeout(h->timeout))
@@ -203,7 +198,6 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
203 } 198 }
204 199
205 if (adt == IPSET_TEST || 200 if (adt == IPSET_TEST ||
206 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
207 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || 201 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
208 tb[IPSET_ATTR_PORT_TO])) { 202 tb[IPSET_ATTR_PORT_TO])) {
209 ret = adtfn(set, &data, timeout); 203 ret = adtfn(set, &data, timeout);
@@ -227,13 +221,12 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
227 } else 221 } else
228 ip_to = ip; 222 ip_to = ip;
229 223
230 port = ntohs(data.port); 224 port_to = port = ntohs(data.port);
231 if (tb[IPSET_ATTR_PORT_TO]) { 225 if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
232 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); 226 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
233 if (port > port_to) 227 if (port > port_to)
234 swap(port, port_to); 228 swap(port, port_to);
235 } else 229 }
236 port_to = port;
237 230
238 for (; !before(ip_to, ip); ip++) 231 for (; !before(ip_to, ip); ip++)
239 for (p = port; p <= port_to; p++) { 232 for (p = port; p <= port_to; p++) {
@@ -375,6 +368,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
375 struct hash_ipportip6_elem data = { }; 368 struct hash_ipportip6_elem data = { };
376 u32 port, port_to; 369 u32 port, port_to;
377 u32 timeout = h->timeout; 370 u32 timeout = h->timeout;
371 bool with_ports = false;
378 int ret; 372 int ret;
379 373
380 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 374 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
@@ -403,21 +397,15 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
403 397
404 if (tb[IPSET_ATTR_PROTO]) { 398 if (tb[IPSET_ATTR_PROTO]) {
405 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 399 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
400 with_ports = ip_set_proto_with_ports(data.proto);
406 401
407 if (data.proto == 0) 402 if (data.proto == 0)
408 return -IPSET_ERR_INVALID_PROTO; 403 return -IPSET_ERR_INVALID_PROTO;
409 } else 404 } else
410 return -IPSET_ERR_MISSING_PROTO; 405 return -IPSET_ERR_MISSING_PROTO;
411 406
412 switch (data.proto) { 407 if (!(with_ports || data.proto == IPPROTO_ICMPV6))
413 case IPPROTO_UDP:
414 case IPPROTO_TCP:
415 case IPPROTO_ICMPV6:
416 break;
417 default:
418 data.port = 0; 408 data.port = 0;
419 break;
420 }
421 409
422 if (tb[IPSET_ATTR_TIMEOUT]) { 410 if (tb[IPSET_ATTR_TIMEOUT]) {
423 if (!with_timeout(h->timeout)) 411 if (!with_timeout(h->timeout))
@@ -425,9 +413,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
425 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 413 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
426 } 414 }
427 415
428 if (adt == IPSET_TEST || 416 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
429 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
430 !tb[IPSET_ATTR_PORT_TO]) {
431 ret = adtfn(set, &data, timeout); 417 ret = adtfn(set, &data, timeout);
432 return ip_set_eexist(ret, flags) ? 0 : ret; 418 return ip_set_eexist(ret, flags) ? 0 : ret;
433 } 419 }
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 6033e8b54bbd..2cb84a54b7ad 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -174,6 +174,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
174 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK }; 174 struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
175 u32 ip, ip_to, p, port, port_to; 175 u32 ip, ip_to, p, port, port_to;
176 u32 timeout = h->timeout; 176 u32 timeout = h->timeout;
177 bool with_ports = false;
177 int ret; 178 int ret;
178 179
179 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 180 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
@@ -208,21 +209,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
208 209
209 if (tb[IPSET_ATTR_PROTO]) { 210 if (tb[IPSET_ATTR_PROTO]) {
210 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 211 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
212 with_ports = ip_set_proto_with_ports(data.proto);
211 213
212 if (data.proto == 0) 214 if (data.proto == 0)
213 return -IPSET_ERR_INVALID_PROTO; 215 return -IPSET_ERR_INVALID_PROTO;
214 } else 216 } else
215 return -IPSET_ERR_MISSING_PROTO; 217 return -IPSET_ERR_MISSING_PROTO;
216 218
217 switch (data.proto) { 219 if (!(with_ports || data.proto == IPPROTO_ICMP))
218 case IPPROTO_UDP:
219 case IPPROTO_TCP:
220 case IPPROTO_ICMP:
221 break;
222 default:
223 data.port = 0; 220 data.port = 0;
224 break;
225 }
226 221
227 if (tb[IPSET_ATTR_TIMEOUT]) { 222 if (tb[IPSET_ATTR_TIMEOUT]) {
228 if (!with_timeout(h->timeout)) 223 if (!with_timeout(h->timeout))
@@ -231,7 +226,6 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
231 } 226 }
232 227
233 if (adt == IPSET_TEST || 228 if (adt == IPSET_TEST ||
234 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
235 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || 229 !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
236 tb[IPSET_ATTR_PORT_TO])) { 230 tb[IPSET_ATTR_PORT_TO])) {
237 ret = adtfn(set, &data, timeout); 231 ret = adtfn(set, &data, timeout);
@@ -255,13 +249,12 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
255 } else 249 } else
256 ip_to = ip; 250 ip_to = ip;
257 251
258 port = ntohs(data.port); 252 port_to = port = ntohs(data.port);
259 if (tb[IPSET_ATTR_PORT_TO]) { 253 if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
260 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); 254 port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
261 if (port > port_to) 255 if (port > port_to)
262 swap(port, port_to); 256 swap(port, port_to);
263 } else 257 }
264 port_to = port;
265 258
266 for (; !before(ip_to, ip); ip++) 259 for (; !before(ip_to, ip); ip++)
267 for (p = port; p <= port_to; p++) { 260 for (p = port; p <= port_to; p++) {
@@ -429,6 +422,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
429 struct hash_ipportnet6_elem data = { .cidr = HOST_MASK }; 422 struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
430 u32 port, port_to; 423 u32 port, port_to;
431 u32 timeout = h->timeout; 424 u32 timeout = h->timeout;
425 bool with_ports = false;
432 int ret; 426 int ret;
433 427
434 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || 428 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
@@ -465,21 +459,15 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
465 459
466 if (tb[IPSET_ATTR_PROTO]) { 460 if (tb[IPSET_ATTR_PROTO]) {
467 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 461 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
462 with_ports = ip_set_proto_with_ports(data.proto);
468 463
469 if (data.proto == 0) 464 if (data.proto == 0)
470 return -IPSET_ERR_INVALID_PROTO; 465 return -IPSET_ERR_INVALID_PROTO;
471 } else 466 } else
472 return -IPSET_ERR_MISSING_PROTO; 467 return -IPSET_ERR_MISSING_PROTO;
473 468
474 switch (data.proto) { 469 if (!(with_ports || data.proto == IPPROTO_ICMPV6))
475 case IPPROTO_UDP:
476 case IPPROTO_TCP:
477 case IPPROTO_ICMPV6:
478 break;
479 default:
480 data.port = 0; 470 data.port = 0;
481 break;
482 }
483 471
484 if (tb[IPSET_ATTR_TIMEOUT]) { 472 if (tb[IPSET_ATTR_TIMEOUT]) {
485 if (!with_timeout(h->timeout)) 473 if (!with_timeout(h->timeout))
@@ -487,9 +475,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
487 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 475 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
488 } 476 }
489 477
490 if (adt == IPSET_TEST || 478 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
491 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
492 !tb[IPSET_ATTR_PORT_TO]) {
493 ret = adtfn(set, &data, timeout); 479 ret = adtfn(set, &data, timeout);
494 return ip_set_eexist(ret, flags) ? 0 : ret; 480 return ip_set_eexist(ret, flags) ? 0 : ret;
495 } 481 }
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 34a165626ee9..8598676f2a05 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -170,6 +170,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
170 struct hash_netport4_elem data = { .cidr = HOST_MASK }; 170 struct hash_netport4_elem data = { .cidr = HOST_MASK };
171 u32 port, port_to; 171 u32 port, port_to;
172 u32 timeout = h->timeout; 172 u32 timeout = h->timeout;
173 bool with_ports = false;
173 int ret; 174 int ret;
174 175
175 if (unlikely(!tb[IPSET_ATTR_IP] || 176 if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -198,21 +199,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
198 199
199 if (tb[IPSET_ATTR_PROTO]) { 200 if (tb[IPSET_ATTR_PROTO]) {
200 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 201 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
202 with_ports = ip_set_proto_with_ports(data.proto);
201 203
202 if (data.proto == 0) 204 if (data.proto == 0)
203 return -IPSET_ERR_INVALID_PROTO; 205 return -IPSET_ERR_INVALID_PROTO;
204 } else 206 } else
205 return -IPSET_ERR_MISSING_PROTO; 207 return -IPSET_ERR_MISSING_PROTO;
206 208
207 switch (data.proto) { 209 if (!(with_ports || data.proto == IPPROTO_ICMP))
208 case IPPROTO_UDP:
209 case IPPROTO_TCP:
210 case IPPROTO_ICMP:
211 break;
212 default:
213 data.port = 0; 210 data.port = 0;
214 break;
215 }
216 211
217 if (tb[IPSET_ATTR_TIMEOUT]) { 212 if (tb[IPSET_ATTR_TIMEOUT]) {
218 if (!with_timeout(h->timeout)) 213 if (!with_timeout(h->timeout))
@@ -220,9 +215,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
220 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 215 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
221 } 216 }
222 217
223 if (adt == IPSET_TEST || 218 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
224 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
225 !tb[IPSET_ATTR_PORT_TO]) {
226 ret = adtfn(set, &data, timeout); 219 ret = adtfn(set, &data, timeout);
227 return ip_set_eexist(ret, flags) ? 0 : ret; 220 return ip_set_eexist(ret, flags) ? 0 : ret;
228 } 221 }
@@ -390,6 +383,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
390 struct hash_netport6_elem data = { .cidr = HOST_MASK }; 383 struct hash_netport6_elem data = { .cidr = HOST_MASK };
391 u32 port, port_to; 384 u32 port, port_to;
392 u32 timeout = h->timeout; 385 u32 timeout = h->timeout;
386 bool with_ports = false;
393 int ret; 387 int ret;
394 388
395 if (unlikely(!tb[IPSET_ATTR_IP] || 389 if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -418,21 +412,15 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
418 412
419 if (tb[IPSET_ATTR_PROTO]) { 413 if (tb[IPSET_ATTR_PROTO]) {
420 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); 414 data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
415 with_ports = ip_set_proto_with_ports(data.proto);
421 416
422 if (data.proto == 0) 417 if (data.proto == 0)
423 return -IPSET_ERR_INVALID_PROTO; 418 return -IPSET_ERR_INVALID_PROTO;
424 } else 419 } else
425 return -IPSET_ERR_MISSING_PROTO; 420 return -IPSET_ERR_MISSING_PROTO;
426 421
427 switch (data.proto) { 422 if (!(with_ports || data.proto == IPPROTO_ICMPV6))
428 case IPPROTO_UDP:
429 case IPPROTO_TCP:
430 case IPPROTO_ICMPV6:
431 break;
432 default:
433 data.port = 0; 423 data.port = 0;
434 break;
435 }
436 424
437 if (tb[IPSET_ATTR_TIMEOUT]) { 425 if (tb[IPSET_ATTR_TIMEOUT]) {
438 if (!with_timeout(h->timeout)) 426 if (!with_timeout(h->timeout))
@@ -440,9 +428,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
440 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 428 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
441 } 429 }
442 430
443 if (adt == IPSET_TEST || 431 if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
444 !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
445 !tb[IPSET_ATTR_PORT_TO]) {
446 ret = adtfn(set, &data, timeout); 432 ret = adtfn(set, &data, timeout);
447 return ip_set_eexist(ret, flags) ? 0 : ret; 433 return ip_set_eexist(ret, flags) ? 0 : ret;
448 } 434 }
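
All four hunks above replace an open-coded protocol switch with the
ip_set_proto_with_ports() predicate. A minimal sketch of that helper,
assuming it is defined in include/linux/netfilter/ipset/ip_set.h as
its callers here suggest:

	#include <linux/types.h>
	#include <linux/in.h>		/* IPPROTO_TCP, IPPROTO_UDP */

	/* True for protocols whose set elements carry a meaningful port. */
	static inline bool ip_set_proto_with_ports(u8 proto)
	{
		switch (proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			return true;
		}
		return false;
	}

Centralizing the predicate keeps the IPv4 and IPv6 variants of
ip_set_hash_ipportnet and ip_set_hash_netport from drifting apart.
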
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 5c48ffb60c28..2dc6de13ac18 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -43,6 +43,8 @@ EXPORT_SYMBOL(register_ip_vs_app);
43EXPORT_SYMBOL(unregister_ip_vs_app); 43EXPORT_SYMBOL(unregister_ip_vs_app);
44EXPORT_SYMBOL(register_ip_vs_app_inc); 44EXPORT_SYMBOL(register_ip_vs_app_inc);
45 45
46static DEFINE_MUTEX(__ip_vs_app_mutex);
47
46/* 48/*
47 * Get an ip_vs_app object 49 * Get an ip_vs_app object
48 */ 50 */
@@ -167,14 +169,13 @@ int
167register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto, 169register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
168 __u16 port) 170 __u16 port)
169{ 171{
170 struct netns_ipvs *ipvs = net_ipvs(net);
171 int result; 172 int result;
172 173
173 mutex_lock(&ipvs->app_mutex); 174 mutex_lock(&__ip_vs_app_mutex);
174 175
175 result = ip_vs_app_inc_new(net, app, proto, port); 176 result = ip_vs_app_inc_new(net, app, proto, port);
176 177
177 mutex_unlock(&ipvs->app_mutex); 178 mutex_unlock(&__ip_vs_app_mutex);
178 179
179 return result; 180 return result;
180} 181}
@@ -189,11 +190,11 @@ int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
189 /* increase the module use count */ 190 /* increase the module use count */
190 ip_vs_use_count_inc(); 191 ip_vs_use_count_inc();
191 192
192 mutex_lock(&ipvs->app_mutex); 193 mutex_lock(&__ip_vs_app_mutex);
193 194
194 list_add(&app->a_list, &ipvs->app_list); 195 list_add(&app->a_list, &ipvs->app_list);
195 196
196 mutex_unlock(&ipvs->app_mutex); 197 mutex_unlock(&__ip_vs_app_mutex);
197 198
198 return 0; 199 return 0;
199} 200}
@@ -205,10 +206,9 @@ int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
205 */ 206 */
206void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app) 207void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
207{ 208{
208 struct netns_ipvs *ipvs = net_ipvs(net);
209 struct ip_vs_app *inc, *nxt; 209 struct ip_vs_app *inc, *nxt;
210 210
211 mutex_lock(&ipvs->app_mutex); 211 mutex_lock(&__ip_vs_app_mutex);
212 212
213 list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) { 213 list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
214 ip_vs_app_inc_release(net, inc); 214 ip_vs_app_inc_release(net, inc);
@@ -216,7 +216,7 @@ void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
216 216
217 list_del(&app->a_list); 217 list_del(&app->a_list);
218 218
219 mutex_unlock(&ipvs->app_mutex); 219 mutex_unlock(&__ip_vs_app_mutex);
220 220
221 /* decrease the module use count */ 221 /* decrease the module use count */
222 ip_vs_use_count_dec(); 222 ip_vs_use_count_dec();
@@ -501,7 +501,7 @@ static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
501 struct net *net = seq_file_net(seq); 501 struct net *net = seq_file_net(seq);
502 struct netns_ipvs *ipvs = net_ipvs(net); 502 struct netns_ipvs *ipvs = net_ipvs(net);
503 503
504 mutex_lock(&ipvs->app_mutex); 504 mutex_lock(&__ip_vs_app_mutex);
505 505
506 return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN; 506 return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
507} 507}
@@ -535,9 +535,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
535 535
536static void ip_vs_app_seq_stop(struct seq_file *seq, void *v) 536static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
537{ 537{
538 struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq)); 538 mutex_unlock(&__ip_vs_app_mutex);
539
540 mutex_unlock(&ipvs->app_mutex);
541} 539}
542 540
543static int ip_vs_app_seq_show(struct seq_file *seq, void *v) 541static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
@@ -583,7 +581,6 @@ static int __net_init __ip_vs_app_init(struct net *net)
583 struct netns_ipvs *ipvs = net_ipvs(net); 581 struct netns_ipvs *ipvs = net_ipvs(net);
584 582
585 INIT_LIST_HEAD(&ipvs->app_list); 583 INIT_LIST_HEAD(&ipvs->app_list);
586 __mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key);
587 proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops); 584 proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
588 return 0; 585 return 0;
589} 586}
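
The ip_vs_app changes replace the per-namespace app_mutex with a single
file-scope lock; DEFINE_MUTEX() yields a statically initialized mutex,
which is why the __mutex_init() call disappears from __ip_vs_app_init().
The pattern, reduced to its essentials (names here are illustrative):

	#include <linux/mutex.h>
	#include <linux/list.h>

	static DEFINE_MUTEX(app_lock);	/* one lock shared by all netns */

	static void app_add(struct list_head *list, struct list_head *entry)
	{
		mutex_lock(&app_lock);
		list_add(entry, list);
		mutex_unlock(&app_lock);
	}
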
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b799cea31f95..33733c8872e7 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3605,7 +3605,7 @@ int __net_init __ip_vs_control_init(struct net *net)
3605 3605
3606 /* procfs stats */ 3606 /* procfs stats */
3607 ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); 3607 ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
3608 if (ipvs->tot_stats.cpustats) { 3608 if (!ipvs->tot_stats.cpustats) {
3609 pr_err("%s(): alloc_percpu.\n", __func__); 3609 pr_err("%s(): alloc_percpu.\n", __func__);
3610 return -ENOMEM; 3610 return -ENOMEM;
3611 } 3611 }
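
The one-character ip_vs_ctl.c fix inverts a NULL test: the old code
returned -ENOMEM precisely when the per-CPU allocation had succeeded.
The correct idiom, as a generic sketch:

	#include <linux/percpu.h>
	#include <linux/errno.h>

	struct foo_stats { u64 packets; };

	static int foo_stats_init(struct foo_stats __percpu **statsp)
	{
		*statsp = alloc_percpu(struct foo_stats);
		if (!*statsp)	/* alloc_percpu() returns NULL on failure */
			return -ENOMEM;
		return 0;
	}
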
diff --git a/net/socket.c b/net/socket.c
index 937d0fcf74bc..5212447c86e7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2588,23 +2588,123 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
2588 2588
2589static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) 2589static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2590{ 2590{
2591 struct compat_ethtool_rxnfc __user *compat_rxnfc;
2592 bool convert_in = false, convert_out = false;
2593 size_t buf_size = ALIGN(sizeof(struct ifreq), 8);
2594 struct ethtool_rxnfc __user *rxnfc;
2591 struct ifreq __user *ifr; 2595 struct ifreq __user *ifr;
2596 u32 rule_cnt = 0, actual_rule_cnt;
2597 u32 ethcmd;
2592 u32 data; 2598 u32 data;
2593 void __user *datap; 2599 int ret;
2600
2601 if (get_user(data, &ifr32->ifr_ifru.ifru_data))
2602 return -EFAULT;
2594 2603
2595 ifr = compat_alloc_user_space(sizeof(*ifr)); 2604 compat_rxnfc = compat_ptr(data);
2596 2605
2597 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) 2606 if (get_user(ethcmd, &compat_rxnfc->cmd))
2598 return -EFAULT; 2607 return -EFAULT;
2599 2608
2600 if (get_user(data, &ifr32->ifr_ifru.ifru_data)) 2609 /* Most ethtool structures are defined without padding.
2610 * Unfortunately struct ethtool_rxnfc is an exception.
2611 */
2612 switch (ethcmd) {
2613 default:
2614 break;
2615 case ETHTOOL_GRXCLSRLALL:
2616 /* Buffer size is variable */
2617 if (get_user(rule_cnt, &compat_rxnfc->rule_cnt))
2618 return -EFAULT;
2619 if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32))
2620 return -ENOMEM;
2621 buf_size += rule_cnt * sizeof(u32);
2622 /* fall through */
2623 case ETHTOOL_GRXRINGS:
2624 case ETHTOOL_GRXCLSRLCNT:
2625 case ETHTOOL_GRXCLSRULE:
2626 convert_out = true;
2627 /* fall through */
2628 case ETHTOOL_SRXCLSRLDEL:
2629 case ETHTOOL_SRXCLSRLINS:
2630 buf_size += sizeof(struct ethtool_rxnfc);
2631 convert_in = true;
2632 break;
2633 }
2634
2635 ifr = compat_alloc_user_space(buf_size);
2636 rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
2637
2638 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
2601 return -EFAULT; 2639 return -EFAULT;
2602 2640
2603 datap = compat_ptr(data); 2641 if (put_user(convert_in ? rxnfc : compat_ptr(data),
2604 if (put_user(datap, &ifr->ifr_ifru.ifru_data)) 2642 &ifr->ifr_ifru.ifru_data))
2605 return -EFAULT; 2643 return -EFAULT;
2606 2644
2607 return dev_ioctl(net, SIOCETHTOOL, ifr); 2645 if (convert_in) {
2646 /* We expect there to be holes between fs.m_u and
2647 * fs.ring_cookie and at the end of fs, but nowhere else.
2648 */
2649 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) +
2650 sizeof(compat_rxnfc->fs.m_u) !=
2651 offsetof(struct ethtool_rxnfc, fs.m_u) +
2652 sizeof(rxnfc->fs.m_u));
2653 BUILD_BUG_ON(
2654 offsetof(struct compat_ethtool_rxnfc, fs.location) -
2655 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
2656 offsetof(struct ethtool_rxnfc, fs.location) -
2657 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
2658
2659 if (copy_in_user(rxnfc, compat_rxnfc,
2660 (void *)(&rxnfc->fs.m_u + 1) -
2661 (void *)rxnfc) ||
2662 copy_in_user(&rxnfc->fs.ring_cookie,
2663 &compat_rxnfc->fs.ring_cookie,
2664 (void *)(&rxnfc->fs.location + 1) -
2665 (void *)&rxnfc->fs.ring_cookie) ||
2666 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
2667 sizeof(rxnfc->rule_cnt)))
2668 return -EFAULT;
2669 }
2670
2671 ret = dev_ioctl(net, SIOCETHTOOL, ifr);
2672 if (ret)
2673 return ret;
2674
2675 if (convert_out) {
2676 if (copy_in_user(compat_rxnfc, rxnfc,
2677 (const void *)(&rxnfc->fs.m_u + 1) -
2678 (const void *)rxnfc) ||
2679 copy_in_user(&compat_rxnfc->fs.ring_cookie,
2680 &rxnfc->fs.ring_cookie,
2681 (const void *)(&rxnfc->fs.location + 1) -
2682 (const void *)&rxnfc->fs.ring_cookie) ||
2683 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
2684 sizeof(rxnfc->rule_cnt)))
2685 return -EFAULT;
2686
2687 if (ethcmd == ETHTOOL_GRXCLSRLALL) {
2688 /* As an optimisation, we only copy the actual
2689 * number of rules that the underlying
2690 * function returned. Since Mallory might
2691 * change the rule count in user memory, we
2692 * check that it is less than the rule count
2693 * originally given (as the user buffer size),
2694 * which has been range-checked.
2695 */
2696 if (get_user(actual_rule_cnt, &rxnfc->rule_cnt))
2697 return -EFAULT;
2698 if (actual_rule_cnt < rule_cnt)
2699 rule_cnt = actual_rule_cnt;
2700 if (copy_in_user(&compat_rxnfc->rule_locs[0],
2701 &rxnfc->rule_locs[0],
2702 rule_cnt * sizeof(u32)))
2703 return -EFAULT;
2704 }
2705 }
2706
2707 return 0;
2608} 2708}
2609 2709
2610static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) 2710static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
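
The rewritten ethtool_ioctl() copies struct ethtool_rxnfc between its
compat and native layouts, so it guards the field-group copies with
compile-time layout assertions instead of runtime checks. The idiom,
stripped down (struct and field names here are illustrative):

	#include <linux/bug.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	struct native_hdr { __u64 ring_cookie; __u32 location; };
	struct compat_hdr { __u64 ring_cookie; __u32 location; }
		__attribute__((packed, aligned(4)));

	static inline void check_abi(void)
	{
		/* The build fails if the two layouts ever diverge. */
		BUILD_BUG_ON(offsetof(struct native_hdr, location) !=
			     offsetof(struct compat_hdr, location));
	}
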
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d575f0534868..f83a3d1da81b 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1907,7 +1907,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1907 return res; 1907 return res;
1908} 1908}
1909 1909
1910int xfrm_init_state(struct xfrm_state *x) 1910int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
1911{ 1911{
1912 struct xfrm_state_afinfo *afinfo; 1912 struct xfrm_state_afinfo *afinfo;
1913 struct xfrm_mode *inner_mode; 1913 struct xfrm_mode *inner_mode;
@@ -1980,12 +1980,25 @@ int xfrm_init_state(struct xfrm_state *x)
1980 if (x->outer_mode == NULL) 1980 if (x->outer_mode == NULL)
1981 goto error; 1981 goto error;
1982 1982
1983 if (init_replay) {
1984 err = xfrm_init_replay(x);
1985 if (err)
1986 goto error;
1987 }
1988
1983 x->km.state = XFRM_STATE_VALID; 1989 x->km.state = XFRM_STATE_VALID;
1984 1990
1985error: 1991error:
1986 return err; 1992 return err;
1987} 1993}
1988 1994
1995EXPORT_SYMBOL(__xfrm_init_state);
1996
1997int xfrm_init_state(struct xfrm_state *x)
1998{
1999 return __xfrm_init_state(x, true);
2000}
2001
1989EXPORT_SYMBOL(xfrm_init_state); 2002EXPORT_SYMBOL(xfrm_init_state);
1990 2003
1991int __net_init xfrm_state_init(struct net *net) 2004int __net_init xfrm_state_init(struct net *net)
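
Keeping xfrm_init_state() as a one-line wrapper leaves every existing
caller untouched, while the new boolean lets xfrm_user skip replay
initialization, apparently so replay state can be set up after the
netlink attributes have been applied. Call-site view, per this diff:

	err = xfrm_init_state(x);		/* legacy callers: replay initialized */

	err = __xfrm_init_state(x, false);	/* xfrm_user: replay configured later */
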
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 706385ae3e4b..fc152d28753c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -511,7 +511,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
511 511
512 xfrm_mark_get(attrs, &x->mark); 512 xfrm_mark_get(attrs, &x->mark);
513 513
514 err = xfrm_init_state(x); 514 err = __xfrm_init_state(x, false);
515 if (err) 515 if (err)
516 goto error; 516 goto error;
517 517
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 6501a50e17f0..6129020c41a9 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -17,7 +17,9 @@ def getsizes(file):
17 sym = {} 17 sym = {}
18 for l in os.popen("nm --size-sort " + file).readlines(): 18 for l in os.popen("nm --size-sort " + file).readlines():
19 size, type, name = l[:-1].split() 19 size, type, name = l[:-1].split()
20 if type in "tTdDbB": 20 if type in "tTdDbBrR":
21 # strip generated symbols
22 if name[:6] == "__mod_": continue
21 # function names begin with '.' on 64-bit powerpc 23 # function names begin with '.' on 64-bit powerpc
22 if "." in name[1:]: name = "static." + name.split(".")[0] 24 if "." in name[1:]: name = "static." + name.split(".")[0]
23 sym[name] = sym.get(name, 0) + int(size, 16) 25 sym[name] = sym.get(name, 0) + int(size, 16)
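
bloat-o-meter now also counts r/R symbols, i.e. read-only data, and
skips the __mod_* symbols that the MODULE_*() macros generate, since
those are module metadata rather than hand-written code. Data that nm
reports as r/R looks like this in C (illustrative):

	static const u16 gain_table[4] = { 0, 3, 6, 9 };	/* 'r': local .rodata */
	const char fw_version[] = "1.2.3";			/* 'R': global .rodata */
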
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 58848e3e392c..8f9e394298cd 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2804,9 +2804,9 @@ sub process {
2804 WARN("consider using a completion\n" . $herecurr); 2804 WARN("consider using a completion\n" . $herecurr);
2805 2805
2806 } 2806 }
2807# recommend strict_strto* over simple_strto* 2807# recommend kstrto* over simple_strto*
2808 if ($line =~ /\bsimple_(strto.*?)\s*\(/) { 2808 if ($line =~ /\bsimple_(strto.*?)\s*\(/) {
2809 WARN("consider using strict_$1 in preference to simple_$1\n" . $herecurr); 2809 WARN("consider using kstrto* in preference to simple_$1\n" . $herecurr);
2810 } 2810 }
2811# check for __initcall(), use device_initcall() explicitly please 2811# check for __initcall(), use device_initcall() explicitly please
2812 if ($line =~ /^.\s*__initcall\s*\(/) { 2812 if ($line =~ /^.\s*__initcall\s*\(/) {
@@ -2902,6 +2902,11 @@ sub process {
2902 $line =~ /DEVICE_ATTR.*S_IWUGO/ ) { 2902 $line =~ /DEVICE_ATTR.*S_IWUGO/ ) {
2903 WARN("Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr); 2903 WARN("Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
2904 } 2904 }
2905
2906 # Check for memset with swapped arguments
2907 if ($line =~ /memset.*\,(\ |)(0x|)0(\ |0|)\);/) {
2908 ERROR("memset size is 3rd argument, not the second.\n" . $herecurr);
2909 }
2905 } 2910 }
2906 2911
2907 # If we have no input at all, then there is nothing to report on 2912 # If we have no input at all, then there is nothing to report on
@@ -2944,6 +2949,7 @@ sub process {
2944 if ($rpt_cleaners) { 2949 if ($rpt_cleaners) {
2945 print "NOTE: whitespace errors detected, you may wish to use scripts/cleanpatch or\n"; 2950 print "NOTE: whitespace errors detected, you may wish to use scripts/cleanpatch or\n";
2946 print " scripts/cleanfile\n\n"; 2951 print " scripts/cleanfile\n\n";
2952 $rpt_cleaners = 0;
2947 } 2953 }
2948 } 2954 }
2949 2955
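
The new checkpatch test targets a real bug class: memset() takes the
fill value second and the size third, so swapping them writes nothing.
For example:

	#include <linux/string.h>
	#include <linux/types.h>

	static void clear_buf(char *buf, size_t len)
	{
		memset(buf, len, 0);	/* bug: size 0, nothing is written */
		memset(buf, 0, len);	/* correct: fill value 0, size len */
	}

The regex is deliberately narrow: it only fires when the last argument
is a literal zero, which keeps false positives low.
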
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 139e0fff8e31..d29a8d75cb22 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -420,6 +420,14 @@ foreach my $file (@ARGV) {
420 420
421 open(my $patch, "< $file") 421 open(my $patch, "< $file")
422 or die "$P: Can't open $file: $!\n"; 422 or die "$P: Can't open $file: $!\n";
423
424 # We can check arbitrary information before the patch
425 # like the commit message, mail headers, etc...
426 # This allows us to match arbitrary keywords against any part
427 # of a git format-patch generated file (subject tags, etc...)
428
429 my $patch_prefix = ""; #Parsing the intro
430
423 while (<$patch>) { 431 while (<$patch>) {
424 my $patch_line = $_; 432 my $patch_line = $_;
425 if (m/^\+\+\+\s+(\S+)/) { 433 if (m/^\+\+\+\s+(\S+)/) {
@@ -428,13 +436,14 @@ foreach my $file (@ARGV) {
428 $filename =~ s@\n@@; 436 $filename =~ s@\n@@;
429 $lastfile = $filename; 437 $lastfile = $filename;
430 push(@files, $filename); 438 push(@files, $filename);
439 $patch_prefix = "^[+-].*"; #Now parsing the actual patch
431 } elsif (m/^\@\@ -(\d+),(\d+)/) { 440 } elsif (m/^\@\@ -(\d+),(\d+)/) {
432 if ($email_git_blame) { 441 if ($email_git_blame) {
433 push(@range, "$lastfile:$1:$2"); 442 push(@range, "$lastfile:$1:$2");
434 } 443 }
435 } elsif ($keywords) { 444 } elsif ($keywords) {
436 foreach my $line (keys %keyword_hash) { 445 foreach my $line (keys %keyword_hash) {
437 if ($patch_line =~ m/^[+-].*$keyword_hash{$line}/x) { 446 if ($patch_line =~ m/${patch_prefix}$keyword_hash{$line}/x) {
438 push(@keyword_tvi, $line); 447 push(@keyword_tvi, $line);
439 } 448 }
440 } 449 }
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f1a03f223495..5d582de91c19 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1265,6 +1265,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
1265 case 0x10ec0660: 1265 case 0x10ec0660:
1266 case 0x10ec0662: 1266 case 0x10ec0662:
1267 case 0x10ec0663: 1267 case 0x10ec0663:
1268 case 0x10ec0665:
1268 case 0x10ec0862: 1269 case 0x10ec0862:
1269 case 0x10ec0889: 1270 case 0x10ec0889:
1270 set_eapd(codec, 0x14, 1); 1271 set_eapd(codec, 0x14, 1);
@@ -4240,6 +4241,7 @@ static void alc_power_eapd(struct hda_codec *codec)
4240 case 0x10ec0660: 4241 case 0x10ec0660:
4241 case 0x10ec0662: 4242 case 0x10ec0662:
4242 case 0x10ec0663: 4243 case 0x10ec0663:
4244 case 0x10ec0665:
4243 case 0x10ec0862: 4245 case 0x10ec0862:
4244 case 0x10ec0889: 4246 case 0x10ec0889:
4245 set_eapd(codec, 0x14, 0); 4247 set_eapd(codec, 0x14, 0);
@@ -16006,9 +16008,12 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec,
16006 return err; 16008 return err;
16007 } else { 16009 } else {
16008 const char *name = pfx; 16010 const char *name = pfx;
16009 if (!name) 16011 int index = i;
16012 if (!name) {
16010 name = chname[i]; 16013 name = chname[i];
16011 err = __alc861_create_out_sw(codec, name, nid, i, 3); 16014 index = 0;
16015 }
16016 err = __alc861_create_out_sw(codec, name, nid, index, 3);
16012 if (err < 0) 16017 if (err < 0)
16013 return err; 16018 return err;
16014 } 16019 }
@@ -17159,16 +17164,19 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
17159 return err; 17164 return err;
17160 } else { 17165 } else {
17161 const char *name = pfx; 17166 const char *name = pfx;
17162 if (!name) 17167 int index = i;
17168 if (!name) {
17163 name = chname[i]; 17169 name = chname[i];
17170 index = 0;
17171 }
17164 err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, 17172 err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
17165 name, i, 17173 name, index,
17166 HDA_COMPOSE_AMP_VAL(nid_v, 3, 0, 17174 HDA_COMPOSE_AMP_VAL(nid_v, 3, 0,
17167 HDA_OUTPUT)); 17175 HDA_OUTPUT));
17168 if (err < 0) 17176 if (err < 0)
17169 return err; 17177 return err;
17170 err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, 17178 err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
17171 name, i, 17179 name, index,
17172 HDA_COMPOSE_AMP_VAL(nid_s, 3, 2, 17180 HDA_COMPOSE_AMP_VAL(nid_s, 3, 2,
17173 HDA_INPUT)); 17181 HDA_INPUT));
17174 if (err < 0) 17182 if (err < 0)
@@ -19217,12 +19225,15 @@ static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec,
19217 return err; 19225 return err;
19218 } else { 19226 } else {
19219 const char *name = pfx; 19227 const char *name = pfx;
19220 if (!name) 19228 int index = i;
19229 if (!name) {
19221 name = chname[i]; 19230 name = chname[i];
19222 err = __alc662_add_vol_ctl(spec, name, nid, i, 3); 19231 index = 0;
19232 }
19233 err = __alc662_add_vol_ctl(spec, name, nid, index, 3);
19223 if (err < 0) 19234 if (err < 0)
19224 return err; 19235 return err;
19225 err = __alc662_add_sw_ctl(spec, name, mix, i, 3); 19236 err = __alc662_add_sw_ctl(spec, name, mix, index, 3);
19226 if (err < 0) 19237 if (err < 0)
19227 return err; 19238 return err;
19228 } 19239 }
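
The three patch_realtek.c hunks apply one fix in three places: when no
common prefix (pfx) is supplied, each channel already gets a distinct
control name from chname[], so the control index must drop back to 0
instead of reusing the channel number; otherwise ALSA would create,
say, a "Surround Playback Volume" control at index 1 with no index 0.
The shared shape of the fix:

	const char *name = pfx;	/* common prefix, if any */
	int index = i;		/* distinguishes same-named controls */

	if (!name) {		/* distinct per-channel names ... */
		name = chname[i];
		index = 0;	/* ... need no index */
	}
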
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 63b0054200a8..1371b57c11e8 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -159,6 +159,7 @@ struct via_spec {
159#endif 159#endif
160}; 160};
161 161
162static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec);
162static struct via_spec * via_new_spec(struct hda_codec *codec) 163static struct via_spec * via_new_spec(struct hda_codec *codec)
163{ 164{
164 struct via_spec *spec; 165 struct via_spec *spec;
@@ -169,6 +170,10 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
169 170
170 codec->spec = spec; 171 codec->spec = spec;
171 spec->codec = codec; 172 spec->codec = codec;
173 spec->codec_type = get_codec_type(codec);
174 /* VT1708BCE & VT1708S are almost same */
175 if (spec->codec_type == VT1708BCE)
176 spec->codec_type = VT1708S;
172 return spec; 177 return spec;
173} 178}
174 179
@@ -1101,6 +1106,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
1101 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 1106 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1102 struct via_spec *spec = codec->spec; 1107 struct via_spec *spec = codec->spec;
1103 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); 1108 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
1109 int ret;
1104 1110
1105 if (!spec->mux_nids[adc_idx]) 1111 if (!spec->mux_nids[adc_idx])
1106 return -EINVAL; 1112 return -EINVAL;
@@ -1109,12 +1115,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
1109 AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0) 1115 AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0)
1110 snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0, 1116 snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0,
1111 AC_VERB_SET_POWER_STATE, AC_PWRST_D0); 1117 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
1112 /* update jack power state */
1113 set_jack_power_state(codec);
1114 1118
1115 return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, 1119 ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
1116 spec->mux_nids[adc_idx], 1120 spec->mux_nids[adc_idx],
1117 &spec->cur_mux[adc_idx]); 1121 &spec->cur_mux[adc_idx]);
1122 /* update jack power state */
1123 set_jack_power_state(codec);
1124
1125 return ret;
1118} 1126}
1119 1127
1120static int via_independent_hp_info(struct snd_kcontrol *kcontrol, 1128static int via_independent_hp_info(struct snd_kcontrol *kcontrol,
@@ -1188,8 +1196,16 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
1188 /* Get Independent Mode index of headphone pin widget */ 1196 /* Get Independent Mode index of headphone pin widget */
1189 spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel 1197 spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
1190 ? 1 : 0; 1198 ? 1 : 0;
1191 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel); 1199 if (spec->codec_type == VT1718S)
1200 snd_hda_codec_write(codec, nid, 0,
1201 AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0);
1202 else
1203 snd_hda_codec_write(codec, nid, 0,
1204 AC_VERB_SET_CONNECT_SEL, pinsel);
1192 1205
1206 if (spec->codec_type == VT1812)
1207 snd_hda_codec_write(codec, 0x35, 0,
1208 AC_VERB_SET_CONNECT_SEL, pinsel);
1193 if (spec->multiout.hp_nid && spec->multiout.hp_nid 1209 if (spec->multiout.hp_nid && spec->multiout.hp_nid
1194 != spec->multiout.dac_nids[HDA_FRONT]) 1210 != spec->multiout.dac_nids[HDA_FRONT])
1195 snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid, 1211 snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid,
@@ -1208,6 +1224,8 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
1208 activate_ctl(codec, "Headphone Playback Switch", 1224 activate_ctl(codec, "Headphone Playback Switch",
1209 spec->hp_independent_mode); 1225 spec->hp_independent_mode);
1210 } 1226 }
1227 /* update jack power state */
1228 set_jack_power_state(codec);
1211 return 0; 1229 return 0;
1212} 1230}
1213 1231
@@ -1248,9 +1266,12 @@ static int via_hp_build(struct hda_codec *codec)
1248 break; 1266 break;
1249 } 1267 }
1250 1268
1251 nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS); 1269 if (spec->codec_type != VT1708) {
1252 if (nums <= 1) 1270 nums = snd_hda_get_connections(codec, nid,
1253 return 0; 1271 conn, HDA_MAX_CONNECTIONS);
1272 if (nums <= 1)
1273 return 0;
1274 }
1254 1275
1255 knew = via_clone_control(spec, &via_hp_mixer[0]); 1276 knew = via_clone_control(spec, &via_hp_mixer[0]);
1256 if (knew == NULL) 1277 if (knew == NULL)
@@ -1310,6 +1331,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute)
1310 start_idx = 2; 1331 start_idx = 2;
1311 end_idx = 4; 1332 end_idx = 4;
1312 break; 1333 break;
1334 case VT1718S:
1335 nid_mixer = 0x21;
1336 start_idx = 1;
1337 end_idx = 3;
1338 break;
1313 default: 1339 default:
1314 return; 1340 return;
1315 } 1341 }
@@ -2185,10 +2211,6 @@ static int via_init(struct hda_codec *codec)
2185 for (i = 0; i < spec->num_iverbs; i++) 2211 for (i = 0; i < spec->num_iverbs; i++)
2186 snd_hda_sequence_write(codec, spec->init_verbs[i]); 2212 snd_hda_sequence_write(codec, spec->init_verbs[i]);
2187 2213
2188 spec->codec_type = get_codec_type(codec);
2189 if (spec->codec_type == VT1708BCE)
2190 spec->codec_type = VT1708S; /* VT1708BCE & VT1708S are almost
2191 same */
2192 /* Lydia Add for EAPD enable */ 2214 /* Lydia Add for EAPD enable */
2193 if (!spec->dig_in_nid) { /* No Digital In connection */ 2215 if (!spec->dig_in_nid) { /* No Digital In connection */
2194 if (spec->dig_in_pin) { 2216 if (spec->dig_in_pin) {
@@ -2438,7 +2460,14 @@ static int vt_auto_create_analog_input_ctls(struct hda_codec *codec,
2438 else 2460 else
2439 type_idx = 0; 2461 type_idx = 0;
2440 label = hda_get_autocfg_input_label(codec, cfg, i); 2462 label = hda_get_autocfg_input_label(codec, cfg, i);
2441 err = via_new_analog_input(spec, label, type_idx, idx, cap_nid); 2463 if (spec->codec_type == VT1708S ||
2464 spec->codec_type == VT1702 ||
2465 spec->codec_type == VT1716S)
2466 err = via_new_analog_input(spec, label, type_idx,
2467 idx+1, cap_nid);
2468 else
2469 err = via_new_analog_input(spec, label, type_idx,
2470 idx, cap_nid);
2442 if (err < 0) 2471 if (err < 0)
2443 return err; 2472 return err;
2444 snd_hda_add_imux_item(imux, label, idx, NULL); 2473 snd_hda_add_imux_item(imux, label, idx, NULL);
@@ -4147,6 +4176,11 @@ static int patch_vt1708S(struct hda_codec *codec)
4147 spec->stream_name_analog = "VT1708BCE Analog"; 4176 spec->stream_name_analog = "VT1708BCE Analog";
4148 spec->stream_name_digital = "VT1708BCE Digital"; 4177 spec->stream_name_digital = "VT1708BCE Digital";
4149 } 4178 }
4179 /* correct names for VT1818S */
4180 if (codec->vendor_id == 0x11060440) {
4181 spec->stream_name_analog = "VT1818S Analog";
4182 spec->stream_name_digital = "VT1818S Digital";
4183 }
4150 return 0; 4184 return 0;
4151} 4185}
4152 4186
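
Resolving the codec type in via_new_spec() rather than in via_init()
matters because spec->codec_type is consulted well before init time:
via_independent_hp_put(), via_hp_build(), mute_aa_path() and
vt_auto_create_analog_input_ctls() above all branch on it while the
driver is still parsing the configuration. The relocated logic:

	spec->codec_type = get_codec_type(codec);
	/* VT1708BCE & VT1708S are almost the same */
	if (spec->codec_type == VT1708BCE)
		spec->codec_type = VT1708S;
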
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 1f7217f703ee..ff29380c9ed3 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -772,6 +772,7 @@ static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
772 return 0; 772 return 0;
773} 773}
774 774
775#ifdef CONFIG_REGULATOR
775static int ldo_regulator_is_enabled(struct regulator_dev *dev) 776static int ldo_regulator_is_enabled(struct regulator_dev *dev)
776{ 777{
777 struct ldo_regulator *ldo = rdev_get_drvdata(dev); 778 struct ldo_regulator *ldo = rdev_get_drvdata(dev);
@@ -901,6 +902,19 @@ static int ldo_regulator_remove(struct snd_soc_codec *codec)
901 902
902 return 0; 903 return 0;
903} 904}
905#else
906static int ldo_regulator_register(struct snd_soc_codec *codec,
907 struct regulator_init_data *init_data,
908 int voltage)
909{
910 return -EINVAL;
911}
912
913static int ldo_regulator_remove(struct snd_soc_codec *codec)
914{
915 return 0;
916}
917#endif
904 918
905/* 919/*
906 * set dac bias 920 * set dac bias
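
Wrapping the LDO code in CONFIG_REGULATOR follows the usual kernel
stub idiom: when a feature is compiled out, supply fallbacks with the
same signatures so callers stay free of #ifdefs. The generic shape
(names here are illustrative):

	#ifdef CONFIG_FOO
	int foo_register(struct device *dev);
	int foo_unregister(struct device *dev);
	#else
	static inline int foo_register(struct device *dev)
	{
		return -EINVAL;		/* feature not built in */
	}
	static inline int foo_unregister(struct device *dev)
	{
		return 0;
	}
	#endif
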
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index e76847a9438b..48ffd406a71d 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -486,7 +486,8 @@ static struct snd_soc_dai_driver uda134x_dai = {
486static int uda134x_soc_probe(struct snd_soc_codec *codec) 486static int uda134x_soc_probe(struct snd_soc_codec *codec)
487{ 487{
488 struct uda134x_priv *uda134x; 488 struct uda134x_priv *uda134x;
489 struct uda134x_platform_data *pd = dev_get_drvdata(codec->card->dev); 489 struct uda134x_platform_data *pd = codec->card->dev->platform_data;
490
490 int ret; 491 int ret;
491 492
492 printk(KERN_INFO "UDA134X SoC Audio Codec\n"); 493 printk(KERN_INFO "UDA134X SoC Audio Codec\n");
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 3cb700751078..dc9d551f6788 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -219,7 +219,7 @@ static struct snd_soc_ops s3c24xx_uda134x_ops = {
219static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = { 219static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
220 .name = "UDA134X", 220 .name = "UDA134X",
221 .stream_name = "UDA134X", 221 .stream_name = "UDA134X",
222 .codec_name = "uda134x-hifi", 222 .codec_name = "uda134x-codec",
223 .codec_dai_name = "uda134x-hifi", 223 .codec_dai_name = "uda134x-hifi",
224 .cpu_dai_name = "s3c24xx-iis", 224 .cpu_dai_name = "s3c24xx-iis",
225 .ops = &s3c24xx_uda134x_ops, 225 .ops = &s3c24xx_uda134x_ops,
@@ -314,6 +314,7 @@ static int s3c24xx_uda134x_probe(struct platform_device *pdev)
314 314
315 platform_set_drvdata(s3c24xx_uda134x_snd_device, 315 platform_set_drvdata(s3c24xx_uda134x_snd_device,
316 &snd_soc_s3c24xx_uda134x); 316 &snd_soc_s3c24xx_uda134x);
317 platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x));
317 ret = platform_device_add(s3c24xx_uda134x_snd_device); 318 ret = platform_device_add(s3c24xx_uda134x_snd_device);
318 if (ret) { 319 if (ret) {
319 printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n"); 320 printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");
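
The uda134x.c and s3c24xx_uda134x.c hunks are two halves of one fix:
the machine driver now publishes its board configuration through
platform_device_add_data(), which copies the structure into
dev->platform_data, and the codec driver reads it back from there
instead of from drvdata. The pairing, using the names from the hunks
above:

	/* machine driver (producer); the data is copied, so passing
	 * the address of a file-scope struct is fine: */
	platform_device_add_data(s3c24xx_uda134x_snd_device,
				 &s3c24xx_uda134x, sizeof(s3c24xx_uda134x));

	/* codec driver (consumer): */
	struct uda134x_platform_data *pd = codec->card->dev->platform_data;
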
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 17efacdb248a..4dda58926bc5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -259,8 +259,6 @@ static ssize_t codec_reg_write_file(struct file *file,
259 while (*start == ' ') 259 while (*start == ' ')
260 start++; 260 start++;
261 reg = simple_strtoul(start, &start, 16); 261 reg = simple_strtoul(start, &start, 16);
262 if ((reg >= codec->driver->reg_cache_size) || (reg % step))
263 return -EINVAL;
264 while (*start == ' ') 262 while (*start == ' ')
265 start++; 263 start++;
266 if (strict_strtoul(start, 16, &value)) 264 if (strict_strtoul(start, 16, &value))
diff --git a/sound/sound_firmware.c b/sound/sound_firmware.c
index 340a0bc5303e..7e96249536b4 100644
--- a/sound/sound_firmware.c
+++ b/sound/sound_firmware.c
@@ -19,7 +19,7 @@ static int do_mod_firmware_load(const char *fn, char **fp)
19 printk(KERN_INFO "Unable to load '%s'.\n", fn); 19 printk(KERN_INFO "Unable to load '%s'.\n", fn);
20 return 0; 20 return 0;
21 } 21 }
22 l = filp->f_path.dentry->d_inode->i_size; 22 l = i_size_read(filp->f_path.dentry->d_inode);
23 if (l <= 0 || l > 131072) 23 if (l <= 0 || l > 131072)
24 { 24 {
25 printk(KERN_INFO "Invalid firmware '%s'\n", fn); 25 printk(KERN_INFO "Invalid firmware '%s'\n", fn);
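
i_size_read() matters on 32-bit SMP kernels, where a bare load of the
64-bit i_size can tear against a concurrent writer; the helper uses a
seqcount there to return a consistent value, and collapses to a plain
read on 64-bit. It is a drop-in replacement:

	loff_t l = i_size_read(filp->f_path.dentry->d_inode);
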
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 40722f8711ad..a90662af2d6b 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -41,6 +41,7 @@
41#include <linux/list.h> 41#include <linux/list.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/string.h> 43#include <linux/string.h>
44#include <linux/ctype.h>
44#include <linux/usb.h> 45#include <linux/usb.h>
45#include <linux/moduleparam.h> 46#include <linux/moduleparam.h>
46#include <linux/mutex.h> 47#include <linux/mutex.h>
@@ -283,6 +284,15 @@ static int snd_usb_audio_dev_free(struct snd_device *device)
283 return snd_usb_audio_free(chip); 284 return snd_usb_audio_free(chip);
284} 285}
285 286
287static void remove_trailing_spaces(char *str)
288{
289 char *p;
290
291 if (!*str)
292 return;
293 for (p = str + strlen(str) - 1; p >= str && isspace(*p); p--)
294 *p = 0;
295}
286 296
287/* 297/*
288 * create a chip instance and set its names. 298 * create a chip instance and set its names.
@@ -351,7 +361,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
351 snd_component_add(card, component); 361 snd_component_add(card, component);
352 362
353 /* retrieve the device string as shortname */ 363 /* retrieve the device string as shortname */
354 if (quirk && quirk->product_name) { 364 if (quirk && quirk->product_name && *quirk->product_name) {
355 strlcpy(card->shortname, quirk->product_name, sizeof(card->shortname)); 365 strlcpy(card->shortname, quirk->product_name, sizeof(card->shortname));
356 } else { 366 } else {
357 if (!dev->descriptor.iProduct || 367 if (!dev->descriptor.iProduct ||
@@ -363,9 +373,10 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
363 USB_ID_PRODUCT(chip->usb_id)); 373 USB_ID_PRODUCT(chip->usb_id));
364 } 374 }
365 } 375 }
376 remove_trailing_spaces(card->shortname);
366 377
367 /* retrieve the vendor and device strings as longname */ 378 /* retrieve the vendor and device strings as longname */
368 if (quirk && quirk->vendor_name) { 379 if (quirk && quirk->vendor_name && *quirk->vendor_name) {
369 len = strlcpy(card->longname, quirk->vendor_name, sizeof(card->longname)); 380 len = strlcpy(card->longname, quirk->vendor_name, sizeof(card->longname));
370 } else { 381 } else {
371 if (dev->descriptor.iManufacturer) 382 if (dev->descriptor.iManufacturer)
@@ -375,8 +386,11 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
375 len = 0; 386 len = 0;
376 /* we don't really care if there isn't any vendor string */ 387 /* we don't really care if there isn't any vendor string */
377 } 388 }
378 if (len > 0) 389 if (len > 0) {
379 strlcat(card->longname, " ", sizeof(card->longname)); 390 remove_trailing_spaces(card->longname);
391 if (*card->longname)
392 strlcat(card->longname, " ", sizeof(card->longname));
393 }
380 394
381 strlcat(card->longname, card->shortname, sizeof(card->longname)); 395 strlcat(card->longname, card->shortname, sizeof(card->longname));
382 396
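
remove_trailing_spaces() walks back from the end of the string and
overwrites trailing whitespace with NULs, so devices whose USB strings
are blank-padded no longer yield card names with dangling spaces. For
example:

	char name[32] = "USB Audio   ";

	remove_trailing_spaces(name);	/* name is now "USB Audio" */
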